// compiler/rustc_middle/src/ty/layout.rs
use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use crate::mir::{GeneratorLayout, GeneratorSavedLocal};
use crate::ty::normalize_erasing_regions::NormalizationError;
use crate::ty::subst::Subst;
use crate::ty::{self, subst::SubstsRef, ReprOptions, Ty, TyCtxt, TypeFoldable};
use rustc_ast as ast;
use rustc_attr as attr;
use rustc_hir as hir;
use rustc_hir::lang_items::LangItem;
use rustc_index::bit_set::BitSet;
use rustc_index::vec::{Idx, IndexVec};
use rustc_session::{config::OptLevel, DataTypeKind, FieldInfo, SizeKind, VariantInfo};
use rustc_span::symbol::Symbol;
use rustc_span::{Span, DUMMY_SP};
use rustc_target::abi::call::{
    ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, Conv, FnAbi, PassMode, Reg, RegKind,
};
use rustc_target::abi::*;
use rustc_target::spec::{abi::Abi as SpecAbi, HasTargetSpec, PanicStrategy, Target};

use std::cmp;
use std::fmt;
use std::iter;
use std::num::NonZeroUsize;
use std::ops::Bound;

use rand::{seq::SliceRandom, SeedableRng};
use rand_xoshiro::Xoshiro128StarStar;

pub fn provide(providers: &mut ty::query::Providers) {
    *providers =
        ty::query::Providers { layout_of, fn_abi_of_fn_ptr, fn_abi_of_instance, ..*providers };
}

pub trait IntegerExt {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx>;
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
    fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer;
    fn from_uint_ty<C: HasDataLayout>(cx: &C, uty: ty::UintTy) -> Integer;
    fn repr_discr<'tcx>(
        tcx: TyCtxt<'tcx>,
        ty: Ty<'tcx>,
        repr: &ReprOptions,
        min: i128,
        max: i128,
    ) -> (Integer, bool);
}

impl IntegerExt for Integer {
    #[inline]
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx> {
        match (*self, signed) {
            (I8, false) => tcx.types.u8,
            (I16, false) => tcx.types.u16,
            (I32, false) => tcx.types.u32,
            (I64, false) => tcx.types.u64,
            (I128, false) => tcx.types.u128,
            (I8, true) => tcx.types.i8,
            (I16, true) => tcx.types.i16,
            (I32, true) => tcx.types.i32,
            (I64, true) => tcx.types.i64,
            (I128, true) => tcx.types.i128,
        }
    }

    /// Gets the Integer type from an attr::IntType.
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
        let dl = cx.data_layout();

        match ity {
            attr::SignedInt(ast::IntTy::I8) | attr::UnsignedInt(ast::UintTy::U8) => I8,
            attr::SignedInt(ast::IntTy::I16) | attr::UnsignedInt(ast::UintTy::U16) => I16,
            attr::SignedInt(ast::IntTy::I32) | attr::UnsignedInt(ast::UintTy::U32) => I32,
            attr::SignedInt(ast::IntTy::I64) | attr::UnsignedInt(ast::UintTy::U64) => I64,
            attr::SignedInt(ast::IntTy::I128) | attr::UnsignedInt(ast::UintTy::U128) => I128,
            attr::SignedInt(ast::IntTy::Isize) | attr::UnsignedInt(ast::UintTy::Usize) => {
                dl.ptr_sized_integer()
            }
        }
    }

    fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer {
        match ity {
            ty::IntTy::I8 => I8,
            ty::IntTy::I16 => I16,
            ty::IntTy::I32 => I32,
            ty::IntTy::I64 => I64,
            ty::IntTy::I128 => I128,
            ty::IntTy::Isize => cx.data_layout().ptr_sized_integer(),
        }
    }
    fn from_uint_ty<C: HasDataLayout>(cx: &C, ity: ty::UintTy) -> Integer {
        match ity {
            ty::UintTy::U8 => I8,
            ty::UintTy::U16 => I16,
            ty::UintTy::U32 => I32,
            ty::UintTy::U64 => I64,
            ty::UintTy::U128 => I128,
            ty::UintTy::Usize => cx.data_layout().ptr_sized_integer(),
        }
    }

    /// Finds the appropriate Integer type and signedness for the given
    /// signed discriminant range and `#[repr]` attribute.
    /// N.B.: `u128` values above `i128::MAX` will be treated as signed, but
    /// that shouldn't affect anything, other than maybe debuginfo.
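    /// For example, with no `#[repr]` attribute, a discriminant range of
    /// `-1..=100` fits in `I8`, so the result is `(I8, true)`.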
    fn repr_discr<'tcx>(
        tcx: TyCtxt<'tcx>,
        ty: Ty<'tcx>,
        repr: &ReprOptions,
        min: i128,
        max: i128,
    ) -> (Integer, bool) {
        // Theoretically, negative values could be larger in unsigned representation
        // than the unsigned representation of the signed minimum. However, if there
        // are any negative values, the only valid unsigned representation is u128
        // which can fit all i128 values, so the result remains unaffected.
        let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
        let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));

        if let Some(ity) = repr.int {
            let discr = Integer::from_attr(&tcx, ity);
            let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
            if discr < fit {
                bug!(
                    "Integer::repr_discr: `#[repr]` hint too small for \
                      discriminant range of enum `{}`",
                    ty
                )
            }
            return (discr, ity.is_signed());
        }

        let at_least = if repr.c() {
            // This is usually I32, however it can be different on some platforms,
            // notably hexagon and arm-none/thumb-none
            tcx.data_layout().c_enum_min_size
        } else {
            // repr(Rust) enums try to be as small as possible
            I8
        };

        // If there are no negative values, we can use the unsigned fit.
        if min >= 0 {
            (cmp::max(unsigned_fit, at_least), false)
        } else {
            (cmp::max(signed_fit, at_least), true)
        }
    }
}

pub trait PrimitiveExt {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
    fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
}

impl PrimitiveExt for Primitive {
    #[inline]
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
        match *self {
            Int(i, signed) => i.to_ty(tcx, signed),
            F32 => tcx.types.f32,
            F64 => tcx.types.f64,
            Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
        }
    }

    /// Return an *integer* type matching this primitive.
    /// Useful in particular when dealing with enum discriminants.
    #[inline]
    fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
        match *self {
            Int(i, signed) => i.to_ty(tcx, signed),
            Pointer => tcx.types.usize,
            F32 | F64 => bug!("floats do not have an int type"),
        }
    }
}

/// The first half of a fat pointer.
///
/// - For a trait object, this is the address of the box.
/// - For a slice, this is the base address.
pub const FAT_PTR_ADDR: usize = 0;

/// The second half of a fat pointer.
///
/// - For a trait object, this is the address of the vtable.
/// - For a slice, this is the length.
pub const FAT_PTR_EXTRA: usize = 1;

/// The maximum supported number of lanes in a SIMD vector.
///
/// This value is selected based on backend support:
/// * LLVM does not appear to have a vector width limit.
/// * Cranelift stores the base-2 log of the lane count in a 4-bit integer.
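/// A 4-bit log2 field therefore allows at most `1 << 0xF` = 2^15 = 32768 lanes.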
pub const MAX_SIMD_LANES: u64 = 1 << 0xF;

#[derive(Copy, Clone, Debug, HashStable, TyEncodable, TyDecodable)]
pub enum LayoutError<'tcx> {
    Unknown(Ty<'tcx>),
    SizeOverflow(Ty<'tcx>),
    NormalizationFailure(Ty<'tcx>, NormalizationError<'tcx>),
}

impl<'tcx> fmt::Display for LayoutError<'tcx> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match *self {
            LayoutError::Unknown(ty) => write!(f, "the type `{}` has an unknown layout", ty),
            LayoutError::SizeOverflow(ty) => {
                write!(f, "values of the type `{}` are too big for the current architecture", ty)
            }
            LayoutError::NormalizationFailure(t, e) => write!(
                f,
                "unable to determine layout for `{}` because `{}` cannot be normalized",
                t,
                e.get_type_for_failure()
            ),
        }
    }
}

#[instrument(skip(tcx, query), level = "debug")]
fn layout_of<'tcx>(
    tcx: TyCtxt<'tcx>,
    query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
    ty::tls::with_related_context(tcx, move |icx| {
        let (param_env, ty) = query.into_parts();
        debug!(?ty);

        if !tcx.recursion_limit().value_within_limit(icx.layout_depth) {
            tcx.sess.fatal(&format!("overflow representing the type `{}`", ty));
        }

        // Update the ImplicitCtxt to increase the layout_depth
        let icx = ty::tls::ImplicitCtxt { layout_depth: icx.layout_depth + 1, ..icx.clone() };

        ty::tls::enter_context(&icx, |_| {
            let param_env = param_env.with_reveal_all_normalized(tcx);
            let unnormalized_ty = ty;

            // FIXME: We might want to have two different versions of `layout_of`:
            // One that can be called after typecheck has completed and can use
            // `normalize_erasing_regions` here and another one that can be called
            // before typecheck has completed and uses `try_normalize_erasing_regions`.
            let ty = match tcx.try_normalize_erasing_regions(param_env, ty) {
                Ok(t) => t,
                Err(normalization_error) => {
                    return Err(LayoutError::NormalizationFailure(ty, normalization_error));
                }
            };

            if ty != unnormalized_ty {
                // Ensure this layout is also cached for the normalized type.
                return tcx.layout_of(param_env.and(ty));
            }

            let cx = LayoutCx { tcx, param_env };

            let layout = cx.layout_of_uncached(ty)?;
            let layout = TyAndLayout { ty, layout };

            cx.record_layout_for_printing(layout);

            // Type-level uninhabitedness should always imply ABI uninhabitedness.
            if tcx.conservative_is_privately_uninhabited(param_env.and(ty)) {
                assert!(layout.abi.is_uninhabited());
            }

            Ok(layout)
        })
    })
}

pub struct LayoutCx<'tcx, C> {
    pub tcx: C,
    pub param_env: ty::ParamEnv<'tcx>,
}

#[derive(Copy, Clone, Debug)]
enum StructKind {
    /// A tuple, closure, or univariant which cannot be coerced to unsized.
    AlwaysSized,
    /// A univariant, the last field of which may be coerced to unsized.
    MaybeUnsized,
    /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
    Prefixed(Size, Align),
}

// Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
// This is used to go between `memory_index` (source field order to memory order)
// and `inverse_memory_index` (memory order to source field order).
// See also `FieldsShape::Arbitrary::memory_index` for more details.
// FIXME(eddyb) build a better abstraction for permutations, if possible.
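// For example, invert_mapping(&[2, 0, 1]) == vec![1, 2, 0]: field 0 sits at
// memory position 2, so the inverse maps position 2 back to field 0.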
fn invert_mapping(map: &[u32]) -> Vec<u32> {
    let mut inverse = vec![0; map.len()];
    for i in 0..map.len() {
        inverse[map[i] as usize] = i as u32;
    }
    inverse
}

impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
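    /// Computes the layout for a pair of scalars laid out one after the other,
    /// e.g. the two halves of a wide pointer. For illustration, on a typical
    /// 64-bit target a (u8, u32) pair gets `b_offset` 4, size 8, and align 4.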
    fn scalar_pair(&self, a: Scalar, b: Scalar) -> Layout {
        let dl = self.data_layout();
        let b_align = b.value.align(dl);
        let align = a.value.align(dl).max(b_align).max(dl.aggregate_align);
        let b_offset = a.value.size(dl).align_to(b_align.abi);
        let size = (b_offset + b.value.size(dl)).align_to(align.abi);

        // HACK(nox): We iter on `b` and then `a` because `max_by_key`
        // returns the last maximum.
        let largest_niche = Niche::from_scalar(dl, b_offset, b)
            .into_iter()
            .chain(Niche::from_scalar(dl, Size::ZERO, a))
            .max_by_key(|niche| niche.available(dl));

        Layout {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Arbitrary {
                offsets: vec![Size::ZERO, b_offset],
                memory_index: vec![0, 1],
            },
            abi: Abi::ScalarPair(a, b),
            largest_niche,
            align,
            size,
        }
    }

    fn univariant_uninterned(
        &self,
        ty: Ty<'tcx>,
        fields: &[TyAndLayout<'_>],
        repr: &ReprOptions,
        kind: StructKind,
    ) -> Result<Layout, LayoutError<'tcx>> {
        let dl = self.data_layout();
        let pack = repr.pack;
        if pack.is_some() && repr.align.is_some() {
            self.tcx.sess.delay_span_bug(DUMMY_SP, "struct cannot be packed and aligned");
            return Err(LayoutError::Unknown(ty));
        }

        let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };

        let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();

        let optimize = !repr.inhibit_struct_field_reordering_opt();
        if optimize {
            let end =
                if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
            let optimizing = &mut inverse_memory_index[..end];
            let field_align = |f: &TyAndLayout<'_>| {
                if let Some(pack) = pack { f.align.abi.min(pack) } else { f.align.abi }
            };

            // If `-Z randomize-layout` was enabled for the type definition, we can shuffle
            // the field ordering to try to catch some code making assumptions about layouts
            // we don't guarantee.
            if repr.can_randomize_type_layout() {
                // `ReprOptions.layout_seed` is a deterministic seed that we can use to
                // randomize field ordering with
                let mut rng = Xoshiro128StarStar::seed_from_u64(repr.field_shuffle_seed);

                // Shuffle the ordering of the fields
                optimizing.shuffle(&mut rng);

            // Otherwise we just leave things alone and actually optimize the type's fields
            } else {
                match kind {
                    StructKind::AlwaysSized | StructKind::MaybeUnsized => {
                        optimizing.sort_by_key(|&x| {
                            // Place ZSTs first to avoid "interesting offsets",
                            // especially with only one or two non-ZST fields.
                            let f = &fields[x as usize];
                            (!f.is_zst(), cmp::Reverse(field_align(f)))
                        });
                    }

                    StructKind::Prefixed(..) => {
                        // Sort in ascending alignment so that the layout stays optimal
                        // regardless of the prefix
                        optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
                    }
                }

                // FIXME(Kixiron): We can always shuffle fields within a given alignment class
                //                 regardless of the status of `-Z randomize-layout`
            }
        }

        // inverse_memory_index holds field indices by increasing memory offset.
        // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
        // We now write field offsets to the corresponding offset slot;
        // field 5 with offset 0 puts 0 in offsets[5].
        // At the bottom of this function, we invert `inverse_memory_index` to
        // produce `memory_index` (see `invert_mapping`).

        let mut sized = true;
        let mut offsets = vec![Size::ZERO; fields.len()];
        let mut offset = Size::ZERO;
        let mut largest_niche = None;
        let mut largest_niche_available = 0;

        if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
            let prefix_align =
                if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
            align = align.max(AbiAndPrefAlign::new(prefix_align));
            offset = prefix_size.align_to(prefix_align);
        }

        for &i in &inverse_memory_index {
            let field = fields[i as usize];
            if !sized {
                self.tcx.sess.delay_span_bug(
                    DUMMY_SP,
                    &format!(
                        "univariant: field #{} of `{}` comes after unsized field",
                        offsets.len(),
                        ty
                    ),
                );
            }

            if field.is_unsized() {
                sized = false;
            }

            // Invariant: offset < dl.obj_size_bound() <= 1<<61
            let field_align = if let Some(pack) = pack {
                field.align.min(AbiAndPrefAlign::new(pack))
            } else {
                field.align
            };
            offset = offset.align_to(field_align.abi);
            align = align.max(field_align);

            debug!("univariant offset: {:?} field: {:#?}", offset, field);
            offsets[i as usize] = offset;

            if !repr.hide_niche() {
                if let Some(mut niche) = field.largest_niche {
                    let available = niche.available(dl);
                    if available > largest_niche_available {
                        largest_niche_available = available;
                        niche.offset += offset;
                        largest_niche = Some(niche);
                    }
                }
            }

            offset = offset.checked_add(field.size, dl).ok_or(LayoutError::SizeOverflow(ty))?;
        }

        if let Some(repr_align) = repr.align {
            align = align.max(AbiAndPrefAlign::new(repr_align));
        }

        debug!("univariant min_size: {:?}", offset);
        let min_size = offset;

        // As stated above, inverse_memory_index holds field indices by increasing offset.
        // This makes it an already-sorted view of the offsets vec.
        // To invert it, consider:
        // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
        // Field 5 is the first element in memory order, so memory_index[5] gets
        // that position, 0.
        // Note: if we didn't optimize, the mapping is the identity and is
        // already its own inverse.

        let memory_index =
            if optimize { invert_mapping(&inverse_memory_index) } else { inverse_memory_index };

        let size = min_size.align_to(align.abi);
        let mut abi = Abi::Aggregate { sized };

        // Unpack newtype ABIs and find scalar pairs.
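        // E.g. a newtype like `struct Wrapper(u32);` can simply reuse the
        // `Scalar` ABI of its field, and a two-scalar struct can sometimes be
        // passed around as a `ScalarPair`.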
        if sized && size.bytes() > 0 {
            // All other fields must be ZSTs.
            let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());

            match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
                // We have exactly one non-ZST field.
                (Some((i, field)), None, None) => {
                    // Field fills the struct and it has a scalar or scalar pair ABI.
                    if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size
                    {
                        match field.abi {
                            // For plain scalars, or vectors of them, we can't unpack
                            // newtypes for `#[repr(C)]`, as that affects C ABIs.
                            Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
                                abi = field.abi;
                            }
                            // But scalar pairs are Rust-specific and get
                            // treated as aggregates by C ABIs anyway.
                            Abi::ScalarPair(..) => {
                                abi = field.abi;
                            }
                            _ => {}
                        }
                    }
                }

                // Two non-ZST fields, and they're both scalars.
                (
                    Some((i, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(a), .. }, .. })),
                    Some((j, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(b), .. }, .. })),
                    None,
                ) => {
                    // Order by the memory placement, not source order.
                    let ((i, a), (j, b)) =
                        if offsets[i] < offsets[j] { ((i, a), (j, b)) } else { ((j, b), (i, a)) };
                    let pair = self.scalar_pair(a, b);
                    let pair_offsets = match pair.fields {
                        FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
                            assert_eq!(memory_index, &[0, 1]);
                            offsets
                        }
                        _ => bug!(),
                    };
                    if offsets[i] == pair_offsets[0]
                        && offsets[j] == pair_offsets[1]
                        && align == pair.align
                        && size == pair.size
                    {
                        // We can use `ScalarPair` only when it matches our
                        // already computed layout (including `#[repr(C)]`).
                        abi = pair.abi;
                    }
                }

                _ => {}
            }
        }

        if fields.iter().any(|f| f.abi.is_uninhabited()) {
            abi = Abi::Uninhabited;
        }

        Ok(Layout {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Arbitrary { offsets, memory_index },
            abi,
            largest_niche,
            align,
            size,
        })
    }

    fn layout_of_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx Layout, LayoutError<'tcx>> {
        let tcx = self.tcx;
        let param_env = self.param_env;
        let dl = self.data_layout();
        let scalar_unit = |value: Primitive| {
            let size = value.size(dl);
            assert!(size.bits() <= 128);
            Scalar { value, valid_range: WrappingRange { start: 0, end: size.unsigned_int_max() } }
        };
        let scalar = |value: Primitive| tcx.intern_layout(Layout::scalar(self, scalar_unit(value)));

        let univariant = |fields: &[TyAndLayout<'_>], repr: &ReprOptions, kind| {
            Ok(tcx.intern_layout(self.univariant_uninterned(ty, fields, repr, kind)?))
        };
        debug_assert!(!ty.has_infer_types_or_consts());

        Ok(match *ty.kind() {
            // Basic scalars.
            ty::Bool => tcx.intern_layout(Layout::scalar(
                self,
                Scalar { value: Int(I8, false), valid_range: WrappingRange { start: 0, end: 1 } },
            )),
            ty::Char => tcx.intern_layout(Layout::scalar(
                self,
                Scalar {
                    value: Int(I32, false),
                    valid_range: WrappingRange { start: 0, end: 0x10FFFF },
                },
            )),
            ty::Int(ity) => scalar(Int(Integer::from_int_ty(dl, ity), true)),
            ty::Uint(ity) => scalar(Int(Integer::from_uint_ty(dl, ity), false)),
            ty::Float(fty) => scalar(match fty {
                ty::FloatTy::F32 => F32,
                ty::FloatTy::F64 => F64,
            }),
            ty::FnPtr(_) => {
                let mut ptr = scalar_unit(Pointer);
                ptr.valid_range = ptr.valid_range.with_start(1);
                tcx.intern_layout(Layout::scalar(self, ptr))
            }

            // The never type.
            ty::Never => tcx.intern_layout(Layout {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldsShape::Primitive,
                abi: Abi::Uninhabited,
                largest_niche: None,
                align: dl.i8_align,
                size: Size::ZERO,
            }),

            // Potentially-wide pointers.
            ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                let mut data_ptr = scalar_unit(Pointer);
                if !ty.is_unsafe_ptr() {
                    data_ptr.valid_range = data_ptr.valid_range.with_start(1);
                }

                let pointee = tcx.normalize_erasing_regions(param_env, pointee);
                if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
                    return Ok(tcx.intern_layout(Layout::scalar(self, data_ptr)));
                }

                let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
                let metadata = match unsized_part.kind() {
                    ty::Foreign(..) => {
                        return Ok(tcx.intern_layout(Layout::scalar(self, data_ptr)));
                    }
                    ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)),
                    ty::Dynamic(..) => {
                        let mut vtable = scalar_unit(Pointer);
                        vtable.valid_range = vtable.valid_range.with_start(1);
                        vtable
                    }
                    _ => return Err(LayoutError::Unknown(unsized_part)),
                };

                // Effectively a (ptr, meta) tuple.
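                // E.g. `&[u8]` becomes (data pointer, usize length), and
                // `&dyn Trait` becomes (data pointer, vtable pointer).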
                tcx.intern_layout(self.scalar_pair(data_ptr, metadata))
            }

            // Arrays and slices.
            ty::Array(element, mut count) => {
                if count.has_projections() {
                    count = tcx.normalize_erasing_regions(param_env, count);
                    if count.has_projections() {
                        return Err(LayoutError::Unknown(ty));
                    }
                }

                let count = count.try_eval_usize(tcx, param_env).ok_or(LayoutError::Unknown(ty))?;
                let element = self.layout_of(element)?;
                let size =
                    element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;

                let abi =
                    if count != 0 && tcx.conservative_is_privately_uninhabited(param_env.and(ty)) {
                        Abi::Uninhabited
                    } else {
                        Abi::Aggregate { sized: true }
                    };

                let largest_niche = if count != 0 { element.largest_niche } else { None };

                tcx.intern_layout(Layout {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldsShape::Array { stride: element.size, count },
                    abi,
                    largest_niche,
                    align: element.align,
                    size,
                })
            }
            ty::Slice(element) => {
                let element = self.layout_of(element)?;
                tcx.intern_layout(Layout {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldsShape::Array { stride: element.size, count: 0 },
                    abi: Abi::Aggregate { sized: false },
                    largest_niche: None,
                    align: element.align,
                    size: Size::ZERO,
                })
            }
            ty::Str => tcx.intern_layout(Layout {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
                abi: Abi::Aggregate { sized: false },
                largest_niche: None,
                align: dl.i8_align,
                size: Size::ZERO,
            }),

            // Odd unit types.
            ty::FnDef(..) => univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?,
            ty::Dynamic(..) | ty::Foreign(..) => {
                let mut unit = self.univariant_uninterned(
                    ty,
                    &[],
                    &ReprOptions::default(),
                    StructKind::AlwaysSized,
                )?;
                match unit.abi {
                    Abi::Aggregate { ref mut sized } => *sized = false,
                    _ => bug!(),
                }
                tcx.intern_layout(unit)
            }

            ty::Generator(def_id, substs, _) => self.generator_layout(ty, def_id, substs)?,

            ty::Closure(_, ref substs) => {
                let tys = substs.as_closure().upvar_tys();
                univariant(
                    &tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    StructKind::AlwaysSized,
                )?
            }

            ty::Tuple(tys) => {
                let kind =
                    if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };

                univariant(
                    &tys.iter()
                        .map(|k| self.layout_of(k.expect_ty()))
                        .collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    kind,
                )?
            }

            // SIMD vector types.
            ty::Adt(def, substs) if def.repr.simd() => {
                if !def.is_struct() {
                    // Should have yielded E0517 by now.
                    tcx.sess.delay_span_bug(
                        DUMMY_SP,
                        "#[repr(simd)] was applied to an ADT that is not a struct",
                    );
                    return Err(LayoutError::Unknown(ty));
                }

                // Supported SIMD vectors are homogeneous ADTs with at least one field:
                //
                // * #[repr(simd)] struct S(T, T, T, T);
                // * #[repr(simd)] struct S { x: T, y: T, z: T, w: T }
                // * #[repr(simd)] struct S([T; 4])
                //
                // where T is a primitive scalar (integer/float/pointer).

                // SIMD vectors with zero fields are not supported.
                // (should be caught by typeck)
                if def.non_enum_variant().fields.is_empty() {
                    tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
                }

                // Type of the first ADT field:
                let f0_ty = def.non_enum_variant().fields[0].ty(tcx, substs);

                // Heterogeneous SIMD vectors are not supported:
                // (should be caught by typeck)
                for fi in &def.non_enum_variant().fields {
                    if fi.ty(tcx, substs) != f0_ty {
                        tcx.sess.fatal(&format!("monomorphising heterogeneous SIMD type `{}`", ty));
                    }
                }

                // The element type and number of elements of the SIMD vector
                // are obtained from:
                //
                // * the element type and length of the single array field, if
                // the first field is of array type, or
                //
                // * the homogeneous field type and the number of fields.
                let (e_ty, e_len, is_array) = if let ty::Array(e_ty, _) = f0_ty.kind() {
                    // First ADT field is an array:

                    // SIMD vectors with multiple array fields are not supported:
                    // (should be caught by typeck)
                    if def.non_enum_variant().fields.len() != 1 {
                        tcx.sess.fatal(&format!(
                            "monomorphising SIMD type `{}` with more than one array field",
                            ty
                        ));
                    }

                    // Extract the number of elements from the layout of the array field:
                    let Ok(TyAndLayout {
                        layout: Layout { fields: FieldsShape::Array { count, .. }, .. },
                        ..
                    }) = self.layout_of(f0_ty) else {
                        return Err(LayoutError::Unknown(ty));
                    };

                    (*e_ty, *count, true)
                } else {
                    // First ADT field is not an array:
                    (f0_ty, def.non_enum_variant().fields.len() as _, false)
                };

                // SIMD vectors of zero length are not supported.
                // Additionally, lengths are capped at 2^15 (`MAX_SIMD_LANES`) as a fixed
                // maximum backends must support.
                //
                // Can't be caught in typeck if the array length is generic.
                if e_len == 0 {
                    tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
                } else if e_len > MAX_SIMD_LANES {
                    tcx.sess.fatal(&format!(
                        "monomorphising SIMD type `{}` of length greater than {}",
                        ty, MAX_SIMD_LANES,
                    ));
                }

                // Compute the ABI of the element type:
                let e_ly = self.layout_of(e_ty)?;
                let Abi::Scalar(e_abi) = e_ly.abi else {
                    // This error isn't caught in typeck, e.g., if
                    // the element type of the vector is generic.
                    tcx.sess.fatal(&format!(
                        "monomorphising SIMD type `{}` with a non-primitive-scalar \
                        (integer/float/pointer) element type `{}`",
                        ty, e_ty
                    ))
                };

                // Compute the size and alignment of the vector:
                let size = e_ly.size.checked_mul(e_len, dl).ok_or(LayoutError::SizeOverflow(ty))?;
                let align = dl.vector_align(size);
                let size = size.align_to(align.abi);

                // Compute the placement of the vector fields:
                let fields = if is_array {
                    FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] }
                } else {
                    FieldsShape::Array { stride: e_ly.size, count: e_len }
                };

                tcx.intern_layout(Layout {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields,
                    abi: Abi::Vector { element: e_abi, count: e_len },
                    largest_niche: e_ly.largest_niche,
                    size,
                    align,
                })
            }

            // ADTs.
            ty::Adt(def, substs) => {
                // Cache the field layouts.
                let variants = def
                    .variants
                    .iter()
                    .map(|v| {
                        v.fields
                            .iter()
                            .map(|field| self.layout_of(field.ty(tcx, substs)))
                            .collect::<Result<Vec<_>, _>>()
                    })
                    .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                if def.is_union() {
                    if def.repr.pack.is_some() && def.repr.align.is_some() {
                        self.tcx.sess.delay_span_bug(
                            tcx.def_span(def.did),
                            "union cannot be packed and aligned",
                        );
                        return Err(LayoutError::Unknown(ty));
                    }

                    let mut align =
                        if def.repr.pack.is_some() { dl.i8_align } else { dl.aggregate_align };

                    if let Some(repr_align) = def.repr.align {
                        align = align.max(AbiAndPrefAlign::new(repr_align));
                    }

                    let optimize = !def.repr.inhibit_union_abi_opt();
                    let mut size = Size::ZERO;
                    let mut abi = Abi::Aggregate { sized: true };
                    let index = VariantIdx::new(0);
                    for field in &variants[index] {
                        assert!(!field.is_unsized());
                        align = align.max(field.align);

                        // If all non-ZST fields have the same ABI, forward this ABI
                        if optimize && !field.is_zst() {
                            // Normalize scalar_unit to the maximal valid range
                            let field_abi = match field.abi {
                                Abi::Scalar(x) => Abi::Scalar(scalar_unit(x.value)),
                                Abi::ScalarPair(x, y) => {
                                    Abi::ScalarPair(scalar_unit(x.value), scalar_unit(y.value))
                                }
                                Abi::Vector { element: x, count } => {
                                    Abi::Vector { element: scalar_unit(x.value), count }
                                }
                                Abi::Uninhabited | Abi::Aggregate { .. } => {
                                    Abi::Aggregate { sized: true }
                                }
                            };
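                            // (E.g. a union of `NonZeroU32` and `u32` must not keep the
                            // niche: either field may be written, so the union's scalar
                            // has to allow the full u32 range, including 0.)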

                            if size == Size::ZERO {
                                // first non ZST: initialize 'abi'
                                abi = field_abi;
                            } else if abi != field_abi {
                                // different fields have different ABI: reset to Aggregate
                                abi = Abi::Aggregate { sized: true };
                            }
                        }

                        size = cmp::max(size, field.size);
                    }

                    if let Some(pack) = def.repr.pack {
                        align = align.min(AbiAndPrefAlign::new(pack));
                    }

                    return Ok(tcx.intern_layout(Layout {
                        variants: Variants::Single { index },
                        fields: FieldsShape::Union(
                            NonZeroUsize::new(variants[index].len())
                                .ok_or(LayoutError::Unknown(ty))?,
                        ),
                        abi,
                        largest_niche: None,
                        align,
                        size: size.align_to(align.abi),
                    }));
                }

                // A variant is absent if it's uninhabited and only has ZST fields.
                // Present uninhabited variants only require space for their fields,
                // but *not* an encoding of the discriminant (e.g., a tag value).
                // See issue #49298 for more details on the need to leave space
                // for non-ZST uninhabited data (mostly partial initialization).
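                // E.g. a variant holding only `Infallible` is absent, while a
                // variant holding `(Infallible, u8)` is uninhabited but still
                // needs a byte for its `u8` field.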
                let absent = |fields: &[TyAndLayout<'_>]| {
                    let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
                    let is_zst = fields.iter().all(|f| f.is_zst());
                    uninhabited && is_zst
                };
                let (present_first, present_second) = {
                    let mut present_variants = variants
                        .iter_enumerated()
                        .filter_map(|(i, v)| if absent(v) { None } else { Some(i) });
                    (present_variants.next(), present_variants.next())
                };
                let present_first = match present_first {
                    Some(present_first) => present_first,
                    // Uninhabited because it has no variants, or only absent ones.
                    None if def.is_enum() => {
                        return Ok(tcx.layout_of(param_env.and(tcx.types.never))?.layout);
                    }
                    // If it's a struct, still compute a layout so that we can still compute the
                    // field offsets.
                    None => VariantIdx::new(0),
                };

                let is_struct = !def.is_enum() ||
                    // Only one variant is present.
                    (present_second.is_none() &&
                    // Representation optimizations are allowed.
                    !def.repr.inhibit_enum_layout_opt());
                if is_struct {
                    // Struct, or univariant enum equivalent to a struct.
                    // (Typechecking will reject discriminant-sizing attrs.)

                    let v = present_first;
                    let kind = if def.is_enum() || variants[v].is_empty() {
                        StructKind::AlwaysSized
                    } else {
                        let param_env = tcx.param_env(def.did);
                        let last_field = def.variants[v].fields.last().unwrap();
                        let always_sized =
                            tcx.type_of(last_field.did).is_sized(tcx.at(DUMMY_SP), param_env);
                        if !always_sized {
                            StructKind::MaybeUnsized
                        } else {
                            StructKind::AlwaysSized
                        }
                    };

                    let mut st = self.univariant_uninterned(ty, &variants[v], &def.repr, kind)?;
                    st.variants = Variants::Single { index: v };
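                    // `layout_scalar_valid_range` reads the unstable
                    // `#[rustc_layout_scalar_valid_range_{start,end}]` attributes;
                    // e.g. `NonZeroU32` declares a start bound of 1, which is what
                    // creates its niche.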
                    let (start, end) = self.tcx.layout_scalar_valid_range(def.did);
                    match st.abi {
                        Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
                            // the asserts ensure that we are not using the
                            // `#[rustc_layout_scalar_valid_range(n)]`
                            // attribute to widen the range of anything as that would probably
                            // result in UB somewhere
                            // FIXME(eddyb) the asserts are probably not needed,
                            // as larger validity ranges would result in missed
                            // optimizations, *not* wrongly assuming the inner
                            // value is valid. e.g. unions enlarge validity ranges,
                            // because the values may be uninitialized.
                            if let Bound::Included(start) = start {
                                // FIXME(eddyb) this might be incorrect - it doesn't
                                // account for wrap-around (end < start) ranges.
                                assert!(scalar.valid_range.start <= start);
                                scalar.valid_range.start = start;
                            }
                            if let Bound::Included(end) = end {
                                // FIXME(eddyb) this might be incorrect - it doesn't
                                // account for wrap-around (end < start) ranges.
                                assert!(scalar.valid_range.end >= end);
                                scalar.valid_range.end = end;
                            }

                            // Update `largest_niche` if we have introduced a larger niche.
                            let niche = if def.repr.hide_niche() {
                                None
                            } else {
                                Niche::from_scalar(dl, Size::ZERO, *scalar)
                            };
                            if let Some(niche) = niche {
                                match st.largest_niche {
                                    Some(largest_niche) => {
                                        // Replace the existing niche even if they're equal,
                                        // because this one is at a lower offset.
                                        if largest_niche.available(dl) <= niche.available(dl) {
                                            st.largest_niche = Some(niche);
                                        }
                                    }
                                    None => st.largest_niche = Some(niche),
                                }
                            }
                        }
                        _ => assert!(
                            start == Bound::Unbounded && end == Bound::Unbounded,
                            "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
                            def,
                            st,
                        ),
                    }

                    return Ok(tcx.intern_layout(st));
                }

                // At this point, we have handled all unions and
                // structs. (We have also handled univariant enums
                // that allow representation optimization.)
                assert!(def.is_enum());

                // The current code for niche-filling relies on variant indices
                // instead of actual discriminants, so dataful enums with
                // explicit discriminants (RFC #2363) would misbehave.
                let no_explicit_discriminants = def
                    .variants
                    .iter_enumerated()
                    .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i.as_u32()));

                let mut niche_filling_layout = None;

                // Niche-filling enum optimization.
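                // The canonical example is `Option<&T>`: the null pointer is an
                // unused ("niche") value of the payload, so `None` can be encoded
                // as 0 and the enum needs no separate tag.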
                if !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants {
                    let mut dataful_variant = None;
                    let mut niche_variants = VariantIdx::MAX..=VariantIdx::new(0);

                    // Find one non-ZST variant.
                    'variants: for (v, fields) in variants.iter_enumerated() {
                        if absent(fields) {
                            continue 'variants;
                        }
                        for f in fields {
                            if !f.is_zst() {
                                if dataful_variant.is_none() {
                                    dataful_variant = Some(v);
                                    continue 'variants;
                                } else {
                                    dataful_variant = None;
                                    break 'variants;
                                }
                            }
                        }
                        niche_variants = *niche_variants.start().min(&v)..=v;
                    }

                    if niche_variants.start() > niche_variants.end() {
                        dataful_variant = None;
                    }

                    if let Some(i) = dataful_variant {
                        let count = (niche_variants.end().as_u32()
                            - niche_variants.start().as_u32()
                            + 1) as u128;

                        // Find the field with the largest niche
                        let niche_candidate = variants[i]
                            .iter()
                            .enumerate()
                            .filter_map(|(j, field)| Some((j, field.largest_niche?)))
                            .max_by_key(|(_, niche)| niche.available(dl));

                        if let Some((field_index, niche, (niche_start, niche_scalar))) =
                            niche_candidate.and_then(|(field_index, niche)| {
                                Some((field_index, niche, niche.reserve(self, count)?))
                            })
                        {
                            let mut align = dl.aggregate_align;
                            let st = variants
                                .iter_enumerated()
                                .map(|(j, v)| {
                                    let mut st = self.univariant_uninterned(
                                        ty,
                                        v,
                                        &def.repr,
                                        StructKind::AlwaysSized,
                                    )?;
                                    st.variants = Variants::Single { index: j };

                                    align = align.max(st.align);

                                    Ok(st)
                                })
                                .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                            let offset = st[i].fields.offset(field_index) + niche.offset;
                            let size = st[i].size;

                            let abi = if st.iter().all(|v| v.abi.is_uninhabited()) {
                                Abi::Uninhabited
                            } else {
                                match st[i].abi {
                                    Abi::Scalar(_) => Abi::Scalar(niche_scalar),
                                    Abi::ScalarPair(first, second) => {
                                        // We need to use scalar_unit to reset the
                                        // valid range to the maximal one for that
                                        // primitive, because only the niche is
                                        // guaranteed to be initialised, not the
                                        // other primitive.
                                        if offset.bytes() == 0 {
                                            Abi::ScalarPair(niche_scalar, scalar_unit(second.value))
                                        } else {
                                            Abi::ScalarPair(scalar_unit(first.value), niche_scalar)
                                        }
                                    }
                                    _ => Abi::Aggregate { sized: true },
                                }
                            };

                            let largest_niche = Niche::from_scalar(dl, offset, niche_scalar);

                            niche_filling_layout = Some(Layout {
                                variants: Variants::Multiple {
                                    tag: niche_scalar,
                                    tag_encoding: TagEncoding::Niche {
                                        dataful_variant: i,
                                        niche_variants,
                                        niche_start,
                                    },
                                    tag_field: 0,
                                    variants: st,
                                },
                                fields: FieldsShape::Arbitrary {
                                    offsets: vec![offset],
                                    memory_index: vec![0],
                                },
                                abi,
                                largest_niche,
                                size,
                                align,
                            });
                        }
                    }
                }

                let (mut min, mut max) = (i128::MAX, i128::MIN);
                let discr_type = def.repr.discr_type();
                let bits = Integer::from_attr(self, discr_type).size().bits();
                for (i, discr) in def.discriminants(tcx) {
                    if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
                        continue;
                    }
                    let mut x = discr.val as i128;
                    if discr_type.is_signed() {
                        // sign extend the raw representation to be an i128
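                        // (e.g. with bits == 8, a raw value of 0xFF becomes -1
                        // after the shift round-trip below)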
                        x = (x << (128 - bits)) >> (128 - bits);
                    }
                    if x < min {
                        min = x;
                    }
                    if x > max {
                        max = x;
                    }
                }
                // We might have no inhabited variants, so pretend there's at least one.
                if (min, max) == (i128::MAX, i128::MIN) {
                    min = 0;
                    max = 0;
                }
                assert!(min <= max, "discriminant range is {}...{}", min, max);
                let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max);

                let mut align = dl.aggregate_align;
                let mut size = Size::ZERO;

                // We're interested in the smallest alignment, so start large.
                let mut start_align = Align::from_bytes(256).unwrap();
                assert_eq!(Integer::for_align(dl, start_align), None);

1193                 // repr(C) on an enum tells us to make a (tag, union) layout,
1194                 // so we need to grow the prefix alignment to be at least
1195                 // the alignment of the union. (This value is used both for
1196                 // determining the alignment of the overall enum, and for
1197                 // determining the alignment of the payload after the tag.)
1198                 let mut prefix_align = min_ity.align(dl).abi;
1199                 if def.repr.c() {
1200                     for fields in &variants {
1201                         for field in fields {
1202                             prefix_align = prefix_align.max(field.align.abi);
1203                         }
1204                     }
1205                 }
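                // Illustrative sketch (hypothetical types, not compiler code): the
                // rule above means a fieldful `#[repr(C)]` enum is laid out roughly
                // like an explicit C-style tag-plus-union pair:
                //
                //     #[repr(C)]
                //     enum Foo { A(u8), B(u16) }
                //     // ... is laid out like:
                //     #[repr(C)]
                //     struct FooRepr { tag: FooTag, payload: FooPayload }
                //     #[repr(C)]
                //     union FooPayload { a: u8, b: u16 }
                //     #[repr(C)]
                //     enum FooTag { A, B }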
1206
1207                 // Create the set of structs that represent each variant.
1208                 let mut layout_variants = variants
1209                     .iter_enumerated()
1210                     .map(|(i, field_layouts)| {
1211                         let mut st = self.univariant_uninterned(
1212                             ty,
1213                             &field_layouts,
1214                             &def.repr,
1215                             StructKind::Prefixed(min_ity.size(), prefix_align),
1216                         )?;
1217                         st.variants = Variants::Single { index: i };
1218                         // Find the first field we can't move later
1219                         // to make room for a larger discriminant.
1220                         for field in
1221                             st.fields.index_by_increasing_offset().map(|j| field_layouts[j])
1222                         {
1223                             if !field.is_zst() || field.align.abi.bytes() != 1 {
1224                                 start_align = start_align.min(field.align.abi);
1225                                 break;
1226                             }
1227                         }
1228                         size = cmp::max(size, st.size);
1229                         align = align.max(st.align);
1230                         Ok(st)
1231                     })
1232                     .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1233
1234                 // Align the maximum variant size to the largest alignment.
1235                 size = size.align_to(align.abi);
1236
1237                 if size.bytes() >= dl.obj_size_bound() {
1238                     return Err(LayoutError::SizeOverflow(ty));
1239                 }
1240
1241                 let typeck_ity = Integer::from_attr(dl, def.repr.discr_type());
1242                 if typeck_ity < min_ity {
1243                     // It is a bug if layout decided on a discriminant size greater than the one
1244                     // typeck chose (based on the values the discriminant can take on), because
1245                     // the discriminant will be loaded and then stored into a variable of the
1246                     // type computed by typeck. Consider the buggy case: typeck decided on a
1247                     // byte-sized discriminant, but layout thinks we need 16 bits to store all
1248                     // the discriminant values. Then, in codegen, storing that 16-bit
1249                     // discriminant into an 8-bit temporary would have to discard some of the
1250                     // bits needed to represent it (or layout would be wrong in thinking it
1251                     // needs 16 bits).
1252                     bug!(
1253                         "layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
1254                         min_ity,
1255                         typeck_ity
1256                     );
1257                     // However, it is fine to make the discr type larger than this (as an
1258                     // optimisation) after this point; we'll just truncate the value we load in codegen.
1259                 }
1260
1261                 // Check to see if we should use a different type for the
1262                 // discriminant. We can safely use a type with the same size
1263                 // as the alignment of the first field of each variant.
1264                 // We increase the size of the discriminant to avoid LLVM copying
1265                 // padding when it doesn't need to; such copies normally cause unaligned
1266                 // loads/stores and excessive memcpy/memset operations. By using a
1267                 // bigger integer size, LLVM can be sure about its contents and
1268                 // won't be so conservative.
1269
1270                 // Use the initial field alignment
1271                 let mut ity = if def.repr.c() || def.repr.int.is_some() {
1272                     min_ity
1273                 } else {
1274                     Integer::for_align(dl, start_align).unwrap_or(min_ity)
1275                 };
1276
1277                 // If the alignment is not larger than the chosen discriminant size,
1278                 // don't use the alignment as the final size.
1279                 if ity <= min_ity {
1280                     ity = min_ity;
1281                 } else {
1282                     // Patch up the variants' first few fields.
1283                     let old_ity_size = min_ity.size();
1284                     let new_ity_size = ity.size();
1285                     for variant in &mut layout_variants {
1286                         match variant.fields {
1287                             FieldsShape::Arbitrary { ref mut offsets, .. } => {
1288                                 for i in offsets {
1289                                     if *i <= old_ity_size {
1290                                         assert_eq!(*i, old_ity_size);
1291                                         *i = new_ity_size;
1292                                     }
1293                                 }
1294                                 // We might be making the struct larger.
1295                                 if variant.size <= old_ity_size {
1296                                     variant.size = new_ity_size;
1297                                 }
1298                             }
1299                             _ => bug!(),
1300                         }
1301                     }
1302                 }
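                // For example (a sketch): without an explicit `repr`, the tag of
                // `enum E { A(u64), B }` is widened from 1 byte to 8 so the whole
                // 16-byte value can be copied as aligned 8-byte chunks:
                //
                //     assert_eq!(std::mem::size_of::<E>(), 16);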
1303
1304                 let tag_mask = ity.size().unsigned_int_max();
1305                 let tag = Scalar {
1306                     value: Int(ity, signed),
1307                     valid_range: WrappingRange {
1308                         start: (min as u128 & tag_mask),
1309                         end: (max as u128 & tag_mask),
1310                     },
1311                 };
1312                 let mut abi = Abi::Aggregate { sized: true };
1313
1314                 // Without the latter check, aligned enums with custom discriminant values
1315                 // would result in an ICE; see issue #92464 for more info.
1316                 if tag.value.size(dl) == size || variants.iter().all(|layout| layout.is_empty()) {
1317                     abi = Abi::Scalar(tag);
1318                 } else {
1319                     // Try to use a ScalarPair for all tagged enums.
1320                     let mut common_prim = None;
1321                     for (field_layouts, layout_variant) in iter::zip(&variants, &layout_variants) {
1322                         let offsets = match layout_variant.fields {
1323                             FieldsShape::Arbitrary { ref offsets, .. } => offsets,
1324                             _ => bug!(),
1325                         };
1326                         let mut fields =
1327                             iter::zip(field_layouts, offsets).filter(|p| !p.0.is_zst());
1328                         let (field, offset) = match (fields.next(), fields.next()) {
1329                             (None, None) => continue,
1330                             (Some(pair), None) => pair,
1331                             _ => {
1332                                 common_prim = None;
1333                                 break;
1334                             }
1335                         };
1336                         let prim = match field.abi {
1337                             Abi::Scalar(scalar) => scalar.value,
1338                             _ => {
1339                                 common_prim = None;
1340                                 break;
1341                             }
1342                         };
1343                         if let Some(pair) = common_prim {
1344                             // This is pretty conservative. We could go fancier
1345                             // by conflating things like i32 and u32, or even
1346                             // realising that (u8, u8) could just cohabit with
1347                             // u16 or even u32.
1348                             if pair != (prim, offset) {
1349                                 common_prim = None;
1350                                 break;
1351                             }
1352                         } else {
1353                             common_prim = Some((prim, offset));
1354                         }
1355                     }
1356                     if let Some((prim, offset)) = common_prim {
1357                         let pair = self.scalar_pair(tag, scalar_unit(prim));
1358                         let pair_offsets = match pair.fields {
1359                             FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
1360                                 assert_eq!(memory_index, &[0, 1]);
1361                                 offsets
1362                             }
1363                             _ => bug!(),
1364                         };
1365                         if pair_offsets[0] == Size::ZERO
1366                             && pair_offsets[1] == *offset
1367                             && align == pair.align
1368                             && size == pair.size
1369                         {
1370                             // We can use `ScalarPair` only when it matches our
1371                             // already computed layout (including `#[repr(C)]`).
1372                             abi = pair.abi;
1373                         }
1374                     }
1375                 }
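                // Illustrative sketch (not compiler code): the common-primitive scan
                // above succeeds when every variant has at most one non-ZST field and
                // all such fields share one primitive at one offset, e.g.:
                //
                //     enum E { A(u32), B(u32) }
                //     // E can then be passed around as `ScalarPair(tag, u32)`
                //     // instead of as an in-memory aggregate.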
1376
1377                 if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
1378                     abi = Abi::Uninhabited;
1379                 }
1380
1381                 let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag);
1382
1383                 let tagged_layout = Layout {
1384                     variants: Variants::Multiple {
1385                         tag,
1386                         tag_encoding: TagEncoding::Direct,
1387                         tag_field: 0,
1388                         variants: layout_variants,
1389                     },
1390                     fields: FieldsShape::Arbitrary {
1391                         offsets: vec![Size::ZERO],
1392                         memory_index: vec![0],
1393                     },
1394                     largest_niche,
1395                     abi,
1396                     align,
1397                     size,
1398                 };
1399
1400                 let best_layout = match (tagged_layout, niche_filling_layout) {
1401                     (tagged_layout, Some(niche_filling_layout)) => {
1402                         // Pick the smaller layout; otherwise,
1403                         // pick the layout with the larger niche; otherwise,
1404                         // pick tagged as it has simpler codegen.
1405                         cmp::min_by_key(tagged_layout, niche_filling_layout, |layout| {
1406                             let niche_size = layout.largest_niche.map_or(0, |n| n.available(dl));
1407                             (layout.size, cmp::Reverse(niche_size))
1408                         })
1409                     }
1410                     (tagged_layout, None) => tagged_layout,
1411                 };
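                // For example (a sketch): for `Option<bool>` the niche-filling
                // candidate (1 byte, with the tag packed into `bool`'s unused
                // values) beats the 2-byte tagged candidate, so it is selected here:
                //
                //     assert_eq!(std::mem::size_of::<Option<bool>>(), 1);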
1412
1413                 tcx.intern_layout(best_layout)
1414             }
1415
1416             // Types with no meaningful known layout.
1417             ty::Projection(_) | ty::Opaque(..) => {
1418                 // NOTE(eddyb) `layout_of` query should've normalized these away,
1419                 // if that was possible, so there's no reason to try again here.
1420                 return Err(LayoutError::Unknown(ty));
1421             }
1422
1423             ty::Placeholder(..) | ty::GeneratorWitness(..) | ty::Infer(_) => {
1424                 bug!("Layout::compute: unexpected type `{}`", ty)
1425             }
1426
1427             ty::Bound(..) | ty::Param(_) | ty::Error(_) => {
1428                 return Err(LayoutError::Unknown(ty));
1429             }
1430         })
1431     }
1432 }
1433
1434 /// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
1435 #[derive(Clone, Debug, PartialEq)]
1436 enum SavedLocalEligibility {
1437     Unassigned,
1438     Assigned(VariantIdx),
1439     // FIXME: Use newtype_index so we aren't wasting bytes
1440     Ineligible(Option<u32>),
1441 }
1442
1443 // When laying out generators, we divide our saved local fields into two
1444 // categories: overlap-eligible and overlap-ineligible.
1445 //
1446 // Those fields which are ineligible for overlap go in a "prefix" at the
1447 // beginning of the layout, and always have space reserved for them.
1448 //
1449 // Overlap-eligible fields are only assigned to one variant, so we lay
1450 // those fields out for each variant and put them right after the
1451 // prefix.
1452 //
1453 // Finally, in the layout details, we point to the fields from the
1454 // variants they are assigned to. It is possible for some fields to be
1455 // included in multiple variants. No field ever "moves around" in the
1456 // layout; its offset is always the same.
1457 //
1458 // Also included in the layout are the upvars and the discriminant.
1459 // These are included as fields on the "outer" layout; they are not part
1460 // of any variant.
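//
// Illustrative sketch (user-facing Rust; `use_buf` is a hypothetical async
// helper): only locals live across an `.await` are saved in the generator,
// and saved locals whose storage ranges never conflict may share an offset:
//
//     async fn f() {
//         let a = [0u8; 1024];
//         use_buf(&a).await; // `a` is saved: it is borrowed across this await
//         drop(a);
//         let b = [0u8; 1024];
//         use_buf(&b).await; // `b` can reuse `a`'s slot: no storage conflict
//     }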
1461 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
1462     /// Compute the eligibility and assignment of each local.
1463     fn generator_saved_local_eligibility(
1464         &self,
1465         info: &GeneratorLayout<'tcx>,
1466     ) -> (BitSet<GeneratorSavedLocal>, IndexVec<GeneratorSavedLocal, SavedLocalEligibility>) {
1467         use SavedLocalEligibility::*;
1468
1469         let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> =
1470             IndexVec::from_elem_n(Unassigned, info.field_tys.len());
1471
1472         // The saved locals not eligible for overlap. These will get
1473         // "promoted" to the prefix of our generator.
1474         let mut ineligible_locals = BitSet::new_empty(info.field_tys.len());
1475
1476         // Figure out which of our saved locals are fields in only
1477         // one variant. The rest are deemed ineligible for overlap.
1478         for (variant_index, fields) in info.variant_fields.iter_enumerated() {
1479             for local in fields {
1480                 match assignments[*local] {
1481                     Unassigned => {
1482                         assignments[*local] = Assigned(variant_index);
1483                     }
1484                     Assigned(idx) => {
1485                         // We've already seen this local at another suspension
1486                         // point, so it is no longer a candidate.
1487                         trace!(
1488                             "removing local {:?} in >1 variant ({:?}, {:?})",
1489                             local,
1490                             variant_index,
1491                             idx
1492                         );
1493                         ineligible_locals.insert(*local);
1494                         assignments[*local] = Ineligible(None);
1495                     }
1496                     Ineligible(_) => {}
1497                 }
1498             }
1499         }
1500
1501         // Next, check every pair of eligible locals to see if they
1502         // conflict.
1503         for local_a in info.storage_conflicts.rows() {
1504             let conflicts_a = info.storage_conflicts.count(local_a);
1505             if ineligible_locals.contains(local_a) {
1506                 continue;
1507             }
1508
1509             for local_b in info.storage_conflicts.iter(local_a) {
1510                 // local_a and local_b are storage live at the same time, therefore they
1511                 // cannot overlap in the generator layout. The only way to guarantee
1512                 // this is if they are in the same variant, or one is ineligible
1513                 // (which means it is stored in every variant).
1514                 if ineligible_locals.contains(local_b)
1515                     || assignments[local_a] == assignments[local_b]
1516                 {
1517                     continue;
1518                 }
1519
1520                 // If they conflict, we will choose one to make ineligible.
1521                 // This is not always optimal; it's just a greedy heuristic that
1522                 // seems to produce good results most of the time.
1523                 let conflicts_b = info.storage_conflicts.count(local_b);
1524                 let (remove, other) =
1525                     if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
1526                 ineligible_locals.insert(remove);
1527                 assignments[remove] = Ineligible(None);
1528                 trace!("removing local {:?} due to conflict with {:?}", remove, other);
1529             }
1530         }
1531
1532         // Count the number of variants in use. If only one variant is used, it is
1533         // impossible to overlap any locals in our layout. In this case it's
1534         // always better to make the remaining locals ineligible, so we can
1535         // lay them out with the other locals in the prefix and eliminate
1536         // unnecessary padding bytes.
1537         {
1538             let mut used_variants = BitSet::new_empty(info.variant_fields.len());
1539             for assignment in &assignments {
1540                 if let Assigned(idx) = assignment {
1541                     used_variants.insert(*idx);
1542                 }
1543             }
1544             if used_variants.count() < 2 {
1545                 for assignment in assignments.iter_mut() {
1546                     *assignment = Ineligible(None);
1547                 }
1548                 ineligible_locals.insert_all();
1549             }
1550         }
1551
1552         // Write down the order of our locals that will be promoted to the prefix.
1553         {
1554             for (idx, local) in ineligible_locals.iter().enumerate() {
1555                 assignments[local] = Ineligible(Some(idx as u32));
1556             }
1557         }
1558         debug!("generator saved local assignments: {:?}", assignments);
1559
1560         (ineligible_locals, assignments)
1561     }
1562
1563     /// Compute the full generator layout.
1564     fn generator_layout(
1565         &self,
1566         ty: Ty<'tcx>,
1567         def_id: hir::def_id::DefId,
1568         substs: SubstsRef<'tcx>,
1569     ) -> Result<&'tcx Layout, LayoutError<'tcx>> {
1570         use SavedLocalEligibility::*;
1571         let tcx = self.tcx;
1572         let subst_field = |ty: Ty<'tcx>| ty.subst(tcx, substs);
1573
1574         let info = match tcx.generator_layout(def_id) {
1575             None => return Err(LayoutError::Unknown(ty)),
1576             Some(info) => info,
1577         };
1578         let (ineligible_locals, assignments) = self.generator_saved_local_eligibility(&info);
1579
1580         // Build a prefix layout, including "promoting" all ineligible
1581         // locals as part of the prefix. We compute the layout of all of
1582         // these fields at once to get optimal packing.
1583         let tag_index = substs.as_generator().prefix_tys().count();
1584
1585         // `info.variant_fields` already accounts for the reserved variants, so no need to add them.
1586         let max_discr = (info.variant_fields.len() - 1) as u128;
1587         let discr_int = Integer::fit_unsigned(max_discr);
1588         let discr_int_ty = discr_int.to_ty(tcx, false);
1589         let tag = Scalar {
1590             value: Primitive::Int(discr_int, false),
1591             valid_range: WrappingRange { start: 0, end: max_discr },
1592         };
1593         let tag_layout = self.tcx.intern_layout(Layout::scalar(self, tag));
1594         let tag_layout = TyAndLayout { ty: discr_int_ty, layout: tag_layout };
1595
1596         let promoted_layouts = ineligible_locals
1597             .iter()
1598             .map(|local| subst_field(info.field_tys[local]))
1599             .map(|ty| tcx.mk_maybe_uninit(ty))
1600             .map(|ty| self.layout_of(ty));
1601         let prefix_layouts = substs
1602             .as_generator()
1603             .prefix_tys()
1604             .map(|ty| self.layout_of(ty))
1605             .chain(iter::once(Ok(tag_layout)))
1606             .chain(promoted_layouts)
1607             .collect::<Result<Vec<_>, _>>()?;
1608         let prefix = self.univariant_uninterned(
1609             ty,
1610             &prefix_layouts,
1611             &ReprOptions::default(),
1612             StructKind::AlwaysSized,
1613         )?;
1614
1615         let (prefix_size, prefix_align) = (prefix.size, prefix.align);
1616
1617         // Split the prefix layout into the "outer" fields (upvars and
1618         // discriminant) and the "promoted" fields. Promoted fields will
1619         // get included in each variant that requested them in
1620         // GeneratorLayout.
1621         debug!("prefix = {:#?}", prefix);
1622         let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
1623             FieldsShape::Arbitrary { mut offsets, memory_index } => {
1624                 let mut inverse_memory_index = invert_mapping(&memory_index);
1625
1626                 // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
1627                 // "outer" and "promoted" fields respectively.
1628                 let b_start = (tag_index + 1) as u32;
1629                 let offsets_b = offsets.split_off(b_start as usize);
1630                 let offsets_a = offsets;
1631
1632                 // Disentangle the "a" and "b" components of `inverse_memory_index`
1633                 // by preserving the order but keeping only one disjoint "half" each.
1634                 // FIXME(eddyb) build a better abstraction for permutations, if possible.
1635                 let inverse_memory_index_b: Vec<_> =
1636                     inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
1637                 inverse_memory_index.retain(|&i| i < b_start);
1638                 let inverse_memory_index_a = inverse_memory_index;
1639
1640                 // Since `inverse_memory_index_{a,b}` each only refer to their
1641                 // respective fields, they can be safely inverted.
1642                 let memory_index_a = invert_mapping(&inverse_memory_index_a);
1643                 let memory_index_b = invert_mapping(&inverse_memory_index_b);
1644
1645                 let outer_fields =
1646                     FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
1647                 (outer_fields, offsets_b, memory_index_b)
1648             }
1649             _ => bug!(),
1650         };
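        // Worked example (a sketch) of the permutation juggling above, with
        // three prefix fields where field i lives at memory slot memory_index[i]:
        //
        //     memory_index           = [2, 0, 1]
        //     inverse_memory_index   = [1, 2, 0]  // memory slot j holds field inverse_memory_index[j]
        //     // splitting at b_start = 2:
        //     inverse_memory_index_a = [1, 0]     // entries < 2, order preserved
        //     inverse_memory_index_b = [0]        // entries >= 2, rebased by -2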
1651
1652         let mut size = prefix.size;
1653         let mut align = prefix.align;
1654         let variants = info
1655             .variant_fields
1656             .iter_enumerated()
1657             .map(|(index, variant_fields)| {
1658                 // Only include overlap-eligible fields when we compute our variant layout.
1659                 let variant_only_tys = variant_fields
1660                     .iter()
1661                     .filter(|local| match assignments[**local] {
1662                         Unassigned => bug!(),
1663                         Assigned(v) if v == index => true,
1664                         Assigned(_) => bug!("assignment does not match variant"),
1665                         Ineligible(_) => false,
1666                     })
1667                     .map(|local| subst_field(info.field_tys[*local]));
1668
1669                 let mut variant = self.univariant_uninterned(
1670                     ty,
1671                     &variant_only_tys
1672                         .map(|ty| self.layout_of(ty))
1673                         .collect::<Result<Vec<_>, _>>()?,
1674                     &ReprOptions::default(),
1675                     StructKind::Prefixed(prefix_size, prefix_align.abi),
1676                 )?;
1677                 variant.variants = Variants::Single { index };
1678
1679                 let (offsets, memory_index) = match variant.fields {
1680                     FieldsShape::Arbitrary { offsets, memory_index } => (offsets, memory_index),
1681                     _ => bug!(),
1682                 };
1683
1684                 // Now, stitch the promoted and variant-only fields back together in
1685                 // the order they are mentioned by our GeneratorLayout.
1686                 // Because we only use some subset (that can differ between variants)
1687                 // of the promoted fields, we can't just pick those elements of the
1688                 // `promoted_memory_index` (as we'd end up with gaps).
1689                 // So instead, we build an "inverse memory_index", as if all of the
1690                 // promoted fields were being used, but leave the elements not in the
1691                 // subset as `INVALID_FIELD_IDX`, which we can filter out later to
1692                 // obtain a valid (bijective) mapping.
1693                 const INVALID_FIELD_IDX: u32 = !0;
1694                 let mut combined_inverse_memory_index =
1695                     vec![INVALID_FIELD_IDX; promoted_memory_index.len() + memory_index.len()];
1696                 let mut offsets_and_memory_index = iter::zip(offsets, memory_index);
1697                 let combined_offsets = variant_fields
1698                     .iter()
1699                     .enumerate()
1700                     .map(|(i, local)| {
1701                         let (offset, memory_index) = match assignments[*local] {
1702                             Unassigned => bug!(),
1703                             Assigned(_) => {
1704                                 let (offset, memory_index) =
1705                                     offsets_and_memory_index.next().unwrap();
1706                                 (offset, promoted_memory_index.len() as u32 + memory_index)
1707                             }
1708                             Ineligible(field_idx) => {
1709                                 let field_idx = field_idx.unwrap() as usize;
1710                                 (promoted_offsets[field_idx], promoted_memory_index[field_idx])
1711                             }
1712                         };
1713                         combined_inverse_memory_index[memory_index as usize] = i as u32;
1714                         offset
1715                     })
1716                     .collect();
1717
1718                 // Remove the unused slots and invert the mapping to obtain the
1719                 // combined `memory_index` (also see previous comment).
1720                 combined_inverse_memory_index.retain(|&i| i != INVALID_FIELD_IDX);
1721                 let combined_memory_index = invert_mapping(&combined_inverse_memory_index);
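                // Worked example (a sketch): with two promoted fields and a variant
                // made of promoted field #1 (memory slot 1) plus one eligible field
                // (variant slot 0, i.e. combined slot 2), the vector above becomes
                // `[INVALID, 0, 1]`; retaining valid entries and inverting yields
                // `combined_memory_index = [0, 1]`.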
1722
1723                 variant.fields = FieldsShape::Arbitrary {
1724                     offsets: combined_offsets,
1725                     memory_index: combined_memory_index,
1726                 };
1727
1728                 size = size.max(variant.size);
1729                 align = align.max(variant.align);
1730                 Ok(variant)
1731             })
1732             .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1733
1734         size = size.align_to(align.abi);
1735
1736         let abi = if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi.is_uninhabited())
1737         {
1738             Abi::Uninhabited
1739         } else {
1740             Abi::Aggregate { sized: true }
1741         };
1742
1743         let layout = tcx.intern_layout(Layout {
1744             variants: Variants::Multiple {
1745                 tag,
1746                 tag_encoding: TagEncoding::Direct,
1747                 tag_field: tag_index,
1748                 variants,
1749             },
1750             fields: outer_fields,
1751             abi,
1752             largest_niche: prefix.largest_niche,
1753             size,
1754             align,
1755         });
1756         debug!("generator layout ({:?}): {:#?}", ty, layout);
1757         Ok(layout)
1758     }
1759
1760     /// This is invoked by the `layout_of` query to record the final
1761     /// layout of each type.
1762     #[inline(always)]
1763     fn record_layout_for_printing(&self, layout: TyAndLayout<'tcx>) {
1764         // If we are running with `-Zprint-type-sizes`, maybe record layouts
1765         // for dumping later.
1766         if self.tcx.sess.opts.debugging_opts.print_type_sizes {
1767             self.record_layout_for_printing_outlined(layout)
1768         }
1769     }
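    // Usage note (output shape sketched from memory; the exact format may vary
    // between compiler versions):
    //
    //     $ rustc -Zprint-type-sizes main.rs
    //     print-type-size type: `std::option::Option<u32>`: 8 bytes, alignment: 4 bytes
    //     print-type-size     discriminant: 4 bytes
    //     print-type-size     variant `Some`: 4 bytes
    //     print-type-size         field `.0`: 4 bytes
    //     print-type-size     variant `None`: 0 bytes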
1770
1771     fn record_layout_for_printing_outlined(&self, layout: TyAndLayout<'tcx>) {
1772         // Ignore layouts that are computed with non-empty environments, and
1773         // non-monomorphic layouts, as the user only wants to see the layouts
1774         // resulting from the final codegen session.
1775         if layout.ty.has_param_types_or_consts() || !self.param_env.caller_bounds().is_empty() {
1776             return;
1777         }
1778
1779         // (delay format until we actually need it)
1780         let record = |kind, packed, opt_discr_size, variants| {
1781             let type_desc = format!("{:?}", layout.ty);
1782             self.tcx.sess.code_stats.record_type_size(
1783                 kind,
1784                 type_desc,
1785                 layout.align.abi,
1786                 layout.size,
1787                 packed,
1788                 opt_discr_size,
1789                 variants,
1790             );
1791         };
1792
1793         let adt_def = match *layout.ty.kind() {
1794             ty::Adt(ref adt_def, _) => {
1795                 debug!("print-type-size t: `{:?}` process adt", layout.ty);
1796                 adt_def
1797             }
1798
1799             ty::Closure(..) => {
1800                 debug!("print-type-size t: `{:?}` record closure", layout.ty);
1801                 record(DataTypeKind::Closure, false, None, vec![]);
1802                 return;
1803             }
1804
1805             _ => {
1806                 debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
1807                 return;
1808             }
1809         };
1810
1811         let adt_kind = adt_def.adt_kind();
1812         let adt_packed = adt_def.repr.pack.is_some();
1813
1814         let build_variant_info = |n: Option<Symbol>, flds: &[Symbol], layout: TyAndLayout<'tcx>| {
1815             let mut min_size = Size::ZERO;
1816             let field_info: Vec<_> = flds
1817                 .iter()
1818                 .enumerate()
1819                 .map(|(i, &name)| {
1820                     let field_layout = layout.field(self, i);
1821                     let offset = layout.fields.offset(i);
1822                     let field_end = offset + field_layout.size;
1823                     if min_size < field_end {
1824                         min_size = field_end;
1825                     }
1826                     FieldInfo {
1827                         name: name.to_string(),
1828                         offset: offset.bytes(),
1829                         size: field_layout.size.bytes(),
1830                         align: field_layout.align.abi.bytes(),
1831                     }
1832                 })
1833                 .collect();
1834
1835             VariantInfo {
1836                 name: n.map(|n| n.to_string()),
1837                 kind: if layout.is_unsized() { SizeKind::Min } else { SizeKind::Exact },
1838                 align: layout.align.abi.bytes(),
1839                 size: if min_size.bytes() == 0 { layout.size.bytes() } else { min_size.bytes() },
1840                 fields: field_info,
1841             }
1842         };
1843
1844         match layout.variants {
1845             Variants::Single { index } => {
1846                 if !adt_def.variants.is_empty() && layout.fields != FieldsShape::Primitive {
1847                     debug!(
1848                         "print-type-size `{:#?}` variant {}",
1849                         layout, adt_def.variants[index].name
1850                     );
1851                     let variant_def = &adt_def.variants[index];
1852                     let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
1853                     record(
1854                         adt_kind.into(),
1855                         adt_packed,
1856                         None,
1857                         vec![build_variant_info(Some(variant_def.name), &fields, layout)],
1858                     );
1859                 } else {
1860                     // (This case arises for *empty* enums; so give it
1861                     // zero variants.)
1862                     record(adt_kind.into(), adt_packed, None, vec![]);
1863                 }
1864             }
1865
1866             Variants::Multiple { tag, ref tag_encoding, .. } => {
1867                 debug!(
1868                     "print-type-size `{:#?}` adt general variants def {}",
1869                     layout.ty,
1870                     adt_def.variants.len()
1871                 );
1872                 let variant_infos: Vec<_> = adt_def
1873                     .variants
1874                     .iter_enumerated()
1875                     .map(|(i, variant_def)| {
1876                         let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
1877                         build_variant_info(
1878                             Some(variant_def.name),
1879                             &fields,
1880                             layout.for_variant(self, i),
1881                         )
1882                     })
1883                     .collect();
1884                 record(
1885                     adt_kind.into(),
1886                     adt_packed,
1887                     match tag_encoding {
1888                         TagEncoding::Direct => Some(tag.value.size(self)),
1889                         _ => None,
1890                     },
1891                     variant_infos,
1892                 );
1893             }
1894         }
1895     }
1896 }
1897
1898 /// Type size "skeleton", i.e., the only information determining a type's size.
1899 /// While this is conservative (aside from constant sizes, only pointers,
1900 /// newtypes thereof, and null-pointer-optimized enums are allowed), it is
1901 /// enough to statically check common use cases of transmute.
1902 #[derive(Copy, Clone, Debug)]
1903 pub enum SizeSkeleton<'tcx> {
1904     /// Any statically computable Layout.
1905     Known(Size),
1906
1907     /// A potentially-fat pointer.
1908     Pointer {
1909         /// If true, this pointer is never null.
1910         non_zero: bool,
1911         /// The type which determines the unsized metadata, if any,
1912         /// of this pointer. Either a type parameter or a projection
1913         /// depending on one, with regions erased.
1914         tail: Ty<'tcx>,
1915     },
1916 }
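//
// Illustrative sketch (user-facing Rust, not compiler code): the skeleton is
// what lets `transmute` be size-checked in generic code, where no concrete
// layout exists. Both sides below reduce to `Pointer { tail: T }`:
//
//     unsafe fn as_raw<T>(r: &T) -> *const T {
//         std::mem::transmute(r) // accepted: same size skeleton for every `T`
//     }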
1917
1918 impl<'tcx> SizeSkeleton<'tcx> {
1919     pub fn compute(
1920         ty: Ty<'tcx>,
1921         tcx: TyCtxt<'tcx>,
1922         param_env: ty::ParamEnv<'tcx>,
1923     ) -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
1924         debug_assert!(!ty.has_infer_types_or_consts());
1925
1926         // First try computing a static layout.
1927         let err = match tcx.layout_of(param_env.and(ty)) {
1928             Ok(layout) => {
1929                 return Ok(SizeSkeleton::Known(layout.size));
1930             }
1931             Err(err) => err,
1932         };
1933
1934         match *ty.kind() {
1935             ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
1936                 let non_zero = !ty.is_unsafe_ptr();
1937                 let tail = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
1938                 match tail.kind() {
1939                     ty::Param(_) | ty::Projection(_) => {
1940                         debug_assert!(tail.has_param_types_or_consts());
1941                         Ok(SizeSkeleton::Pointer { non_zero, tail: tcx.erase_regions(tail) })
1942                     }
1943                     _ => bug!(
1944                         "SizeSkeleton::compute({}): layout errored ({}), yet \
1945                               tail `{}` is not a type parameter or a projection",
1946                         ty,
1947                         err,
1948                         tail
1949                     ),
1950                 }
1951             }
1952
1953             ty::Adt(def, substs) => {
1954                 // Only newtypes and enums with the nullable pointer optimization.
1955                 if def.is_union() || def.variants.is_empty() || def.variants.len() > 2 {
1956                     return Err(err);
1957                 }
1958
1959                 // Get a zero-sized variant or a pointer newtype.
1960                 let zero_or_ptr_variant = |i| {
1961                     let i = VariantIdx::new(i);
1962                     let fields = def.variants[i]
1963                         .fields
1964                         .iter()
1965                         .map(|field| SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env));
1966                     let mut ptr = None;
1967                     for field in fields {
1968                         let field = field?;
1969                         match field {
1970                             SizeSkeleton::Known(size) => {
1971                                 if size.bytes() > 0 {
1972                                     return Err(err);
1973                                 }
1974                             }
1975                             SizeSkeleton::Pointer { .. } => {
1976                                 if ptr.is_some() {
1977                                     return Err(err);
1978                                 }
1979                                 ptr = Some(field);
1980                             }
1981                         }
1982                     }
1983                     Ok(ptr)
1984                 };
1985
1986                 let v0 = zero_or_ptr_variant(0)?;
1987                 // Newtype.
1988                 if def.variants.len() == 1 {
1989                     if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
1990                         return Ok(SizeSkeleton::Pointer {
1991                             non_zero: non_zero
1992                                 || match tcx.layout_scalar_valid_range(def.did) {
1993                                     (Bound::Included(start), Bound::Unbounded) => start > 0,
1994                                     (Bound::Included(start), Bound::Included(end)) => {
1995                                         0 < start && start < end
1996                                     }
1997                                     _ => false,
1998                                 },
1999                             tail,
2000                         });
2001                     } else {
2002                         return Err(err);
2003                     }
2004                 }
2005
2006                 let v1 = zero_or_ptr_variant(1)?;
2007                 // Nullable pointer enum optimization.
2008                 match (v0, v1) {
2009                     (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None)
2010                     | (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
2011                         Ok(SizeSkeleton::Pointer { non_zero: false, tail })
2012                     }
2013                     _ => Err(err),
2014                 }
2015             }
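            // For example (a sketch): with `T` generic, `Option<&T>` reduces via
            // the nullable-pointer arm above to `Pointer { non_zero: false, tail: T }`,
            // which matches the skeleton of `*const T`, so this is accepted:
            //
            //     unsafe fn into_raw<T>(r: Option<&T>) -> *const T {
            //         std::mem::transmute(r)
            //     }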
2016
2017             ty::Projection(_) | ty::Opaque(..) => {
2018                 let normalized = tcx.normalize_erasing_regions(param_env, ty);
2019                 if ty == normalized {
2020                     Err(err)
2021                 } else {
2022                     SizeSkeleton::compute(normalized, tcx, param_env)
2023                 }
2024             }
2025
2026             _ => Err(err),
2027         }
2028     }
2029
2030     pub fn same_size(self, other: SizeSkeleton<'_>) -> bool {
2031         match (self, other) {
2032             (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
2033             (SizeSkeleton::Pointer { tail: a, .. }, SizeSkeleton::Pointer { tail: b, .. }) => {
2034                 a == b
2035             }
2036             _ => false,
2037         }
2038     }
2039 }
2040
2041 pub trait HasTyCtxt<'tcx>: HasDataLayout {
2042     fn tcx(&self) -> TyCtxt<'tcx>;
2043 }
2044
2045 pub trait HasParamEnv<'tcx> {
2046     fn param_env(&self) -> ty::ParamEnv<'tcx>;
2047 }
2048
2049 impl<'tcx> HasDataLayout for TyCtxt<'tcx> {
2050     #[inline]
2051     fn data_layout(&self) -> &TargetDataLayout {
2052         &self.data_layout
2053     }
2054 }
2055
2056 impl<'tcx> HasTargetSpec for TyCtxt<'tcx> {
2057     fn target_spec(&self) -> &Target {
2058         &self.sess.target
2059     }
2060 }
2061
2062 impl<'tcx> HasTyCtxt<'tcx> for TyCtxt<'tcx> {
2063     #[inline]
2064     fn tcx(&self) -> TyCtxt<'tcx> {
2065         *self
2066     }
2067 }
2068
2069 impl<'tcx> HasDataLayout for ty::query::TyCtxtAt<'tcx> {
2070     #[inline]
2071     fn data_layout(&self) -> &TargetDataLayout {
2072         &self.data_layout
2073     }
2074 }
2075
2076 impl<'tcx> HasTargetSpec for ty::query::TyCtxtAt<'tcx> {
2077     fn target_spec(&self) -> &Target {
2078         &self.sess.target
2079     }
2080 }
2081
2082 impl<'tcx> HasTyCtxt<'tcx> for ty::query::TyCtxtAt<'tcx> {
2083     #[inline]
2084     fn tcx(&self) -> TyCtxt<'tcx> {
2085         **self
2086     }
2087 }
2088
2089 impl<'tcx, C> HasParamEnv<'tcx> for LayoutCx<'tcx, C> {
2090     fn param_env(&self) -> ty::ParamEnv<'tcx> {
2091         self.param_env
2092     }
2093 }
2094
2095 impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
2096     fn data_layout(&self) -> &TargetDataLayout {
2097         self.tcx.data_layout()
2098     }
2099 }
2100
2101 impl<'tcx, T: HasTargetSpec> HasTargetSpec for LayoutCx<'tcx, T> {
2102     fn target_spec(&self) -> &Target {
2103         self.tcx.target_spec()
2104     }
2105 }
2106
2107 impl<'tcx, T: HasTyCtxt<'tcx>> HasTyCtxt<'tcx> for LayoutCx<'tcx, T> {
2108     fn tcx(&self) -> TyCtxt<'tcx> {
2109         self.tcx.tcx()
2110     }
2111 }
2112
2113 pub trait MaybeResult<T> {
2114     type Error;
2115
2116     fn from(x: Result<T, Self::Error>) -> Self;
2117     fn to_result(self) -> Result<T, Self::Error>;
2118 }
2119
2120 impl<T> MaybeResult<T> for T {
2121     type Error = !;
2122
2123     fn from(Ok(x): Result<T, Self::Error>) -> Self {
2124         x
2125     }
2126     fn to_result(self) -> Result<T, Self::Error> {
2127         Ok(self)
2128     }
2129 }
2130
2131 impl<T, E> MaybeResult<T> for Result<T, E> {
2132     type Error = E;
2133
2134     fn from(x: Result<T, Self::Error>) -> Self {
2135         x
2136     }
2137     fn to_result(self) -> Result<T, Self::Error> {
2138         self
2139     }
2140 }
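// Illustrative sketch (not compiler code): `MaybeResult` lets one piece of
// plumbing serve both infallible (`T`, with `Error = !`) and fallible
// (`Result<T, E>`) callers:
//
//     fn forty_two<M: MaybeResult<u32>>() -> M {
//         M::from(Ok(42))
//     }
//     fn demo() {
//         let a: u32 = forty_two();              // infallible instantiation
//         let b: Result<u32, ()> = forty_two();  // fallible instantiation
//     }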
2141
2142 pub type TyAndLayout<'tcx> = rustc_target::abi::TyAndLayout<'tcx, Ty<'tcx>>;
2143
2144 /// Trait for contexts that want to be able to compute layouts of types.
2145 /// This automatically gives access to `LayoutOf`, through a blanket `impl`.
2146 pub trait LayoutOfHelpers<'tcx>: HasDataLayout + HasTyCtxt<'tcx> + HasParamEnv<'tcx> {
2147     /// The `TyAndLayout`-wrapping type (or `TyAndLayout` itself), which will be
2148     /// returned from `layout_of` (see also `handle_layout_err`).
2149     type LayoutOfResult: MaybeResult<TyAndLayout<'tcx>>;
2150
2151     /// `Span` to use for `tcx.at(span)`, from `layout_of`.
2152     // FIXME(eddyb) perhaps make this mandatory to get contexts to track it better?
2153     #[inline]
2154     fn layout_tcx_at_span(&self) -> Span {
2155         DUMMY_SP
2156     }
2157
2158     /// Helper used for `layout_of`, to adapt `tcx.layout_of(...)` into a
2159     /// `Self::LayoutOfResult` (which does not need to be a `Result<...>`).
2160     ///
2161     /// Most `impl`s, which propagate `LayoutError`s, should simply return `err`,
2162     /// but this hook allows e.g. codegen to return only `TyAndLayout` from its
2163     /// `cx.layout_of(...)`, without any `Result<...>` around it to deal with
2164     /// (and any `LayoutError`s are turned into fatal errors or ICEs).
2165     fn handle_layout_err(
2166         &self,
2167         err: LayoutError<'tcx>,
2168         span: Span,
2169         ty: Ty<'tcx>,
2170     ) -> <Self::LayoutOfResult as MaybeResult<TyAndLayout<'tcx>>>::Error;
2171 }
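// Illustrative sketch (`MyCodegenCx` is hypothetical, not real compiler code):
// a codegen-style context can pick a bare `TyAndLayout` result and turn any
// `LayoutError` into an ICE in `handle_layout_err`:
//
//     impl<'tcx> LayoutOfHelpers<'tcx> for MyCodegenCx<'tcx> {
//         type LayoutOfResult = TyAndLayout<'tcx>;
//
//         fn handle_layout_err(&self, err: LayoutError<'tcx>, span: Span, ty: Ty<'tcx>) -> ! {
//             span_bug!(span, "failed to get layout for {}: {}", ty, err)
//         }
//     }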
2172
2173 /// Blanket extension trait for contexts that can compute layouts of types.
2174 pub trait LayoutOf<'tcx>: LayoutOfHelpers<'tcx> {
2175     /// Computes the layout of a type. Note that this implicitly
2176     /// executes in "reveal all" mode, and will normalize the input type.
2177     #[inline]
2178     fn layout_of(&self, ty: Ty<'tcx>) -> Self::LayoutOfResult {
2179         self.spanned_layout_of(ty, DUMMY_SP)
2180     }
2181
2182     /// Computes the layout of a type, at `span`. Note that this implicitly
2183     /// executes in "reveal all" mode, and will normalize the input type.
2184     // FIXME(eddyb) avoid passing information like this, and instead add more
2185     // `TyCtxt::at`-like APIs to be able to do e.g. `cx.at(span).layout_of(ty)`.
2186     #[inline]
2187     fn spanned_layout_of(&self, ty: Ty<'tcx>, span: Span) -> Self::LayoutOfResult {
2188         let span = if !span.is_dummy() { span } else { self.layout_tcx_at_span() };
2189         let tcx = self.tcx().at(span);
2190
2191         MaybeResult::from(
2192             tcx.layout_of(self.param_env().and(ty))
2193                 .map_err(|err| self.handle_layout_err(err, span, ty)),
2194         )
2195     }
2196 }
2197
2198 impl<'tcx, C: LayoutOfHelpers<'tcx>> LayoutOf<'tcx> for C {}
2199
2200 impl<'tcx> LayoutOfHelpers<'tcx> for LayoutCx<'tcx, TyCtxt<'tcx>> {
2201     type LayoutOfResult = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
2202
2203     #[inline]
2204     fn handle_layout_err(&self, err: LayoutError<'tcx>, _: Span, _: Ty<'tcx>) -> LayoutError<'tcx> {
2205         err
2206     }
2207 }
2208
2209 impl<'tcx> LayoutOfHelpers<'tcx> for LayoutCx<'tcx, ty::query::TyCtxtAt<'tcx>> {
2210     type LayoutOfResult = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
2211
2212     #[inline]
2213     fn layout_tcx_at_span(&self) -> Span {
2214         self.tcx.span
2215     }
2216
2217     #[inline]
2218     fn handle_layout_err(&self, err: LayoutError<'tcx>, _: Span, _: Ty<'tcx>) -> LayoutError<'tcx> {
2219         err
2220     }
2221 }
2222
2223 impl<'tcx, C> TyAbiInterface<'tcx, C> for Ty<'tcx>
2224 where
2225     C: HasTyCtxt<'tcx> + HasParamEnv<'tcx>,
2226 {
2227     fn ty_and_layout_for_variant(
2228         this: TyAndLayout<'tcx>,
2229         cx: &C,
2230         variant_index: VariantIdx,
2231     ) -> TyAndLayout<'tcx> {
2232         let layout = match this.variants {
2233             Variants::Single { index }
2234                 // If all variants but one are uninhabited, the variant layout is the enum layout.
2235                 if index == variant_index &&
2236                 // Don't confuse variants of uninhabited enums with the enum itself.
2237                 // For more details see https://github.com/rust-lang/rust/issues/69763.
2238                 this.fields != FieldsShape::Primitive =>
2239             {
2240                 this.layout
2241             }
2242
2243             Variants::Single { index } => {
2244                 let tcx = cx.tcx();
2245                 let param_env = cx.param_env();
2246
2247                 // Deny calling for_variant more than once for non-Single enums.
2248                 if let Ok(original_layout) = tcx.layout_of(param_env.and(this.ty)) {
2249                     assert_eq!(original_layout.variants, Variants::Single { index });
2250                 }
2251
2252                 let fields = match this.ty.kind() {
2253                     ty::Adt(def, _) if def.variants.is_empty() =>
2254                         bug!("for_variant called on zero-variant enum"),
2255                     ty::Adt(def, _) => def.variants[variant_index].fields.len(),
2256                     _ => bug!(),
2257                 };
2258                 tcx.intern_layout(Layout {
2259                     variants: Variants::Single { index: variant_index },
2260                     fields: match NonZeroUsize::new(fields) {
2261                         Some(fields) => FieldsShape::Union(fields),
2262                         None => FieldsShape::Arbitrary { offsets: vec![], memory_index: vec![] },
2263                     },
2264                     abi: Abi::Uninhabited,
2265                     largest_niche: None,
2266                     align: tcx.data_layout.i8_align,
2267                     size: Size::ZERO,
2268                 })
2269             }
2270
2271             Variants::Multiple { ref variants, .. } => &variants[variant_index],
2272         };
2273
2274         assert_eq!(layout.variants, Variants::Single { index: variant_index });
2275
2276         TyAndLayout { ty: this.ty, layout }
2277     }
2278
2279     fn ty_and_layout_field(this: TyAndLayout<'tcx>, cx: &C, i: usize) -> TyAndLayout<'tcx> {
2280         enum TyMaybeWithLayout<'tcx> {
2281             Ty(Ty<'tcx>),
2282             TyAndLayout(TyAndLayout<'tcx>),
2283         }
2284
2285         fn field_ty_or_layout<'tcx>(
2286             this: TyAndLayout<'tcx>,
2287             cx: &(impl HasTyCtxt<'tcx> + HasParamEnv<'tcx>),
2288             i: usize,
2289         ) -> TyMaybeWithLayout<'tcx> {
2290             let tcx = cx.tcx();
2291             let tag_layout = |tag: Scalar| -> TyAndLayout<'tcx> {
2292                 let layout = Layout::scalar(cx, tag);
2293                 TyAndLayout { layout: tcx.intern_layout(layout), ty: tag.value.to_ty(tcx) }
2294             };
2295
2296             match *this.ty.kind() {
2297                 ty::Bool
2298                 | ty::Char
2299                 | ty::Int(_)
2300                 | ty::Uint(_)
2301                 | ty::Float(_)
2302                 | ty::FnPtr(_)
2303                 | ty::Never
2304                 | ty::FnDef(..)
2305                 | ty::GeneratorWitness(..)
2306                 | ty::Foreign(..)
2307                 | ty::Dynamic(..) => bug!("TyAndLayout::field({:?}): not applicable", this),
2308
2309                 // Potentially-fat pointers.
2310                 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
2311                     assert!(i < this.fields.count());
2312
2313                     // Reuse the fat `*T` type as its own thin pointer data field.
2314                     // This provides information about, e.g., DST struct pointees
2315                     // (which may have no non-DST form), and will work as long
2316                     // as the `Abi` or `FieldsShape` is checked by users.
2317                     if i == 0 {
2318                         let nil = tcx.mk_unit();
2319                         let unit_ptr_ty = if this.ty.is_unsafe_ptr() {
2320                             tcx.mk_mut_ptr(nil)
2321                         } else {
2322                             tcx.mk_mut_ref(tcx.lifetimes.re_static, nil)
2323                         };
2324
2325                         // NOTE(eddyb) using an empty `ParamEnv`, and `unwrap`-ing
2326                         // the `Result` should always work because the type is
2327                         // always either `*mut ()` or `&'static mut ()`.
2328                         return TyMaybeWithLayout::TyAndLayout(TyAndLayout {
2329                             ty: this.ty,
2330                             ..tcx.layout_of(ty::ParamEnv::reveal_all().and(unit_ptr_ty)).unwrap()
2331                         });
2332                     }
2333
2334                     match tcx.struct_tail_erasing_lifetimes(pointee, cx.param_env()).kind() {
2335                         ty::Slice(_) | ty::Str => TyMaybeWithLayout::Ty(tcx.types.usize),
2336                         ty::Dynamic(_, _) => {
2337                             TyMaybeWithLayout::Ty(tcx.mk_imm_ref(
2338                                 tcx.lifetimes.re_static,
2339                                 tcx.mk_array(tcx.types.usize, 3),
2340                             ))
2341                             /* FIXME: use actual fn pointers
2342                             Warning: naively computing the number of entries in the
2343                             vtable by counting the methods on the trait + methods on
2344                             all parent traits does not work, because some methods can
2345                             be not object safe and thus excluded from the vtable.
2346                             Increase this counter if you tried to implement this but
2347                             failed to do it without duplicating a lot of code from
2348                             other places in the compiler: 2
2349                             tcx.mk_tup(&[
2350                                 tcx.mk_array(tcx.types.usize, 3),
2351                                 tcx.mk_array(Option<fn()>),
2352                             ])
2353                             */
2354                         }
2355                         _ => bug!("TyAndLayout::field({:?}): not applicable", this),
2356                     }
2357                 }
2358
2359                 // Arrays and slices.
2360                 ty::Array(element, _) | ty::Slice(element) => TyMaybeWithLayout::Ty(element),
2361                 ty::Str => TyMaybeWithLayout::Ty(tcx.types.u8),
2362
2363                 // Tuples, generators and closures.
2364                 ty::Closure(_, ref substs) => field_ty_or_layout(
2365                     TyAndLayout { ty: substs.as_closure().tupled_upvars_ty(), ..this },
2366                     cx,
2367                     i,
2368                 ),
2369
2370                 ty::Generator(def_id, ref substs, _) => match this.variants {
2371                     Variants::Single { index } => TyMaybeWithLayout::Ty(
2372                         substs
2373                             .as_generator()
2374                             .state_tys(def_id, tcx)
2375                             .nth(index.as_usize())
2376                             .unwrap()
2377                             .nth(i)
2378                             .unwrap(),
2379                     ),
2380                     Variants::Multiple { tag, tag_field, .. } => {
2381                         if i == tag_field {
2382                             return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
2383                         }
2384                         TyMaybeWithLayout::Ty(substs.as_generator().prefix_tys().nth(i).unwrap())
2385                     }
2386                 },
2387
2388                 ty::Tuple(tys) => TyMaybeWithLayout::Ty(tys[i].expect_ty()),
2389
2390                 // ADTs.
2391                 ty::Adt(def, substs) => {
2392                     match this.variants {
2393                         Variants::Single { index } => {
2394                             TyMaybeWithLayout::Ty(def.variants[index].fields[i].ty(tcx, substs))
2395                         }
2396
2397                         // Discriminant field for enums (where applicable).
2398                         Variants::Multiple { tag, .. } => {
2399                             assert_eq!(i, 0);
2400                             return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
2401                         }
2402                     }
2403                 }
2404
2405                 ty::Projection(_)
2406                 | ty::Bound(..)
2407                 | ty::Placeholder(..)
2408                 | ty::Opaque(..)
2409                 | ty::Param(_)
2410                 | ty::Infer(_)
2411                 | ty::Error(_) => bug!("TyAndLayout::field: unexpected type `{}`", this.ty),
2412             }
2413         }
2414
2415         match field_ty_or_layout(this, cx, i) {
2416             TyMaybeWithLayout::Ty(field_ty) => {
2417                 cx.tcx().layout_of(cx.param_env().and(field_ty)).unwrap_or_else(|e| {
2418                     bug!(
2419                         "failed to get layout for `{}`: {},\n\
2420                          despite it being a field (#{}) of an existing layout: {:#?}",
2421                         field_ty,
2422                         e,
2423                         i,
2424                         this
2425                     )
2426                 })
2427             }
2428             TyMaybeWithLayout::TyAndLayout(field_layout) => field_layout,
2429         }
2430     }
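    // Reading aid (illustrative, not part of the logic above): for
    // `this.ty == (u8, String)`, `ty_and_layout_field(this, cx, 1)` resolves
    // the field type `String` and computes its layout via `layout_of`; for a
    // fat pointer to a trait object, field 1 (the vtable) is currently
    // modeled above as `&'static [usize; 3]`.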
2431
2432     fn ty_and_layout_pointee_info_at(
2433         this: TyAndLayout<'tcx>,
2434         cx: &C,
2435         offset: Size,
2436     ) -> Option<PointeeInfo> {
2437         let tcx = cx.tcx();
2438         let param_env = cx.param_env();
2439
2440         let addr_space_of_ty = |ty: Ty<'tcx>| {
2441             if ty.is_fn() { cx.data_layout().instruction_address_space } else { AddressSpace::DATA }
2442         };
2443
2444         let pointee_info = match *this.ty.kind() {
2445             ty::RawPtr(mt) if offset.bytes() == 0 => {
2446                 tcx.layout_of(param_env.and(mt.ty)).ok().map(|layout| PointeeInfo {
2447                     size: layout.size,
2448                     align: layout.align.abi,
2449                     safe: None,
2450                     address_space: addr_space_of_ty(mt.ty),
2451                 })
2452             }
2453             ty::FnPtr(fn_sig) if offset.bytes() == 0 => {
2454                 tcx.layout_of(param_env.and(tcx.mk_fn_ptr(fn_sig))).ok().map(|layout| PointeeInfo {
2455                     size: layout.size,
2456                     align: layout.align.abi,
2457                     safe: None,
2458                     address_space: cx.data_layout().instruction_address_space,
2459                 })
2460             }
2461             ty::Ref(_, ty, mt) if offset.bytes() == 0 => {
2462                 let address_space = addr_space_of_ty(ty);
2463                 let kind = if tcx.sess.opts.optimize == OptLevel::No {
2464                     // Use conservative pointer kind if not optimizing. This saves us the
2465                     // Freeze/Unpin queries, and can save time in the codegen backend (noalias
2466                     // attributes in LLVM have compile-time cost even in unoptimized builds).
2467                     PointerKind::Shared
2468                 } else {
2469                     match mt {
2470                         hir::Mutability::Not => {
2471                             if ty.is_freeze(tcx.at(DUMMY_SP), cx.param_env()) {
2472                                 PointerKind::Frozen
2473                             } else {
2474                                 PointerKind::Shared
2475                             }
2476                         }
2477                         hir::Mutability::Mut => {
2478                             // References to self-referential structures should not be considered
2479                             // noalias, as another pointer to the structure can be obtained
2480                             // that is not based on the original reference. We consider all !Unpin
2481                             // types to be potentially self-referential here.
2482                             if ty.is_unpin(tcx.at(DUMMY_SP), cx.param_env()) {
2483                                 PointerKind::UniqueBorrowed
2484                             } else {
2485                                 PointerKind::Shared
2486                             }
2487                         }
2488                     }
2489                 };
2490
2491                 tcx.layout_of(param_env.and(ty)).ok().map(|layout| PointeeInfo {
2492                     size: layout.size,
2493                     align: layout.align.abi,
2494                     safe: Some(kind),
2495                     address_space,
2496                 })
2497             }
2498
2499             _ => {
2500                 let mut data_variant = match this.variants {
2501                     // Within the discriminant field, only the niche itself is
2502                     // always initialized, so we only check for a pointer at its
2503                     // offset.
2504                     //
2505                     // If the niche is a pointer, it's either valid (according
2506                     // to its type), or null (which the niche field's scalar
2507                     // validity range encodes). This allows using
2508                     // `dereferenceable_or_null` for, e.g., `Option<&T>`, and
2509                     // this will continue to work as long as we don't start
2510                     // using more niches than just null (e.g., the first page of
2511                     // the address space, or unaligned pointers).
2512                     Variants::Multiple {
2513                         tag_encoding: TagEncoding::Niche { dataful_variant, .. },
2514                         tag_field,
2515                         ..
2516                     } if this.fields.offset(tag_field) == offset => {
2517                         Some(this.for_variant(cx, dataful_variant))
2518                     }
2519                     _ => Some(this),
2520                 };
2521
2522                 if let Some(variant) = data_variant {
2523                     // We're not interested in any unions.
2524                     if let FieldsShape::Union(_) = variant.fields {
2525                         data_variant = None;
2526                     }
2527                 }
2528
2529                 let mut result = None;
2530
2531                 if let Some(variant) = data_variant {
2532                     let ptr_end = offset + Pointer.size(cx);
2533                     for i in 0..variant.fields.count() {
2534                         let field_start = variant.fields.offset(i);
2535                         if field_start <= offset {
2536                             let field = variant.field(cx, i);
2537                             result = field.to_result().ok().and_then(|field| {
2538                                 if ptr_end <= field_start + field.size {
2539                                     // We found the right field, look inside it.
2540                                     field.pointee_info_at(cx, offset - field_start)
2543                                 } else {
2544                                     None
2545                                 }
2546                             });
2547                             if result.is_some() {
2548                                 break;
2549                             }
2550                         }
2551                     }
2552                 }
2553
2554                 // FIXME(eddyb) This should be for `ptr::Unique<T>`, not `Box<T>`.
2555                 if let Some(ref mut pointee) = result {
2556                     if let ty::Adt(def, _) = this.ty.kind() {
2557                         if def.is_box() && offset.bytes() == 0 {
2558                             pointee.safe = Some(PointerKind::UniqueOwned);
2559                         }
2560                     }
2561                 }
2562
2563                 result
2564             }
2565         };
2566
2567         debug!(
2568             "pointee_info_at (offset={:?}, type kind: {:?}) => {:?}",
2569             offset,
2570             this.ty.kind(),
2571             pointee_info
2572         );
2573
2574         pointee_info
2575     }
2576 }
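// Hand-worked example of `ty_and_layout_pointee_info_at` (illustrative): for
// `Option<&u8>` probed at the niche's offset, the dataful variant is selected
// above, the `&u8` field is found by scanning field offsets, and the result is
// a `PointeeInfo` with the size/align of `u8` and `safe: Some(PointerKind::Frozen)`
// in optimized builds (`PointerKind::Shared` under `-Copt-level=0`).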
2577
2578 impl<'tcx> ty::Instance<'tcx> {
2579     // NOTE(eddyb) this is private to avoid using it from outside of
2580     // `fn_abi_of_instance` - any other uses are either too high-level
2581     // for `Instance` (e.g. typeck would use `Ty::fn_sig` instead),
2582     // or should go through `FnAbi` instead, to avoid losing any
2583     // adjustments `fn_abi_of_instance` might be performing.
2584     fn fn_sig_for_fn_abi(
2585         &self,
2586         tcx: TyCtxt<'tcx>,
2587         param_env: ty::ParamEnv<'tcx>,
2588     ) -> ty::PolyFnSig<'tcx> {
2589         let ty = self.ty(tcx, param_env);
2590         match *ty.kind() {
2591             ty::FnDef(..) => {
2592                 // HACK(davidtwco,eddyb): This is a workaround for polymorphization considering
2593                 // parameters unused if they show up in the signature, but not in the `mir::Body`
2594                 // (i.e. due to being inside a projection that got normalized, see
2595                 // `src/test/ui/polymorphization/normalized_sig_types.rs`), and codegen not keeping
2596                 // track of a polymorphization `ParamEnv` to allow normalizing later.
2597                 let mut sig = match *ty.kind() {
2598                     ty::FnDef(def_id, substs) => tcx
2599                         .normalize_erasing_regions(tcx.param_env(def_id), tcx.fn_sig(def_id))
2600                         .subst(tcx, substs),
2601                     _ => unreachable!(),
2602                 };
2603
2604                 if let ty::InstanceDef::VtableShim(..) = self.def {
2605                     // Modify `fn(self, ...)` to `fn(self: *mut Self, ...)`.
2606                     sig = sig.map_bound(|mut sig| {
2607                         let mut inputs_and_output = sig.inputs_and_output.to_vec();
2608                         inputs_and_output[0] = tcx.mk_mut_ptr(inputs_and_output[0]);
2609                         sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);
2610                         sig
2611                     });
2612                 }
2613                 sig
2614             }
2615             ty::Closure(def_id, substs) => {
2616                 let sig = substs.as_closure().sig();
2617
2618                 let bound_vars = tcx.mk_bound_variable_kinds(
2619                     sig.bound_vars()
2620                         .iter()
2621                         .chain(iter::once(ty::BoundVariableKind::Region(ty::BrEnv))),
2622                 );
2623                 let br = ty::BoundRegion {
2624                     var: ty::BoundVar::from_usize(bound_vars.len() - 1),
2625                     kind: ty::BoundRegionKind::BrEnv,
2626                 };
2627                 let env_region = ty::ReLateBound(ty::INNERMOST, br);
2628                 let env_ty = tcx.closure_env_ty(def_id, substs, env_region).unwrap();
2629
2630                 let sig = sig.skip_binder();
2631                 ty::Binder::bind_with_vars(
2632                     tcx.mk_fn_sig(
2633                         iter::once(env_ty).chain(sig.inputs().iter().cloned()),
2634                         sig.output(),
2635                         sig.c_variadic,
2636                         sig.unsafety,
2637                         sig.abi,
2638                     ),
2639                     bound_vars,
2640                 )
2641             }
2642             ty::Generator(_, substs, _) => {
2643                 let sig = substs.as_generator().poly_sig();
2644
2645                 let bound_vars = tcx.mk_bound_variable_kinds(
2646                     sig.bound_vars()
2647                         .iter()
2648                         .chain(iter::once(ty::BoundVariableKind::Region(ty::BrEnv))),
2649                 );
2650                 let br = ty::BoundRegion {
2651                     var: ty::BoundVar::from_usize(bound_vars.len() - 1),
2652                     kind: ty::BoundRegionKind::BrEnv,
2653                 };
2654                 let env_region = ty::ReLateBound(ty::INNERMOST, br);
2655                 let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty);
2656
2657                 let pin_did = tcx.require_lang_item(LangItem::Pin, None);
2658                 let pin_adt_ref = tcx.adt_def(pin_did);
2659                 let pin_substs = tcx.intern_substs(&[env_ty.into()]);
2660                 let env_ty = tcx.mk_adt(pin_adt_ref, pin_substs);
2661
2662                 let sig = sig.skip_binder();
2663                 let state_did = tcx.require_lang_item(LangItem::GeneratorState, None);
2664                 let state_adt_ref = tcx.adt_def(state_did);
2665                 let state_substs = tcx.intern_substs(&[sig.yield_ty.into(), sig.return_ty.into()]);
2666                 let ret_ty = tcx.mk_adt(state_adt_ref, state_substs);
2667                 ty::Binder::bind_with_vars(
2668                     tcx.mk_fn_sig(
2669                         [env_ty, sig.resume_ty].iter(),
2670                         &ret_ty,
2671                         false,
2672                         hir::Unsafety::Normal,
2673                         rustc_target::spec::abi::Abi::Rust,
2674                     ),
2675                     bound_vars,
2676                 )
2677             }
2678             _ => bug!("unexpected type {:?} in Instance::fn_sig", ty),
2679         }
2680     }
2681 }
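// Illustration of `fn_sig_for_fn_abi` (hand-worked, details elided): for an
// `Fn` closure whose `rust-call` signature is `fn((u32,)) -> bool`, the
// closure arm above prepends the environment, yielding roughly
// `for<'env> extern "rust-call" fn(&'env {closure}, (u32,)) -> bool`.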
2682
2683 /// Calculates whether a function's ABI can unwind or not.
2684 ///
2685 /// This takes two primary parameters:
2686 ///
2687 /// * `codegen_fn_attr_flags` - these are flags calculated as part of the
2688 ///   codegen attrs for a defined function. For function pointers this set of
2689 ///   flags is the empty set. This is only applicable for Rust-defined
2690 ///   functions, and generally isn't needed except for small optimizations where
2691 ///   we try to mark a function which otherwise might look like it could unwind
2692 ///   as one that doesn't actually unwind (such as for intrinsics).
2693 ///
2694 /// * `abi` - this is the ABI that the function is defined with. This is the
2695 ///   primary factor for determining whether a function can unwind or not.
2696 ///
2697 /// Note that in this case unwinding is not necessarily panicking in Rust. Rust
2698 /// panics are implemented with unwinds on most platforms (when
2699 /// `-Cpanic=unwind`), but this also accounts for `-Cpanic=abort` build modes.
2700 /// Notably, unwinding is disallowed for most non-Rust ABIs unless it's
2701 /// specifically in the name (e.g. `"C-unwind"`). What unwinding means is
2702 /// defined for each ABI individually, but it always corresponds to some form of
2703 /// stack-based unwinding (the exact mechanism of which varies
2704 /// from platform to platform).
2705 ///
2706 /// Rust functions are classified as able to unwind or not based on the
2707 /// active "panic strategy". In other words, Rust functions are considered to
2708 /// unwind in `-Cpanic=unwind` mode and cannot unwind in `-Cpanic=abort` mode.
2709 /// Note that Rust supports intermingling panic=abort and panic=unwind code, but
2710 /// only if the final panic mode is panic=abort. In this scenario any code
2711 /// previously compiled assuming that a function can unwind is still correct; it
2712 /// just never happens to actually unwind at runtime.
2713 ///
2714 /// This function's answer to whether or not a function can unwind is quite
2715 /// impactful throughout the compiler. This affects things like:
2716 ///
2717 /// * Calling a function which can't unwind means codegen simply ignores any
2718 ///   associated unwinding cleanup.
2719 /// * Calling a function which can unwind from a function which can't unwind
2720 ///   causes the `abort_unwinding_calls` MIR pass to insert a landing pad that
2721 ///   aborts the process.
2722 /// * This affects whether functions have the LLVM `nounwind` attribute, which
2723 ///   affects various optimizations and codegen.
2724 ///
2725 /// FIXME: this is actually buggy with respect to Rust functions. Rust functions
2726 /// compiled with `-Cpanic=unwind` and referenced from another crate compiled
2727 /// with `-Cpanic=abort` will look like they can't unwind when in fact they
2728 /// might (from a foreign exception or similar).
2729 #[inline]
2730 pub fn fn_can_unwind<'tcx>(
2731     tcx: TyCtxt<'tcx>,
2732     codegen_fn_attr_flags: CodegenFnAttrFlags,
2733     abi: SpecAbi,
2734 ) -> bool {
2735     // Special attribute for functions which can't unwind.
2736     if codegen_fn_attr_flags.contains(CodegenFnAttrFlags::NEVER_UNWIND) {
2737         return false;
2738     }
2739
2740     // Otherwise, if this isn't special, then unwinding is generally determined by
2741     // the ABI of the function itself. ABIs like `C` have variants which also
2742     // specifically allow unwinding (`C-unwind`), but not all platform-specific
2743     // ABIs have such an option. Otherwise the only other thing here is Rust
2744     // itself, and those ABIs are determined by the panic strategy configured
2745     // for this compilation.
2746     //
2747     // Unfortunately at this time there's also another caveat. Rust [RFC
2748     // 2945][rfc] has been accepted and is in the process of being implemented
2749     // and stabilized. In this interim state we need to deal with historical
2750     // rustc behavior as well as plan for future rustc behavior.
2751     //
2752     // Historically functions declared with `extern "C"` were marked at the
2753     // codegen layer as `nounwind`. This happened regardless of `panic=unwind`
2754     // or not. This is UB for functions in `panic=unwind` mode that then
2755     // actually panic and unwind. Note that this behavior is true for both
2756     // externally declared functions as well as Rust-defined functions.
2757     //
2758     // To fix this UB rustc would like to change in the future to catch unwinds
2759     // from function calls that may unwind within a Rust-defined `extern "C"`
2760     // function and forcibly abort the process, thereby respecting the
2761     // `nounwind` attribute emitted for `extern "C"`. This behavior change isn't
2762     // ready to roll out, so determining whether or not the `C` family of ABIs
2763     // unwinds is conditional not only on their definition but also whether the
2764     // `#![feature(c_unwind)]` feature gate is active.
2765     //
2766     // Note that this means that, unlike historical compilers, rustc now, by
2767     // default, considers that the `C` ABI may unwind. This will
2768     // prevent some optimization opportunities, however, so we try to scope this
2769     // change and only assume that `C` unwinds with `panic=unwind` (as opposed
2770     // to `panic=abort`).
2771     //
2772     // Eventually the check against `c_unwind` here will ideally get removed and
2773     // this'll be a little cleaner as it'll be a straightforward check of the
2774     // ABI.
2775     //
2776     // [rfc]: https://github.com/rust-lang/rfcs/blob/master/text/2945-c-unwind-abi.md
2777     use SpecAbi::*;
2778     match abi {
2779         C { unwind }
2780         | System { unwind }
2781         | Cdecl { unwind }
2782         | Stdcall { unwind }
2783         | Fastcall { unwind }
2784         | Vectorcall { unwind }
2785         | Thiscall { unwind }
2786         | Aapcs { unwind }
2787         | Win64 { unwind }
2788         | SysV64 { unwind } => {
2789             unwind
2790                 || (!tcx.features().c_unwind && tcx.sess.panic_strategy() == PanicStrategy::Unwind)
2791         }
2792         PtxKernel
2793         | Msp430Interrupt
2794         | X86Interrupt
2795         | AmdGpuKernel
2796         | EfiApi
2797         | AvrInterrupt
2798         | AvrNonBlockingInterrupt
2799         | CCmseNonSecureCall
2800         | Wasm
2801         | RustIntrinsic
2802         | PlatformIntrinsic
2803         | Unadjusted => false,
2804         Rust | RustCall => tcx.sess.panic_strategy() == PanicStrategy::Unwind,
2805     }
2806 }
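// A hand-worked reading of the match above (assuming `NEVER_UNWIND` is not
// set; illustrative, not normative):
//
//     C { unwind: true }   // `extern "C-unwind"`: can always unwind => true
//     C { unwind: false }  // plain `extern "C"`: unwinds only while the
//                          // `c_unwind` gate is off *and* `-Cpanic=unwind`
//     Rust | RustCall      // follows the panic strategy: `-Cpanic=unwind` => true
//     PtxKernel | ...      // never unwinds => false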
2807
2808 #[inline]
2809 pub fn conv_from_spec_abi(tcx: TyCtxt<'_>, abi: SpecAbi) -> Conv {
2810     use rustc_target::spec::abi::Abi::*;
2811     match tcx.sess.target.adjust_abi(abi) {
2812         RustIntrinsic | PlatformIntrinsic | Rust | RustCall => Conv::Rust,
2813
2814         // It's the ABI's job to select this, not ours.
2815         System { .. } => bug!("system abi should be selected elsewhere"),
2816         EfiApi => bug!("eficall abi should be selected elsewhere"),
2817
2818         Stdcall { .. } => Conv::X86Stdcall,
2819         Fastcall { .. } => Conv::X86Fastcall,
2820         Vectorcall { .. } => Conv::X86VectorCall,
2821         Thiscall { .. } => Conv::X86ThisCall,
2822         C { .. } => Conv::C,
2823         Unadjusted => Conv::C,
2824         Win64 { .. } => Conv::X86_64Win64,
2825         SysV64 { .. } => Conv::X86_64SysV,
2826         Aapcs { .. } => Conv::ArmAapcs,
2827         CCmseNonSecureCall => Conv::CCmseNonSecureCall,
2828         PtxKernel => Conv::PtxKernel,
2829         Msp430Interrupt => Conv::Msp430Intr,
2830         X86Interrupt => Conv::X86Intr,
2831         AmdGpuKernel => Conv::AmdGpuKernel,
2832         AvrInterrupt => Conv::AvrInterrupt,
2833         AvrNonBlockingInterrupt => Conv::AvrNonBlockingInterrupt,
2834         Wasm => Conv::C,
2835
2836         // These API constants ought to be more specific...
2837         Cdecl { .. } => Conv::C,
2838     }
2839 }
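// For example (illustrative): after `adjust_abi`, `C { unwind: false }` and
// `Cdecl { .. }` both map to `Conv::C`, while all `Rust*` ABIs collapse to
// `Conv::Rust`; `System` and `EfiApi` must have been resolved to a concrete
// ABI before reaching this function, hence the `bug!`s above.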
2840
2841 /// Error produced by attempting to compute or adjust a `FnAbi`.
2842 #[derive(Copy, Clone, Debug, HashStable)]
2843 pub enum FnAbiError<'tcx> {
2844     /// Error produced by a `layout_of` call, while computing `FnAbi` initially.
2845     Layout(LayoutError<'tcx>),
2846
2847     /// Error produced by attempting to adjust a `FnAbi`, for a "foreign" ABI.
2848     AdjustForForeignAbi(call::AdjustForForeignAbiError),
2849 }
2850
2851 impl<'tcx> From<LayoutError<'tcx>> for FnAbiError<'tcx> {
2852     fn from(err: LayoutError<'tcx>) -> Self {
2853         Self::Layout(err)
2854     }
2855 }
2856
2857 impl From<call::AdjustForForeignAbiError> for FnAbiError<'_> {
2858     fn from(err: call::AdjustForForeignAbiError) -> Self {
2859         Self::AdjustForForeignAbi(err)
2860     }
2861 }
2862
2863 impl<'tcx> fmt::Display for FnAbiError<'tcx> {
2864     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2865         match self {
2866             Self::Layout(err) => err.fmt(f),
2867             Self::AdjustForForeignAbi(err) => err.fmt(f),
2868         }
2869     }
2870 }
2871
2872 // FIXME(eddyb) maybe use something like this for a unified `fn_abi_of`, not
2873 // just for error handling.
2874 #[derive(Debug)]
2875 pub enum FnAbiRequest<'tcx> {
2876     OfFnPtr { sig: ty::PolyFnSig<'tcx>, extra_args: &'tcx ty::List<Ty<'tcx>> },
2877     OfInstance { instance: ty::Instance<'tcx>, extra_args: &'tcx ty::List<Ty<'tcx>> },
2878 }
2879
2880 /// Trait for contexts that want to be able to compute `FnAbi`s.
2881 /// This automatically gives access to `FnAbiOf`, through a blanket `impl`.
2882 pub trait FnAbiOfHelpers<'tcx>: LayoutOfHelpers<'tcx> {
2883     /// The `&FnAbi`-wrapping type (or `&FnAbi` itself), which will be
2884     /// returned from `fn_abi_of_*` (see also `handle_fn_abi_err`).
2885     type FnAbiOfResult: MaybeResult<&'tcx FnAbi<'tcx, Ty<'tcx>>>;
2886
2887     /// Helper used for `fn_abi_of_*`, to adapt `tcx.fn_abi_of_*(...)` into a
2888     /// `Self::FnAbiOfResult` (which does not need to be a `Result<...>`).
2889     ///
2890     /// Most `impl`s, which propagate `FnAbiError`s, should simply return `err`,
2891     /// but this hook allows e.g. codegen to return only `&FnAbi` from its
2892     /// `cx.fn_abi_of_*(...)`, without any `Result<...>` around it to deal with
2893     /// (and any `FnAbiError`s are turned into fatal errors or ICEs).
2894     fn handle_fn_abi_err(
2895         &self,
2896         err: FnAbiError<'tcx>,
2897         span: Span,
2898         fn_abi_request: FnAbiRequest<'tcx>,
2899     ) -> <Self::FnAbiOfResult as MaybeResult<&'tcx FnAbi<'tcx, Ty<'tcx>>>>::Error;
2900 }
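// A minimal sketch of an error-propagating impl (illustrative only; `MyCx` is
// a hypothetical context type assumed to already implement
// `LayoutOfHelpers<'tcx>`):
//
//     impl<'tcx> FnAbiOfHelpers<'tcx> for MyCx<'tcx> {
//         type FnAbiOfResult = Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>>;
//
//         fn handle_fn_abi_err(
//             &self,
//             err: FnAbiError<'tcx>,
//             _span: Span,
//             _fn_abi_request: FnAbiRequest<'tcx>,
//         ) -> FnAbiError<'tcx> {
//             // Propagate the error; codegen impls instead turn it into a
//             // fatal error or ICE and return `&FnAbi` directly.
//             err
//         }
//     }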
2901
2902 /// Blanket extension trait for contexts that can compute `FnAbi`s.
2903 pub trait FnAbiOf<'tcx>: FnAbiOfHelpers<'tcx> {
2904     /// Compute a `FnAbi` suitable for indirect calls, i.e. to `fn` pointers.
2905     ///
2906     /// NB: this doesn't handle virtual calls - those should use `fn_abi_of_instance`
2907     /// instead, where the instance is an `InstanceDef::Virtual`.
2908     #[inline]
2909     fn fn_abi_of_fn_ptr(
2910         &self,
2911         sig: ty::PolyFnSig<'tcx>,
2912         extra_args: &'tcx ty::List<Ty<'tcx>>,
2913     ) -> Self::FnAbiOfResult {
2914         // FIXME(eddyb) get a better `span` here.
2915         let span = self.layout_tcx_at_span();
2916         let tcx = self.tcx().at(span);
2917
2918         MaybeResult::from(tcx.fn_abi_of_fn_ptr(self.param_env().and((sig, extra_args))).map_err(
2919             |err| self.handle_fn_abi_err(err, span, FnAbiRequest::OfFnPtr { sig, extra_args }),
2920         ))
2921     }
2922
2923     /// Compute a `FnAbi` suitable for declaring/defining an `fn` instance, and for
2924     /// direct calls to an `fn`.
2925     ///
2926     /// NB: that includes virtual calls, which are represented by "direct calls"
2927     /// to an `InstanceDef::Virtual` instance (of `<dyn Trait as Trait>::fn`).
2928     #[inline]
2929     fn fn_abi_of_instance(
2930         &self,
2931         instance: ty::Instance<'tcx>,
2932         extra_args: &'tcx ty::List<Ty<'tcx>>,
2933     ) -> Self::FnAbiOfResult {
2934         // FIXME(eddyb) get a better `span` here.
2935         let span = self.layout_tcx_at_span();
2936         let tcx = self.tcx().at(span);
2937
2938         MaybeResult::from(
2939             tcx.fn_abi_of_instance(self.param_env().and((instance, extra_args))).map_err(|err| {
2940                 // HACK(eddyb) at least for definitions of/calls to `Instance`s,
2941                 // we can get some kind of span even if one wasn't provided.
2942                 // However, we don't do this early in order to avoid calling
2943                 // `def_span` unconditionally (which may have a perf penalty).
2944                 let span = if !span.is_dummy() { span } else { tcx.def_span(instance.def_id()) };
2945                 self.handle_fn_abi_err(err, span, FnAbiRequest::OfInstance { instance, extra_args })
2946             }),
2947         )
2948     }
2949 }
2950
2951 impl<'tcx, C: FnAbiOfHelpers<'tcx>> FnAbiOf<'tcx> for C {}
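// With the blanket impl above, any context implementing `FnAbiOfHelpers` can
// query ABIs directly. A codegen-style usage sketch (assuming `cx` is such a
// context and `instance` is a `ty::Instance<'tcx>`):
//
//     let fn_abi = cx.fn_abi_of_instance(instance, ty::List::empty());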
2952
2953 fn fn_abi_of_fn_ptr<'tcx>(
2954     tcx: TyCtxt<'tcx>,
2955     query: ty::ParamEnvAnd<'tcx, (ty::PolyFnSig<'tcx>, &'tcx ty::List<Ty<'tcx>>)>,
2956 ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
2957     let (param_env, (sig, extra_args)) = query.into_parts();
2958
2959     LayoutCx { tcx, param_env }.fn_abi_new_uncached(
2960         sig,
2961         extra_args,
2962         None,
2963         CodegenFnAttrFlags::empty(),
2964         false,
2965     )
2966 }
2967
2968 fn fn_abi_of_instance<'tcx>(
2969     tcx: TyCtxt<'tcx>,
2970     query: ty::ParamEnvAnd<'tcx, (ty::Instance<'tcx>, &'tcx ty::List<Ty<'tcx>>)>,
2971 ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
2972     let (param_env, (instance, extra_args)) = query.into_parts();
2973
2974     let sig = instance.fn_sig_for_fn_abi(tcx, param_env);
2975
2976     let caller_location = if instance.def.requires_caller_location(tcx) {
2977         Some(tcx.caller_location_ty())
2978     } else {
2979         None
2980     };
2981
2982     let attrs = tcx.codegen_fn_attrs(instance.def_id()).flags;
2983
2984     LayoutCx { tcx, param_env }.fn_abi_new_uncached(
2985         sig,
2986         extra_args,
2987         caller_location,
2988         attrs,
2989         matches!(instance.def, ty::InstanceDef::Virtual(..)),
2990     )
2991 }
2992
2993 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
2994     // FIXME(eddyb) perhaps group the signature/type-containing (or all of them?)
2995     // arguments of this method, into a separate `struct`.
2996     fn fn_abi_new_uncached(
2997         &self,
2998         sig: ty::PolyFnSig<'tcx>,
2999         extra_args: &[Ty<'tcx>],
3000         caller_location: Option<Ty<'tcx>>,
3001         codegen_fn_attr_flags: CodegenFnAttrFlags,
3002         // FIXME(eddyb) replace this with something typed, like an `enum`.
3003         force_thin_self_ptr: bool,
3004     ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
3005         debug!("fn_abi_new_uncached({:?}, {:?})", sig, extra_args);
3006
3007         let sig = self.tcx.normalize_erasing_late_bound_regions(self.param_env, sig);
3008
3009         let conv = conv_from_spec_abi(self.tcx(), sig.abi);
3010
3011         let mut inputs = sig.inputs();
3012         let extra_args = if sig.abi == RustCall {
3013             assert!(!sig.c_variadic && extra_args.is_empty());
3014
3015             if let Some(input) = sig.inputs().last() {
3016                 if let ty::Tuple(tupled_arguments) = input.kind() {
3017                     inputs = &sig.inputs()[0..sig.inputs().len() - 1];
3018                     tupled_arguments.iter().map(|k| k.expect_ty()).collect()
3019                 } else {
3020                     bug!(
3021                         "argument to function with \"rust-call\" ABI \
3022                             is not a tuple"
3023                     );
3024                 }
3025             } else {
3026                 bug!(
3027                     "argument to function with \"rust-call\" ABI \
3028                         is not a tuple"
3029                 );
3030             }
3031         } else {
3032             assert!(sig.c_variadic || extra_args.is_empty());
3033             extra_args.to_vec()
3034         };
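        // For example (illustrative): a `"rust-call"` signature such as
        //     extern "rust-call" fn(&self, (u32, f32)) -> bool
        // is untupled here so that `inputs` is `[&self]` and `extra_args` is
        // `[u32, f32]`; the trailing tuple is never passed as one argument.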
3035
3036         let target = &self.tcx.sess.target;
3037         let target_env_gnu_like = matches!(&target.env[..], "gnu" | "musl" | "uclibc");
3038         let win_x64_gnu = target.os == "windows" && target.arch == "x86_64" && target.env == "gnu";
3039         let linux_s390x_gnu_like =
3040             target.os == "linux" && target.arch == "s390x" && target_env_gnu_like;
3041         let linux_sparc64_gnu_like =
3042             target.os == "linux" && target.arch == "sparc64" && target_env_gnu_like;
3043         let linux_powerpc_gnu_like =
3044             target.os == "linux" && target.arch == "powerpc" && target_env_gnu_like;
3045         use SpecAbi::*;
3046         let rust_abi = matches!(sig.abi, RustIntrinsic | PlatformIntrinsic | Rust | RustCall);
3047
3048         // Handle safe Rust thin and fat pointers.
3049         let adjust_for_rust_scalar = |attrs: &mut ArgAttributes,
3050                                       scalar: Scalar,
3051                                       layout: TyAndLayout<'tcx>,
3052                                       offset: Size,
3053                                       is_return: bool| {
3054             // Booleans are always a noundef i1 that needs to be zero-extended.
3055             if scalar.is_bool() {
3056                 attrs.ext(ArgExtension::Zext);
3057                 attrs.set(ArgAttribute::NoUndef);
3058                 return;
3059             }
3060
3061             // Only pointer types are handled below.
3062             if scalar.value != Pointer {
3063                 return;
3064             }
3065
3066             if !scalar.valid_range.contains(0) {
3067                 attrs.set(ArgAttribute::NonNull);
3068             }
3069
3070             if let Some(pointee) = layout.pointee_info_at(self, offset) {
3071                 if let Some(kind) = pointee.safe {
3072                     attrs.pointee_align = Some(pointee.align);
3073
3074                     // `Box` pointers (`UniqueOwned`) are not necessarily dereferenceable
3075                     // for the entire duration of the function as they can be deallocated
3076                     // at any time. Set their valid size to 0.
3077                     attrs.pointee_size = match kind {
3078                         PointerKind::UniqueOwned => Size::ZERO,
3079                         _ => pointee.size,
3080                     };
3081
3082                     // `Box`, `&T`, and `&mut T` cannot be undef.
3083                     // Note that this only applies to the value of the pointer itself;
3084                     // this attribute doesn't make it UB for the pointed-to data to be undef.
3085                     attrs.set(ArgAttribute::NoUndef);
3086
3087                     // `Box` pointer parameters never alias because ownership is transferred;
3088                     // `&mut` pointer parameters never alias other parameters
3089                     // or mutable global data.
3090                     //
3091                     // `&T` where `T` contains no `UnsafeCell<U>` is immutable,
3092                     // and can be marked as both `readonly` and `noalias`, as
3093                     // LLVM's definition of `noalias` is based solely on memory
3094                     // dependencies rather than pointer equality.
3095                     //
3096                     // Due to past miscompiles in LLVM, we apply a separate NoAliasMutRef attribute
3097                     // for UniqueBorrowed arguments, so that the codegen backend can decide whether
3098                     // or not to actually emit the attribute. It can also be controlled with the
3099                     // `-Zmutable-noalias` debugging option.
3100                     let no_alias = match kind {
3101                         PointerKind::Shared | PointerKind::UniqueBorrowed => false,
3102                         PointerKind::UniqueOwned => true,
3103                         PointerKind::Frozen => !is_return,
3104                     };
3105                     if no_alias {
3106                         attrs.set(ArgAttribute::NoAlias);
3107                     }
3108
3109                     if kind == PointerKind::Frozen && !is_return {
3110                         attrs.set(ArgAttribute::ReadOnly);
3111                     }
3112
3113                     if kind == PointerKind::UniqueBorrowed && !is_return {
3114                         attrs.set(ArgAttribute::NoAliasMutRef);
3115                     }
3116                 }
3117             }
3118         };
3119
3120         let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| -> Result<_, FnAbiError<'tcx>> {
3121             let is_return = arg_idx.is_none();
3122
3123             let layout = self.layout_of(ty)?;
3124             let layout = if force_thin_self_ptr && arg_idx == Some(0) {
3125                 // Don't pass the vtable; it's not an argument of the virtual fn.
3126                 // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
3127                 // or `&/&mut dyn Trait`, because this is special-cased elsewhere in codegen.
3128                 make_thin_self_ptr(self, layout)
3129             } else {
3130                 layout
3131             };
3132
3133             let mut arg = ArgAbi::new(self, layout, |layout, scalar, offset| {
3134                 let mut attrs = ArgAttributes::new();
3135                 adjust_for_rust_scalar(&mut attrs, scalar, *layout, offset, is_return);
3136                 attrs
3137             });
3138
3139             if arg.layout.is_zst() {
3140                 // For some forsaken reason, x86_64-pc-windows-gnu
3141                 // doesn't ignore zero-sized struct arguments.
3142                 // The same is true for {s390x,sparc64,powerpc}-unknown-linux-{gnu,musl,uclibc}.
3143                 if is_return
3144                     || rust_abi
3145                     || (!win_x64_gnu
3146                         && !linux_s390x_gnu_like
3147                         && !linux_sparc64_gnu_like
3148                         && !linux_powerpc_gnu_like)
3149                 {
3150                     arg.mode = PassMode::Ignore;
3151                 }
3152             }
3153
3154             Ok(arg)
3155         };
3156
3157         let mut fn_abi = FnAbi {
3158             ret: arg_of(sig.output(), None)?,
3159             args: inputs
3160                 .iter()
3161                 .cloned()
3162                 .chain(extra_args)
3163                 .chain(caller_location)
3164                 .enumerate()
3165                 .map(|(i, ty)| arg_of(ty, Some(i)))
3166                 .collect::<Result<_, _>>()?,
3167             c_variadic: sig.c_variadic,
3168             fixed_count: inputs.len(),
3169             conv,
3170             can_unwind: fn_can_unwind(self.tcx(), codegen_fn_attr_flags, sig.abi),
3171         };
3172         self.fn_abi_adjust_for_abi(&mut fn_abi, sig.abi)?;
3173         debug!("fn_abi_new_uncached = {:?}", fn_abi);
3174         Ok(self.tcx.arena.alloc(fn_abi))
3175     }
3176
3177     fn fn_abi_adjust_for_abi(
3178         &self,
3179         fn_abi: &mut FnAbi<'tcx, Ty<'tcx>>,
3180         abi: SpecAbi,
3181     ) -> Result<(), FnAbiError<'tcx>> {
3182         if abi == SpecAbi::Unadjusted {
3183             return Ok(());
3184         }
3185
3186         if abi == SpecAbi::Rust
3187             || abi == SpecAbi::RustCall
3188             || abi == SpecAbi::RustIntrinsic
3189             || abi == SpecAbi::PlatformIntrinsic
3190         {
3191             let fixup = |arg: &mut ArgAbi<'tcx, Ty<'tcx>>| {
3192                 if arg.is_ignore() {
3193                     return;
3194                 }
3195
3196                 match arg.layout.abi {
3197                     Abi::Aggregate { .. } => {}
3198
3199                     // This is a fun case! The gist of what this is doing is
3200                     // that we want callers and callees to always agree on the
3201                     // ABI of how they pass SIMD arguments. If we were to *not*
3202                     // make these arguments indirect then they'd be immediates
3203                     // in LLVM, which means that they'd use whatever the
3204                     // appropriate ABI is for the callee and the caller. That
3205                     // means, for example, if the caller doesn't have AVX
3206                     // enabled but the callee does, then passing an AVX argument
3207                     // across this boundary would cause corrupt data to show up.
3208                     //
3209                     // This problem is fixed by unconditionally passing SIMD
3210                     // arguments through memory between callers and callees
3211                     // which should get them all to agree on ABI regardless of
3212                     // target feature sets. Some more information about this
3213                     // issue can be found in #44367.
3214                     //
3215                     // Note that the platform intrinsic ABI is exempt here as
3216                     // that's how we connect up to LLVM and it's unstable
3217                     // anyway; we control all calls to it in libstd.
3218                     Abi::Vector { .. }
3219                         if abi != SpecAbi::PlatformIntrinsic
3220                             && self.tcx.sess.target.simd_types_indirect =>
3221                     {
3222                         arg.make_indirect();
3223                         return;
3224                     }
3225
3226                     _ => return,
3227                 }
3228
3229                 // Pass and return structures up to 2 pointers in size by value, matching `ScalarPair`.
3230                 // LLVM will usually pass these in 2 registers, which is more efficient than by-ref.
3231                 let max_by_val_size = Pointer.size(self) * 2;
3232                 let size = arg.layout.size;
3233
3234                 if arg.layout.is_unsized() || size > max_by_val_size {
3235                     arg.make_indirect();
3236                 } else {
3237                     // We want to pass small aggregates as immediates, but using
3238                     // an LLVM aggregate type for this leads to bad optimizations,
3239                     // so we pick an appropriately sized integer type instead.
3240                     arg.cast_to(Reg { kind: RegKind::Integer, size });
3241                 }
3242             };
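            // Concretely (on a 64-bit target; a hand-worked illustration): a
            // 16-byte `Abi::Aggregate` value fits `max_by_val_size` (2 pointers)
            // and is cast to `Reg { kind: RegKind::Integer, size }`, so it can
            // travel in registers, while a 24-byte struct exceeds the limit and
            // `make_indirect()` passes it by reference.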
3243             fixup(&mut fn_abi.ret);
3244             for arg in &mut fn_abi.args {
3245                 fixup(arg);
3246             }
3247         } else {
3248             fn_abi.adjust_for_foreign_abi(self, abi)?;
3249         }
3250
3251         Ok(())
3252     }
3253 }
3254
3255 fn make_thin_self_ptr<'tcx>(
3256     cx: &(impl HasTyCtxt<'tcx> + HasParamEnv<'tcx>),
3257     layout: TyAndLayout<'tcx>,
3258 ) -> TyAndLayout<'tcx> {
3259     let tcx = cx.tcx();
3260     let fat_pointer_ty = if layout.is_unsized() {
3261         // unsized `self` is passed as a pointer to `self`
3262         // FIXME (mikeyhew) change this to use &own if it is ever added to the language
3263         tcx.mk_mut_ptr(layout.ty)
3264     } else {
3265         match layout.abi {
3266             Abi::ScalarPair(..) => (),
3267             _ => bug!("receiver type has unsupported layout: {:?}", layout),
3268         }
3269
3270         // In the case of Rc<Self>, we need to explicitly pass a *mut RcBox<Self>
3271         // with a Scalar (not ScalarPair) ABI. This is a hack that is understood
3272         // elsewhere in the compiler as a method on a `dyn Trait`.
3273         // To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes until we
3274                 // get a built-in pointer type.
3275         let mut fat_pointer_layout = layout;
3276         'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr()
3277             && !fat_pointer_layout.ty.is_region_ptr()
3278         {
3279             for i in 0..fat_pointer_layout.fields.count() {
3280                 let field_layout = fat_pointer_layout.field(cx, i);
3281
3282                 if !field_layout.is_zst() {
3283                     fat_pointer_layout = field_layout;
3284                     continue 'descend_newtypes;
3285                 }
3286             }
3287
3288             bug!("receiver has no non-zero-sized fields {:?}", fat_pointer_layout);
3289         }
3290
3291         fat_pointer_layout.ty
3292     };
3293
3294     // We now have a type like `*mut RcBox<dyn Trait>`;
3295     // change its layout to that of `*mut ()`, a thin pointer, but keep the same type.
3296     // This is understood as a special case elsewhere in the compiler.
3297     let unit_ptr_ty = tcx.mk_mut_ptr(tcx.mk_unit());
3298
3299     TyAndLayout {
3300         ty: fat_pointer_ty,
3301
3302         // NOTE(eddyb) using an empty `ParamEnv`, and `unwrap`-ing the `Result`
3303         // should always work because the type is always `*mut ()`.
3304         ..tcx.layout_of(ty::ParamEnv::reveal_all().and(unit_ptr_ty)).unwrap()
3305     }
3306 }
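// Illustration (hand-worked, hedged): for an `Rc<dyn Trait>` receiver, whose
// layout is a `ScalarPair` fat pointer, the loop above descends roughly
// `Rc<dyn Trait>` -> `NonNull<RcBox<dyn Trait>>` -> `*const RcBox<dyn Trait>`
// until it reaches a built-in pointer type; the returned `TyAndLayout` keeps
// that pointer type but borrows the thin layout of `*mut ()`.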