// compiler/rustc_middle/src/ty/layout.rs
use crate::ich::StableHashingContext;
use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use crate::mir::{GeneratorLayout, GeneratorSavedLocal};
use crate::ty::subst::Subst;
use crate::ty::{self, subst::SubstsRef, ReprOptions, Ty, TyCtxt, TypeFoldable};

use rustc_ast as ast;
use rustc_attr as attr;
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_hir as hir;
use rustc_hir::lang_items::LangItem;
use rustc_index::bit_set::BitSet;
use rustc_index::vec::{Idx, IndexVec};
use rustc_session::{config::OptLevel, DataTypeKind, FieldInfo, SizeKind, VariantInfo};
use rustc_span::symbol::{Ident, Symbol};
use rustc_span::DUMMY_SP;
use rustc_target::abi::call::{
    ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, Conv, FnAbi, PassMode, Reg, RegKind,
};
use rustc_target::abi::*;
use rustc_target::spec::{abi::Abi as SpecAbi, HasTargetSpec, PanicStrategy};

use std::cmp;
use std::fmt;
use std::iter;
use std::mem;
use std::num::NonZeroUsize;
use std::ops::Bound;

pub trait IntegerExt {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx>;
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
    fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer;
    fn from_uint_ty<C: HasDataLayout>(cx: &C, uty: ty::UintTy) -> Integer;
    fn repr_discr<'tcx>(
        tcx: TyCtxt<'tcx>,
        ty: Ty<'tcx>,
        repr: &ReprOptions,
        min: i128,
        max: i128,
    ) -> (Integer, bool);
}

impl IntegerExt for Integer {
    #[inline]
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx> {
        match (*self, signed) {
            (I8, false) => tcx.types.u8,
            (I16, false) => tcx.types.u16,
            (I32, false) => tcx.types.u32,
            (I64, false) => tcx.types.u64,
            (I128, false) => tcx.types.u128,
            (I8, true) => tcx.types.i8,
            (I16, true) => tcx.types.i16,
            (I32, true) => tcx.types.i32,
            (I64, true) => tcx.types.i64,
            (I128, true) => tcx.types.i128,
        }
    }

    /// Gets the Integer type from an attr::IntType.
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
        let dl = cx.data_layout();

        match ity {
            attr::SignedInt(ast::IntTy::I8) | attr::UnsignedInt(ast::UintTy::U8) => I8,
            attr::SignedInt(ast::IntTy::I16) | attr::UnsignedInt(ast::UintTy::U16) => I16,
            attr::SignedInt(ast::IntTy::I32) | attr::UnsignedInt(ast::UintTy::U32) => I32,
            attr::SignedInt(ast::IntTy::I64) | attr::UnsignedInt(ast::UintTy::U64) => I64,
            attr::SignedInt(ast::IntTy::I128) | attr::UnsignedInt(ast::UintTy::U128) => I128,
            attr::SignedInt(ast::IntTy::Isize) | attr::UnsignedInt(ast::UintTy::Usize) => {
                dl.ptr_sized_integer()
            }
        }
    }

    fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer {
        match ity {
            ty::IntTy::I8 => I8,
            ty::IntTy::I16 => I16,
            ty::IntTy::I32 => I32,
            ty::IntTy::I64 => I64,
            ty::IntTy::I128 => I128,
            ty::IntTy::Isize => cx.data_layout().ptr_sized_integer(),
        }
    }
    fn from_uint_ty<C: HasDataLayout>(cx: &C, ity: ty::UintTy) -> Integer {
        match ity {
            ty::UintTy::U8 => I8,
            ty::UintTy::U16 => I16,
            ty::UintTy::U32 => I32,
            ty::UintTy::U64 => I64,
            ty::UintTy::U128 => I128,
            ty::UintTy::Usize => cx.data_layout().ptr_sized_integer(),
        }
    }

    /// Finds the appropriate Integer type and signedness for the given
    /// signed discriminant range and `#[repr]` attribute.
    /// N.B.: `u128` values above `i128::MAX` will be treated as signed, but
    /// that shouldn't affect anything, other than maybe debuginfo.
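    ///
    /// For example (an illustrative note, not part of the original docs): with
    /// no `#[repr]` hint, a discriminant range of `0..=200` fits unsigned in a
    /// byte and yields `(I8, false)`, while `-1..=200` needs a signed type and
    /// yields `(I16, true)`.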
    fn repr_discr<'tcx>(
        tcx: TyCtxt<'tcx>,
        ty: Ty<'tcx>,
        repr: &ReprOptions,
        min: i128,
        max: i128,
    ) -> (Integer, bool) {
        // Theoretically, negative values could be larger in unsigned representation
        // than the unsigned representation of the signed minimum. However, if there
        // are any negative values, the only valid unsigned representation is u128
        // which can fit all i128 values, so the result remains unaffected.
        let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
        let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));

        if let Some(ity) = repr.int {
            let discr = Integer::from_attr(&tcx, ity);
            let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
            if discr < fit {
                bug!(
                    "Integer::repr_discr: `#[repr]` hint too small for \
                      discriminant range of enum `{}`",
                    ty
                )
            }
            return (discr, ity.is_signed());
        }

        let at_least = if repr.c() {
            // This is usually I32, however it can be different on some platforms,
            // notably hexagon and arm-none/thumb-none
            tcx.data_layout().c_enum_min_size
        } else {
            // repr(Rust) enums try to be as small as possible
            I8
        };

        // If there are no negative values, we can use the unsigned fit.
        if min >= 0 {
            (cmp::max(unsigned_fit, at_least), false)
        } else {
            (cmp::max(signed_fit, at_least), true)
        }
    }
}

pub trait PrimitiveExt {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
    fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
}

impl PrimitiveExt for Primitive {
    #[inline]
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
        match *self {
            Int(i, signed) => i.to_ty(tcx, signed),
            F32 => tcx.types.f32,
            F64 => tcx.types.f64,
            Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
        }
    }

    /// Return an *integer* type matching this primitive.
    /// Useful in particular when dealing with enum discriminants.
    #[inline]
    fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
167         match *self {
168             Int(i, signed) => i.to_ty(tcx, signed),
169             Pointer => tcx.types.usize,
170             F32 | F64 => bug!("floats do not have an int type"),
171         }
172     }
173 }
174
175 /// The first half of a fat pointer.
176 ///
177 /// - For a trait object, this is the address of the box.
178 /// - For a slice, this is the base address.
179 pub const FAT_PTR_ADDR: usize = 0;
180
181 /// The second half of a fat pointer.
182 ///
183 /// - For a trait object, this is the address of the vtable.
184 /// - For a slice, this is the length.
185 pub const FAT_PTR_EXTRA: usize = 1;

/// The maximum supported number of lanes in a SIMD vector.
///
/// This value is selected based on backend support:
/// * LLVM does not appear to have a vector width limit.
/// * Cranelift stores the base-2 log of the lane count in a 4-bit integer.
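///
/// With a 4-bit log2 field, the largest representable lane count is
/// `1 << 15 = 32768`, which is the value below.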
pub const MAX_SIMD_LANES: u64 = 1 << 0xF;

#[derive(Copy, Clone, Debug, TyEncodable, TyDecodable)]
pub enum LayoutError<'tcx> {
    Unknown(Ty<'tcx>),
    SizeOverflow(Ty<'tcx>),
}

impl<'tcx> fmt::Display for LayoutError<'tcx> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match *self {
            LayoutError::Unknown(ty) => write!(f, "the type `{}` has an unknown layout", ty),
            LayoutError::SizeOverflow(ty) => {
                write!(f, "values of the type `{}` are too big for the current architecture", ty)
            }
        }
    }
}

fn layout_of<'tcx>(
    tcx: TyCtxt<'tcx>,
    query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
    ty::tls::with_related_context(tcx, move |icx| {
        let (param_env, ty) = query.into_parts();

        if !tcx.recursion_limit().value_within_limit(icx.layout_depth) {
            tcx.sess.fatal(&format!("overflow representing the type `{}`", ty));
        }

        // Update the ImplicitCtxt to increase the layout_depth
        let icx = ty::tls::ImplicitCtxt { layout_depth: icx.layout_depth + 1, ..icx.clone() };

        ty::tls::enter_context(&icx, |_| {
            let param_env = param_env.with_reveal_all_normalized(tcx);
            let unnormalized_ty = ty;
            let ty = tcx.normalize_erasing_regions(param_env, ty);
            if ty != unnormalized_ty {
                // Ensure this layout is also cached for the normalized type.
                return tcx.layout_of(param_env.and(ty));
            }

            let cx = LayoutCx { tcx, param_env };

            let layout = cx.layout_of_uncached(ty)?;
            let layout = TyAndLayout { ty, layout };

            cx.record_layout_for_printing(layout);

            // Type-level uninhabitedness should always imply ABI uninhabitedness.
            if tcx.conservative_is_privately_uninhabited(param_env.and(ty)) {
                assert!(layout.abi.is_uninhabited());
            }

            Ok(layout)
        })
    })
}

pub fn provide(providers: &mut ty::query::Providers) {
    *providers = ty::query::Providers { layout_of, ..*providers };
}

pub struct LayoutCx<'tcx, C> {
    pub tcx: C,
    pub param_env: ty::ParamEnv<'tcx>,
}

#[derive(Copy, Clone, Debug)]
enum StructKind {
    /// A tuple, closure, or univariant which cannot be coerced to unsized.
    AlwaysSized,
    /// A univariant, the last field of which may be coerced to unsized.
    MaybeUnsized,
    /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
    Prefixed(Size, Align),
}

// Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
// This is used to go between `memory_index` (source field order to memory order)
// and `inverse_memory_index` (memory order to source field order).
// See also `FieldsShape::Arbitrary::memory_index` for more details.
// FIXME(eddyb) build a better abstraction for permutations, if possible.
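// For example, `invert_mapping(&[2, 0, 1])` returns `[1, 2, 0]`:
// `map[0] = 2` says field 0 sits at memory position 2, so `inverse[2] = 0`.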
fn invert_mapping(map: &[u32]) -> Vec<u32> {
    let mut inverse = vec![0; map.len()];
    for i in 0..map.len() {
        inverse[map[i] as usize] = i as u32;
    }
    inverse
}

impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
    fn scalar_pair(&self, a: Scalar, b: Scalar) -> Layout {
        let dl = self.data_layout();
        let b_align = b.value.align(dl);
        let align = a.value.align(dl).max(b_align).max(dl.aggregate_align);
        let b_offset = a.value.size(dl).align_to(b_align.abi);
        let size = (b_offset + b.value.size(dl)).align_to(align.abi);
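        // As a worked example (illustrative, target-dependent): pairing a
        // 1-byte `bool` with a `u32` whose alignment is 4 gives `b_offset = 4`
        // and `size = 8` once padded up to the pair's alignment.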

        // HACK(nox): We iter on `b` and then `a` because `max_by_key`
        // returns the last maximum.
        let largest_niche = Niche::from_scalar(dl, b_offset, b.clone())
            .into_iter()
            .chain(Niche::from_scalar(dl, Size::ZERO, a.clone()))
            .max_by_key(|niche| niche.available(dl));

        Layout {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Arbitrary {
                offsets: vec![Size::ZERO, b_offset],
                memory_index: vec![0, 1],
            },
            abi: Abi::ScalarPair(a, b),
            largest_niche,
            align,
            size,
        }
    }

    fn univariant_uninterned(
        &self,
        ty: Ty<'tcx>,
        fields: &[TyAndLayout<'_>],
        repr: &ReprOptions,
        kind: StructKind,
    ) -> Result<Layout, LayoutError<'tcx>> {
        let dl = self.data_layout();
        let pack = repr.pack;
        if pack.is_some() && repr.align.is_some() {
            self.tcx.sess.delay_span_bug(DUMMY_SP, "struct cannot be packed and aligned");
            return Err(LayoutError::Unknown(ty));
        }

        let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };

        let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();

        let optimize = !repr.inhibit_struct_field_reordering_opt();
        if optimize {
            let end =
                if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
            let optimizing = &mut inverse_memory_index[..end];
            let field_align = |f: &TyAndLayout<'_>| {
                if let Some(pack) = pack { f.align.abi.min(pack) } else { f.align.abi }
            };
            match kind {
                StructKind::AlwaysSized | StructKind::MaybeUnsized => {
                    optimizing.sort_by_key(|&x| {
                        // Place ZSTs first to avoid "interesting offsets",
                        // especially with only one or two non-ZST fields.
                        let f = &fields[x as usize];
                        (!f.is_zst(), cmp::Reverse(field_align(f)))
                    });
                }
                StructKind::Prefixed(..) => {
                    // Sort in ascending alignment so that the layout stays optimal
                    // regardless of the prefix.
                    optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
                }
            }
        }
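        // As a concrete illustration of the reordering above: source fields
        // `(u8, u32, u8)` are laid out as `(u32, u8, u8)`, giving offsets
        // 0, 4, 5 and a total size of 8, instead of 12 bytes in source order
        // with padding after each `u8`.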

        // inverse_memory_index holds field indices by increasing memory offset.
        // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
        // We now write field offsets to the corresponding offset slot;
        // field 5 with offset 0 puts 0 in offsets[5].
        // At the bottom of this function, we invert `inverse_memory_index` to
        // produce `memory_index` (see `invert_mapping`).

        let mut sized = true;
        let mut offsets = vec![Size::ZERO; fields.len()];
        let mut offset = Size::ZERO;
        let mut largest_niche = None;
        let mut largest_niche_available = 0;

        if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
            let prefix_align =
                if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
            align = align.max(AbiAndPrefAlign::new(prefix_align));
            offset = prefix_size.align_to(prefix_align);
        }

        for &i in &inverse_memory_index {
            let field = fields[i as usize];
            if !sized {
                self.tcx.sess.delay_span_bug(
                    DUMMY_SP,
                    &format!(
                        "univariant: field #{} of `{}` comes after unsized field",
                        offsets.len(),
                        ty
                    ),
                );
            }

            if field.is_unsized() {
                sized = false;
            }

            // Invariant: offset < dl.obj_size_bound() <= 1<<61
            let field_align = if let Some(pack) = pack {
                field.align.min(AbiAndPrefAlign::new(pack))
            } else {
                field.align
            };
            offset = offset.align_to(field_align.abi);
            align = align.max(field_align);

            debug!("univariant offset: {:?} field: {:#?}", offset, field);
            offsets[i as usize] = offset;

            if !repr.hide_niche() {
                if let Some(mut niche) = field.largest_niche.clone() {
                    let available = niche.available(dl);
                    if available > largest_niche_available {
                        largest_niche_available = available;
                        niche.offset += offset;
                        largest_niche = Some(niche);
                    }
                }
            }

            offset = offset.checked_add(field.size, dl).ok_or(LayoutError::SizeOverflow(ty))?;
        }

        if let Some(repr_align) = repr.align {
            align = align.max(AbiAndPrefAlign::new(repr_align));
        }

        debug!("univariant min_size: {:?}", offset);
        let min_size = offset;

        // As stated above, inverse_memory_index holds field indices by increasing offset.
        // This makes it an already-sorted view of the offsets vec.
        // To invert it, consider: if field 5 has offset 0, then
        // inverse_memory_index[0] is 5, and memory_index[5] should be 0.
        // Note: if we didn't optimize, inverse_memory_index is already the
        // identity permutation, so it can be used as memory_index directly.

        let memory_index =
            if optimize { invert_mapping(&inverse_memory_index) } else { inverse_memory_index };

        let size = min_size.align_to(align.abi);
        let mut abi = Abi::Aggregate { sized };

        // Unpack newtype ABIs and find scalar pairs.
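        // For example, a newtype like `struct Wrapper(u64)` can reuse the
        // `u64` scalar ABI, and a struct whose two non-ZST fields are both
        // scalars may get a `ScalarPair` ABI, subject to the offset, size,
        // and alignment checks below.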
        if sized && size.bytes() > 0 {
            // All other fields must be ZSTs.
            let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());

            match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
                // We have exactly one non-ZST field.
                (Some((i, field)), None, None) => {
                    // Field fills the struct and it has a scalar or scalar pair ABI.
                    if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size
                    {
                        match field.abi {
                            // For plain scalars, or vectors of them, we can't unpack
                            // newtypes for `#[repr(C)]`, as that affects C ABIs.
                            Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
                                abi = field.abi.clone();
                            }
                            // But scalar pairs are Rust-specific and get
                            // treated as aggregates by C ABIs anyway.
                            Abi::ScalarPair(..) => {
                                abi = field.abi.clone();
                            }
                            _ => {}
                        }
                    }
                }

                // Two non-ZST fields, and they're both scalars.
                (
                    Some((i, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(ref a), .. }, .. })),
                    Some((j, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(ref b), .. }, .. })),
                    None,
                ) => {
                    // Order by the memory placement, not source order.
                    let ((i, a), (j, b)) =
                        if offsets[i] < offsets[j] { ((i, a), (j, b)) } else { ((j, b), (i, a)) };
                    let pair = self.scalar_pair(a.clone(), b.clone());
                    let pair_offsets = match pair.fields {
                        FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
                            assert_eq!(memory_index, &[0, 1]);
                            offsets
                        }
                        _ => bug!(),
                    };
                    if offsets[i] == pair_offsets[0]
                        && offsets[j] == pair_offsets[1]
                        && align == pair.align
                        && size == pair.size
                    {
                        // We can use `ScalarPair` only when it matches our
                        // already computed layout (including `#[repr(C)]`).
                        abi = pair.abi;
                    }
                }

                _ => {}
            }
        }

        if sized && fields.iter().any(|f| f.abi.is_uninhabited()) {
            abi = Abi::Uninhabited;
        }

        Ok(Layout {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Arbitrary { offsets, memory_index },
            abi,
            largest_niche,
            align,
            size,
        })
    }

    fn layout_of_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx Layout, LayoutError<'tcx>> {
        let tcx = self.tcx;
        let param_env = self.param_env;
        let dl = self.data_layout();
        let scalar_unit = |value: Primitive| {
            let bits = value.size(dl).bits();
            assert!(bits <= 128);
            Scalar { value, valid_range: WrappingRange { start: 0, end: (!0 >> (128 - bits)) } }
        };
        let scalar = |value: Primitive| tcx.intern_layout(Layout::scalar(self, scalar_unit(value)));

        let univariant = |fields: &[TyAndLayout<'_>], repr: &ReprOptions, kind| {
            Ok(tcx.intern_layout(self.univariant_uninterned(ty, fields, repr, kind)?))
        };
        debug_assert!(!ty.has_infer_types_or_consts());

        Ok(match *ty.kind() {
            // Basic scalars.
            ty::Bool => tcx.intern_layout(Layout::scalar(
                self,
                Scalar { value: Int(I8, false), valid_range: WrappingRange { start: 0, end: 1 } },
            )),
            ty::Char => tcx.intern_layout(Layout::scalar(
                self,
                Scalar {
                    value: Int(I32, false),
                    valid_range: WrappingRange { start: 0, end: 0x10FFFF },
                },
            )),
            ty::Int(ity) => scalar(Int(Integer::from_int_ty(dl, ity), true)),
            ty::Uint(ity) => scalar(Int(Integer::from_uint_ty(dl, ity), false)),
            ty::Float(fty) => scalar(match fty {
                ty::FloatTy::F32 => F32,
                ty::FloatTy::F64 => F64,
            }),
            ty::FnPtr(_) => {
                let mut ptr = scalar_unit(Pointer);
                ptr.valid_range = ptr.valid_range.with_start(1);
                tcx.intern_layout(Layout::scalar(self, ptr))
            }

            // The never type.
            ty::Never => tcx.intern_layout(Layout {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldsShape::Primitive,
                abi: Abi::Uninhabited,
                largest_niche: None,
                align: dl.i8_align,
                size: Size::ZERO,
            }),

            // Potentially-wide pointers.
            ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                let mut data_ptr = scalar_unit(Pointer);
                if !ty.is_unsafe_ptr() {
                    data_ptr.valid_range = data_ptr.valid_range.with_start(1);
                }

                let pointee = tcx.normalize_erasing_regions(param_env, pointee);
                if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
                    return Ok(tcx.intern_layout(Layout::scalar(self, data_ptr)));
                }

                let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
                let metadata = match unsized_part.kind() {
                    ty::Foreign(..) => {
                        return Ok(tcx.intern_layout(Layout::scalar(self, data_ptr)));
                    }
                    ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)),
                    ty::Dynamic(..) => {
                        let mut vtable = scalar_unit(Pointer);
                        vtable.valid_range = vtable.valid_range.with_start(1);
                        vtable
                    }
                    _ => return Err(LayoutError::Unknown(unsized_part)),
                };

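                // For example, `&[u8]` lowers to (data pointer, `usize` length),
                // and `&dyn Trait` to (data pointer, non-null vtable pointer).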
                // Effectively a (ptr, meta) tuple.
                tcx.intern_layout(self.scalar_pair(data_ptr, metadata))
            }

            // Arrays and slices.
            ty::Array(element, mut count) => {
                if count.has_projections() {
                    count = tcx.normalize_erasing_regions(param_env, count);
                    if count.has_projections() {
                        return Err(LayoutError::Unknown(ty));
                    }
                }

                let count = count.try_eval_usize(tcx, param_env).ok_or(LayoutError::Unknown(ty))?;
                let element = self.layout_of(element)?;
                let size =
                    element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;

                let abi =
                    if count != 0 && tcx.conservative_is_privately_uninhabited(param_env.and(ty)) {
                        Abi::Uninhabited
                    } else {
                        Abi::Aggregate { sized: true }
                    };

                let largest_niche = if count != 0 { element.largest_niche.clone() } else { None };

                tcx.intern_layout(Layout {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldsShape::Array { stride: element.size, count },
                    abi,
                    largest_niche,
                    align: element.align,
                    size,
                })
            }
            ty::Slice(element) => {
                let element = self.layout_of(element)?;
                tcx.intern_layout(Layout {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldsShape::Array { stride: element.size, count: 0 },
                    abi: Abi::Aggregate { sized: false },
                    largest_niche: None,
                    align: element.align,
                    size: Size::ZERO,
                })
            }
            ty::Str => tcx.intern_layout(Layout {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
                abi: Abi::Aggregate { sized: false },
                largest_niche: None,
                align: dl.i8_align,
                size: Size::ZERO,
            }),

            // Odd unit types.
            ty::FnDef(..) => univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?,
            ty::Dynamic(..) | ty::Foreign(..) => {
                let mut unit = self.univariant_uninterned(
                    ty,
                    &[],
                    &ReprOptions::default(),
                    StructKind::AlwaysSized,
                )?;
                match unit.abi {
                    Abi::Aggregate { ref mut sized } => *sized = false,
                    _ => bug!(),
                }
                tcx.intern_layout(unit)
            }

            ty::Generator(def_id, substs, _) => self.generator_layout(ty, def_id, substs)?,

            ty::Closure(_, ref substs) => {
                let tys = substs.as_closure().upvar_tys();
                univariant(
                    &tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    StructKind::AlwaysSized,
                )?
            }

            ty::Tuple(tys) => {
                let kind =
                    if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };

                univariant(
                    &tys.iter()
                        .map(|k| self.layout_of(k.expect_ty()))
                        .collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    kind,
                )?
            }

            // SIMD vector types.
            ty::Adt(def, substs) if def.repr.simd() => {
                if !def.is_struct() {
                    // Should have yielded E0517 by now.
                    tcx.sess.delay_span_bug(
                        DUMMY_SP,
                        "#[repr(simd)] was applied to an ADT that is not a struct",
                    );
                    return Err(LayoutError::Unknown(ty));
                }

                // Supported SIMD vectors are homogeneous ADTs with at least one field:
                //
                // * #[repr(simd)] struct S(T, T, T, T);
                // * #[repr(simd)] struct S { x: T, y: T, z: T, w: T }
                // * #[repr(simd)] struct S([T; 4])
                //
                // where T is a primitive scalar (integer/float/pointer).

                // SIMD vectors with zero fields are not supported.
                // (should be caught by typeck)
                if def.non_enum_variant().fields.is_empty() {
                    tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
                }

                // Type of the first ADT field:
                let f0_ty = def.non_enum_variant().fields[0].ty(tcx, substs);

                // Heterogeneous SIMD vectors are not supported:
                // (should be caught by typeck)
                for fi in &def.non_enum_variant().fields {
                    if fi.ty(tcx, substs) != f0_ty {
                        tcx.sess.fatal(&format!("monomorphising heterogeneous SIMD type `{}`", ty));
                    }
                }

                // The element type and number of elements of the SIMD vector
                // are obtained from:
                //
                // * the element type and length of the single array field, if
                // the first field is of array type, or
                //
                // * the homogeneous field type and the number of fields.
                let (e_ty, e_len, is_array) = if let ty::Array(e_ty, _) = f0_ty.kind() {
                    // First ADT field is an array:

                    // SIMD vectors with multiple array fields are not supported:
                    // (should be caught by typeck)
                    if def.non_enum_variant().fields.len() != 1 {
                        tcx.sess.fatal(&format!(
                            "monomorphising SIMD type `{}` with more than one array field",
                            ty
                        ));
                    }

                    // Extract the number of elements from the layout of the array field:
                    let len = if let Ok(TyAndLayout {
                        layout: Layout { fields: FieldsShape::Array { count, .. }, .. },
                        ..
                    }) = self.layout_of(f0_ty)
                    {
                        count
                    } else {
                        return Err(LayoutError::Unknown(ty));
                    };

                    (*e_ty, *len, true)
                } else {
                    // First ADT field is not an array:
                    (f0_ty, def.non_enum_variant().fields.len() as _, false)
                };

                // SIMD vectors of zero length are not supported.
                // Additionally, lengths are capped at 2^15 (`MAX_SIMD_LANES`) as a
                // fixed maximum backends must support.
                //
                // Can't be caught in typeck if the array length is generic.
                if e_len == 0 {
                    tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
                } else if e_len > MAX_SIMD_LANES {
                    tcx.sess.fatal(&format!(
                        "monomorphising SIMD type `{}` of length greater than {}",
                        ty, MAX_SIMD_LANES,
                    ));
                }

                // Compute the ABI of the element type:
                let e_ly = self.layout_of(e_ty)?;
                let e_abi = if let Abi::Scalar(ref scalar) = e_ly.abi {
                    scalar.clone()
                } else {
                    // This error isn't caught in typeck, e.g., if
                    // the element type of the vector is generic.
                    tcx.sess.fatal(&format!(
                        "monomorphising SIMD type `{}` with a non-primitive-scalar \
                        (integer/float/pointer) element type `{}`",
                        ty, e_ty
                    ))
                };

                // Compute the size and alignment of the vector:
                let size = e_ly.size.checked_mul(e_len, dl).ok_or(LayoutError::SizeOverflow(ty))?;
                let align = dl.vector_align(size);
                let size = size.align_to(align.abi);

                // Compute the placement of the vector fields:
                let fields = if is_array {
                    FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] }
                } else {
                    FieldsShape::Array { stride: e_ly.size, count: e_len }
                };

                tcx.intern_layout(Layout {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields,
                    abi: Abi::Vector { element: e_abi, count: e_len },
                    largest_niche: e_ly.largest_niche.clone(),
                    size,
                    align,
                })
            }

            // ADTs.
            ty::Adt(def, substs) => {
                // Cache the field layouts.
                let variants = def
                    .variants
                    .iter()
                    .map(|v| {
                        v.fields
                            .iter()
                            .map(|field| self.layout_of(field.ty(tcx, substs)))
                            .collect::<Result<Vec<_>, _>>()
                    })
                    .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                if def.is_union() {
                    if def.repr.pack.is_some() && def.repr.align.is_some() {
                        self.tcx.sess.delay_span_bug(
                            tcx.def_span(def.did),
                            "union cannot be packed and aligned",
                        );
                        return Err(LayoutError::Unknown(ty));
                    }

                    let mut align =
                        if def.repr.pack.is_some() { dl.i8_align } else { dl.aggregate_align };

                    if let Some(repr_align) = def.repr.align {
                        align = align.max(AbiAndPrefAlign::new(repr_align));
                    }

                    let optimize = !def.repr.inhibit_union_abi_opt();
                    let mut size = Size::ZERO;
                    let mut abi = Abi::Aggregate { sized: true };
                    let index = VariantIdx::new(0);
                    for field in &variants[index] {
                        assert!(!field.is_unsized());
                        align = align.max(field.align);

                        // If all non-ZST fields have the same ABI, forward this ABI
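                        // (e.g. a union of `u32` and `char` fields keeps a scalar
                        // ABI once both are widened to a full-range `I32` below,
                        // while mixing `u32` and `u64` resets it to `Aggregate`)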
                        if optimize && !field.is_zst() {
                            // Normalize scalar_unit to the maximal valid range
                            let field_abi = match &field.abi {
                                Abi::Scalar(x) => Abi::Scalar(scalar_unit(x.value)),
                                Abi::ScalarPair(x, y) => {
                                    Abi::ScalarPair(scalar_unit(x.value), scalar_unit(y.value))
                                }
                                Abi::Vector { element: x, count } => {
                                    Abi::Vector { element: scalar_unit(x.value), count: *count }
                                }
                                Abi::Uninhabited | Abi::Aggregate { .. } => {
                                    Abi::Aggregate { sized: true }
                                }
                            };

                            if size == Size::ZERO {
                                // first non ZST: initialize 'abi'
                                abi = field_abi;
                            } else if abi != field_abi {
                                // different fields have different ABI: reset to Aggregate
                                abi = Abi::Aggregate { sized: true };
                            }
                        }

                        size = cmp::max(size, field.size);
                    }

                    if let Some(pack) = def.repr.pack {
                        align = align.min(AbiAndPrefAlign::new(pack));
                    }

                    return Ok(tcx.intern_layout(Layout {
                        variants: Variants::Single { index },
                        fields: FieldsShape::Union(
                            NonZeroUsize::new(variants[index].len())
                                .ok_or(LayoutError::Unknown(ty))?,
                        ),
                        abi,
                        largest_niche: None,
                        align,
                        size: size.align_to(align.abi),
                    }));
                }

                // A variant is absent if it's uninhabited and only has ZST fields.
                // Present uninhabited variants only require space for their fields,
                // but *not* an encoding of the discriminant (e.g., a tag value).
                // See issue #49298 for more details on the need to leave space
                // for non-ZST uninhabited data (mostly partial initialization).
                let absent = |fields: &[TyAndLayout<'_>]| {
                    let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
                    let is_zst = fields.iter().all(|f| f.is_zst());
                    uninhabited && is_zst
                };
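                // For example, a variant holding only `!` is absent (uninhabited
                // and all-ZST), whereas a variant holding `(u32, !)` is
                // uninhabited but still present, reserving space for the `u32`.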
                let (present_first, present_second) = {
                    let mut present_variants = variants
                        .iter_enumerated()
                        .filter_map(|(i, v)| if absent(v) { None } else { Some(i) });
                    (present_variants.next(), present_variants.next())
                };
                let present_first = match present_first {
                    Some(present_first) => present_first,
                    // Uninhabited because it has no variants, or only absent ones.
                    None if def.is_enum() => {
                        return Ok(tcx.layout_of(param_env.and(tcx.types.never))?.layout);
                    }
                    // If it's a struct, still compute a layout so that we can still compute the
                    // field offsets.
                    None => VariantIdx::new(0),
                };

                let is_struct = !def.is_enum() ||
                    // Only one variant is present.
                    (present_second.is_none() &&
                    // Representation optimizations are allowed.
                    !def.repr.inhibit_enum_layout_opt());
                if is_struct {
                    // Struct, or univariant enum equivalent to a struct.
                    // (Typechecking will reject discriminant-sizing attrs.)

                    let v = present_first;
                    let kind = if def.is_enum() || variants[v].is_empty() {
                        StructKind::AlwaysSized
                    } else {
                        let param_env = tcx.param_env(def.did);
                        let last_field = def.variants[v].fields.last().unwrap();
                        let always_sized =
                            tcx.type_of(last_field.did).is_sized(tcx.at(DUMMY_SP), param_env);
                        if !always_sized {
                            StructKind::MaybeUnsized
                        } else {
                            StructKind::AlwaysSized
                        }
                    };

                    let mut st = self.univariant_uninterned(ty, &variants[v], &def.repr, kind)?;
                    st.variants = Variants::Single { index: v };
                    let (start, end) = self.tcx.layout_scalar_valid_range(def.did);
                    match st.abi {
                        Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
                            // the asserts ensure that we are not using the
                            // `#[rustc_layout_scalar_valid_range(n)]`
                            // attribute to widen the range of anything as that would probably
                            // result in UB somewhere
                            // FIXME(eddyb) the asserts are probably not needed,
                            // as larger validity ranges would result in missed
                            // optimizations, *not* wrongly assuming the inner
                            // value is valid. e.g. unions enlarge validity ranges,
                            // because the values may be uninitialized.
                            if let Bound::Included(start) = start {
                                // FIXME(eddyb) this might be incorrect - it doesn't
                                // account for wrap-around (end < start) ranges.
                                assert!(scalar.valid_range.start <= start);
                                scalar.valid_range.start = start;
                            }
                            if let Bound::Included(end) = end {
                                // FIXME(eddyb) this might be incorrect - it doesn't
                                // account for wrap-around (end < start) ranges.
                                assert!(scalar.valid_range.end >= end);
                                scalar.valid_range.end = end;
                            }

                            // Update `largest_niche` if we have introduced a larger niche.
                            let niche = if def.repr.hide_niche() {
                                None
                            } else {
                                Niche::from_scalar(dl, Size::ZERO, scalar.clone())
                            };
                            if let Some(niche) = niche {
                                match &st.largest_niche {
                                    Some(largest_niche) => {
                                        // Replace the existing niche even if they're equal,
                                        // because this one is at a lower offset.
                                        if largest_niche.available(dl) <= niche.available(dl) {
                                            st.largest_niche = Some(niche);
                                        }
                                    }
                                    None => st.largest_niche = Some(niche),
                                }
                            }
                        }
                        _ => assert!(
                            start == Bound::Unbounded && end == Bound::Unbounded,
                            "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
                            def,
                            st,
                        ),
                    }

                    return Ok(tcx.intern_layout(st));
                }

                // At this point, we have handled all unions and
                // structs. (We have also handled univariant enums
                // that allow representation optimization.)
                assert!(def.is_enum());

                // The current code for niche-filling relies on variant indices
                // instead of actual discriminants, so dataful enums with
                // explicit discriminants (RFC #2363) would misbehave.
                let no_explicit_discriminants = def
                    .variants
                    .iter_enumerated()
                    .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i.as_u32()));

                let mut niche_filling_layout = None;

                // Niche-filling enum optimization.
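                // (The canonical example is `Option<&T>`: the reference's
                // non-null niche encodes `None`, so the enum stays
                // pointer-sized with no separate tag field.)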
                if !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants {
                    let mut dataful_variant = None;
                    let mut niche_variants = VariantIdx::MAX..=VariantIdx::new(0);

                    // Find one non-ZST variant.
                    'variants: for (v, fields) in variants.iter_enumerated() {
                        if absent(fields) {
                            continue 'variants;
                        }
                        for f in fields {
                            if !f.is_zst() {
                                if dataful_variant.is_none() {
                                    dataful_variant = Some(v);
                                    continue 'variants;
                                } else {
                                    dataful_variant = None;
                                    break 'variants;
                                }
                            }
                        }
                        niche_variants = *niche_variants.start().min(&v)..=v;
                    }

                    if niche_variants.start() > niche_variants.end() {
                        dataful_variant = None;
                    }

                    if let Some(i) = dataful_variant {
                        let count = (niche_variants.end().as_u32()
                            - niche_variants.start().as_u32()
                            + 1) as u128;

                        // Find the field with the largest niche
                        let niche_candidate = variants[i]
                            .iter()
                            .enumerate()
                            .filter_map(|(j, &field)| Some((j, field.largest_niche.as_ref()?)))
                            .max_by_key(|(_, niche)| niche.available(dl));

                        if let Some((field_index, niche, (niche_start, niche_scalar))) =
                            niche_candidate.and_then(|(field_index, niche)| {
                                Some((field_index, niche, niche.reserve(self, count)?))
                            })
                        {
                            let mut align = dl.aggregate_align;
                            let st = variants
                                .iter_enumerated()
                                .map(|(j, v)| {
                                    let mut st = self.univariant_uninterned(
                                        ty,
                                        v,
                                        &def.repr,
                                        StructKind::AlwaysSized,
                                    )?;
                                    st.variants = Variants::Single { index: j };

                                    align = align.max(st.align);

                                    Ok(st)
                                })
                                .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                            let offset = st[i].fields.offset(field_index) + niche.offset;
                            let size = st[i].size;

                            let abi = if st.iter().all(|v| v.abi.is_uninhabited()) {
                                Abi::Uninhabited
                            } else {
                                match st[i].abi {
                                    Abi::Scalar(_) => Abi::Scalar(niche_scalar.clone()),
                                    Abi::ScalarPair(ref first, ref second) => {
                                        // We need to use scalar_unit to reset the
                                        // valid range to the maximal one for that
                                        // primitive, because only the niche is
                                        // guaranteed to be initialised, not the
                                        // other primitive.
                                        if offset.bytes() == 0 {
                                            Abi::ScalarPair(
                                                niche_scalar.clone(),
                                                scalar_unit(second.value),
                                            )
                                        } else {
                                            Abi::ScalarPair(
                                                scalar_unit(first.value),
                                                niche_scalar.clone(),
                                            )
                                        }
                                    }
                                    _ => Abi::Aggregate { sized: true },
                                }
                            };

                            let largest_niche =
                                Niche::from_scalar(dl, offset, niche_scalar.clone());

                            niche_filling_layout = Some(Layout {
                                variants: Variants::Multiple {
                                    tag: niche_scalar,
                                    tag_encoding: TagEncoding::Niche {
                                        dataful_variant: i,
                                        niche_variants,
                                        niche_start,
                                    },
                                    tag_field: 0,
                                    variants: st,
                                },
                                fields: FieldsShape::Arbitrary {
                                    offsets: vec![offset],
                                    memory_index: vec![0],
                                },
                                abi,
                                largest_niche,
                                size,
                                align,
                            });
                        }
                    }
                }

                let (mut min, mut max) = (i128::MAX, i128::MIN);
                let discr_type = def.repr.discr_type();
                let bits = Integer::from_attr(self, discr_type).size().bits();
                for (i, discr) in def.discriminants(tcx) {
                    if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
                        continue;
                    }
                    let mut x = discr.val as i128;
                    if discr_type.is_signed() {
                        // sign extend the raw representation to be an i128
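                        // (e.g. with `repr(i8)`, a raw value of 0xFF becomes
                        // `(0xFF << 120) >> 120 == -1` as an `i128`)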
                        x = (x << (128 - bits)) >> (128 - bits);
                    }
                    if x < min {
                        min = x;
                    }
                    if x > max {
                        max = x;
                    }
                }
                // We might have no inhabited variants, so pretend there's at least one.
                if (min, max) == (i128::MAX, i128::MIN) {
                    min = 0;
                    max = 0;
                }
                assert!(min <= max, "discriminant range is {}...{}", min, max);
                let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max);

                let mut align = dl.aggregate_align;
                let mut size = Size::ZERO;

                // We're interested in the smallest alignment, so start large.
                let mut start_align = Align::from_bytes(256).unwrap();
                assert_eq!(Integer::for_align(dl, start_align), None);

                // repr(C) on an enum tells us to make a (tag, union) layout,
                // so we need to grow the prefix alignment to be at least
                // the alignment of the union. (This value is used both for
                // determining the alignment of the overall enum, and for
                // determining the alignment of the payload after the tag.)
                let mut prefix_align = min_ity.align(dl).abi;
                if def.repr.c() {
                    for fields in &variants {
                        for field in fields {
                            prefix_align = prefix_align.max(field.align.abi);
                        }
                    }
                }

                // Create the set of structs that represent each variant.
                let mut layout_variants = variants
                    .iter_enumerated()
                    .map(|(i, field_layouts)| {
                        let mut st = self.univariant_uninterned(
                            ty,
                            &field_layouts,
                            &def.repr,
                            StructKind::Prefixed(min_ity.size(), prefix_align),
                        )?;
                        st.variants = Variants::Single { index: i };
                        // Find the first field we can't move later
                        // to make room for a larger discriminant.
                        for field in
                            st.fields.index_by_increasing_offset().map(|j| field_layouts[j])
                        {
                            if !field.is_zst() || field.align.abi.bytes() != 1 {
                                start_align = start_align.min(field.align.abi);
                                break;
                            }
                        }
                        size = cmp::max(size, st.size);
                        align = align.max(st.align);
                        Ok(st)
                    })
                    .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                // Align the maximum variant size to the largest alignment.
                size = size.align_to(align.abi);

                if size.bytes() >= dl.obj_size_bound() {
                    return Err(LayoutError::SizeOverflow(ty));
                }

                let typeck_ity = Integer::from_attr(dl, def.repr.discr_type());
                if typeck_ity < min_ity {
1215                     // It is a bug if Layout decided on a greater discriminant size than
1216                     // typeck did at this point (based on the values the discriminant can
1217                     // take on), because the discriminant will be loaded and then stored
1218                     // into a variable of the type computed by typeck. Consider this buggy
1219                     // case: typeck decided on a byte-sized discriminant, but layout thinks
1220                     // 16 bits are needed to store all discriminant values. Then, in
1221                     // codegen, storing the 16-bit discriminant into an 8-bit temporary
1222                     // would have to discard some of the space necessary to represent it
1223                     // (or layout is wrong in thinking it needs 16 bits).
1224                     bug!(
1225                         "layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
1226                         min_ity,
1227                         typeck_ity
1228                     );
1229                     // However, it is fine to make the discr type arbitrarily large (as an
1230                     // optimisation) after this point; we'll just truncate the value we load in codegen.
1231                 }
1232
1233                 // Check to see if we should use a different type for the
1234                 // discriminant. We can safely use a type with the same size
1235                 // as the alignment of the first field of each variant.
1236                 // We increase the size of the discriminant to avoid LLVM copying
1237                 // padding when it doesn't need to. Copying that padding normally causes
1238                 // unaligned loads/stores and excessive memcpy/memset operations. By using a
1239                 // bigger integer size, LLVM can be sure about its contents and
1240                 // won't be so conservative.
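                // For instance (assuming a default-repr enum such as
                // `enum E { A(u32), B }`): the minimal tag type is a single byte, but
                // the first field is 4-byte aligned, so widening the tag to `i32`
                // fills what would otherwise be padding bytes before the field.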
1241
1242                 // Use the initial field alignment
1243                 let mut ity = if def.repr.c() || def.repr.int.is_some() {
1244                     min_ity
1245                 } else {
1246                     Integer::for_align(dl, start_align).unwrap_or(min_ity)
1247                 };
1248
1249                 // If the alignment-derived integer is not larger than the chosen
1250                 // discriminant size, keep the minimal discriminant type as-is.
1251                 if ity <= min_ity {
1252                     ity = min_ity;
1253                 } else {
1254                     // Patch up the variants' first few fields.
1255                     let old_ity_size = min_ity.size();
1256                     let new_ity_size = ity.size();
1257                     for variant in &mut layout_variants {
1258                         match variant.fields {
1259                             FieldsShape::Arbitrary { ref mut offsets, .. } => {
1260                                 for i in offsets {
1261                                     if *i <= old_ity_size {
1262                                         assert_eq!(*i, old_ity_size);
1263                                         *i = new_ity_size;
1264                                     }
1265                                 }
1266                                 // We might be making the struct larger.
1267                                 if variant.size <= old_ity_size {
1268                                     variant.size = new_ity_size;
1269                                 }
1270                             }
1271                             _ => bug!(),
1272                         }
1273                     }
1274                 }
1275
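                // E.g. for an `I8` tag, `tag_mask == 0xff`; masking `min`/`max` below
                // truncates their sign-extended `i128` values back to the tag's width.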
1276                 let tag_mask = !0u128 >> (128 - ity.size().bits());
1277                 let tag = Scalar {
1278                     value: Int(ity, signed),
1279                     valid_range: WrappingRange {
1280                         start: (min as u128 & tag_mask),
1281                         end: (max as u128 & tag_mask),
1282                     },
1283                 };
1284                 let mut abi = Abi::Aggregate { sized: true };
1285                 if tag.value.size(dl) == size {
1286                     abi = Abi::Scalar(tag.clone());
1287                 } else {
1288                     // Try to use a ScalarPair for all tagged enums.
1289                     let mut common_prim = None;
1290                     for (field_layouts, layout_variant) in iter::zip(&variants, &layout_variants) {
1291                         let offsets = match layout_variant.fields {
1292                             FieldsShape::Arbitrary { ref offsets, .. } => offsets,
1293                             _ => bug!(),
1294                         };
1295                         let mut fields =
1296                             iter::zip(field_layouts, offsets).filter(|p| !p.0.is_zst());
1297                         let (field, offset) = match (fields.next(), fields.next()) {
1298                             (None, None) => continue,
1299                             (Some(pair), None) => pair,
1300                             _ => {
1301                                 common_prim = None;
1302                                 break;
1303                             }
1304                         };
1305                         let prim = match field.abi {
1306                             Abi::Scalar(ref scalar) => scalar.value,
1307                             _ => {
1308                                 common_prim = None;
1309                                 break;
1310                             }
1311                         };
1312                         if let Some(pair) = common_prim {
1313                             // This is pretty conservative. We could go fancier
1314                             // by conflating things like i32 and u32, or even
1315                             // realising that (u8, u8) could just cohabit with
1316                             // u16 or even u32.
1317                             if pair != (prim, offset) {
1318                                 common_prim = None;
1319                                 break;
1320                             }
1321                         } else {
1322                             common_prim = Some((prim, offset));
1323                         }
1324                     }
1325                     if let Some((prim, offset)) = common_prim {
1326                         let pair = self.scalar_pair(tag.clone(), scalar_unit(prim));
1327                         let pair_offsets = match pair.fields {
1328                             FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
1329                                 assert_eq!(memory_index, &[0, 1]);
1330                                 offsets
1331                             }
1332                             _ => bug!(),
1333                         };
1334                         if pair_offsets[0] == Size::ZERO
1335                             && pair_offsets[1] == *offset
1336                             && align == pair.align
1337                             && size == pair.size
1338                         {
1339                             // We can use `ScalarPair` only when it matches our
1340                             // already computed layout (including `#[repr(C)]`).
1341                             abi = pair.abi;
1342                         }
1343                     }
1344                 }
1345
1346                 if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
1347                     abi = Abi::Uninhabited;
1348                 }
1349
1350                 let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag.clone());
1351
1352                 let tagged_layout = Layout {
1353                     variants: Variants::Multiple {
1354                         tag,
1355                         tag_encoding: TagEncoding::Direct,
1356                         tag_field: 0,
1357                         variants: layout_variants,
1358                     },
1359                     fields: FieldsShape::Arbitrary {
1360                         offsets: vec![Size::ZERO],
1361                         memory_index: vec![0],
1362                     },
1363                     largest_niche,
1364                     abi,
1365                     align,
1366                     size,
1367                 };
1368
1369                 let best_layout = match (tagged_layout, niche_filling_layout) {
1370                     (tagged_layout, Some(niche_filling_layout)) => {
1371                         // Pick the smaller layout; otherwise,
1372                         // pick the layout with the larger niche; otherwise,
1373                         // pick tagged as it has simpler codegen.
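                        // (`min_by_key` returns its first argument on ties, and
                        // `cmp::Reverse` makes a larger niche compare smaller, so the
                        // tuple key below encodes exactly that preference order.)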
1374                         cmp::min_by_key(tagged_layout, niche_filling_layout, |layout| {
1375                             let niche_size =
1376                                 layout.largest_niche.as_ref().map_or(0, |n| n.available(dl));
1377                             (layout.size, cmp::Reverse(niche_size))
1378                         })
1379                     }
1380                     (tagged_layout, None) => tagged_layout,
1381                 };
1382
1383                 tcx.intern_layout(best_layout)
1384             }
1385
1386             // Types with no meaningful known layout.
1387             ty::Projection(_) | ty::Opaque(..) => {
1388                 // NOTE(eddyb) `layout_of` query should've normalized these away,
1389                 // if that was possible, so there's no reason to try again here.
1390                 return Err(LayoutError::Unknown(ty));
1391             }
1392
1393             ty::Placeholder(..) | ty::GeneratorWitness(..) | ty::Infer(_) => {
1394                 bug!("Layout::compute: unexpected type `{}`", ty)
1395             }
1396
1397             ty::Bound(..) | ty::Param(_) | ty::Error(_) => {
1398                 return Err(LayoutError::Unknown(ty));
1399             }
1400         })
1401     }
1402 }
1403
1404 /// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
1405 #[derive(Clone, Debug, PartialEq)]
1406 enum SavedLocalEligibility {
1407     Unassigned,
1408     Assigned(VariantIdx),
1409     // FIXME: Use newtype_index so we aren't wasting bytes
1410     Ineligible(Option<u32>),
1411 }
1412
1413 // When laying out generators, we divide our saved local fields into two
1414 // categories: overlap-eligible and overlap-ineligible.
1415 //
1416 // Those fields which are ineligible for overlap go in a "prefix" at the
1417 // beginning of the layout, and always have space reserved for them.
1418 //
1419 // Overlap-eligible fields are only assigned to one variant, so we lay
1420 // those fields out for each variant and put them right after the
1421 // prefix.
1422 //
1423 // Finally, in the layout details, we point to the fields from the
1424 // variants they are assigned to. It is possible for some fields to be
1425 // included in multiple variants. No field ever "moves around" in the
1426 // layout; its offset is always the same.
1427 //
1428 // Also included in the layout are the upvars and the discriminant.
1429 // These are included as fields on the "outer" layout; they are not part
1430 // of any variant.
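//
// As an illustrative sketch: a local that is live across two different
// `yield` points appears in two variants, so it is ineligible and gets a
// fixed slot in the prefix; a local live across only one `yield` appears in
// a single variant and may share its slot with locals from other variants.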
1431 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
1432     /// Compute the eligibility and assignment of each local.
1433     fn generator_saved_local_eligibility(
1434         &self,
1435         info: &GeneratorLayout<'tcx>,
1436     ) -> (BitSet<GeneratorSavedLocal>, IndexVec<GeneratorSavedLocal, SavedLocalEligibility>) {
1437         use SavedLocalEligibility::*;
1438
1439         let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> =
1440             IndexVec::from_elem_n(Unassigned, info.field_tys.len());
1441
1442         // The saved locals not eligible for overlap. These will get
1443         // "promoted" to the prefix of our generator.
1444         let mut ineligible_locals = BitSet::new_empty(info.field_tys.len());
1445
1446         // Figure out which of our saved locals are fields in only
1447         // one variant. The rest are deemed ineligible for overlap.
1448         for (variant_index, fields) in info.variant_fields.iter_enumerated() {
1449             for local in fields {
1450                 match assignments[*local] {
1451                     Unassigned => {
1452                         assignments[*local] = Assigned(variant_index);
1453                     }
1454                     Assigned(idx) => {
1455                         // We've already seen this local at another suspension
1456                         // point, so it is no longer a candidate.
1457                         trace!(
1458                             "removing local {:?} in >1 variant ({:?}, {:?})",
1459                             local,
1460                             variant_index,
1461                             idx
1462                         );
1463                         ineligible_locals.insert(*local);
1464                         assignments[*local] = Ineligible(None);
1465                     }
1466                     Ineligible(_) => {}
1467                 }
1468             }
1469         }
1470
1471         // Next, check every pair of eligible locals to see if they
1472         // conflict.
1473         for local_a in info.storage_conflicts.rows() {
1474             let conflicts_a = info.storage_conflicts.count(local_a);
1475             if ineligible_locals.contains(local_a) {
1476                 continue;
1477             }
1478
1479             for local_b in info.storage_conflicts.iter(local_a) {
1480                 // `local_a` and `local_b` are storage-live at the same time, so they
1481                 // cannot overlap in the generator layout. The only way to guarantee
1482                 // this is if they are in the same variant, or one is ineligible
1483                 // (which means it is stored in every variant).
1484                 if ineligible_locals.contains(local_b)
1485                     || assignments[local_a] == assignments[local_b]
1486                 {
1487                     continue;
1488                 }
1489
1490                 // If they conflict, we will choose one to make ineligible.
1491                 // This is not always optimal; it's just a greedy heuristic that
1492                 // seems to produce good results most of the time.
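                // E.g. if `local_a` conflicts with five other locals and `local_b`
                // with one, we evict `local_a`, hoping to keep more locals eligible.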
1493                 let conflicts_b = info.storage_conflicts.count(local_b);
1494                 let (remove, other) =
1495                     if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
1496                 ineligible_locals.insert(remove);
1497                 assignments[remove] = Ineligible(None);
1498                 trace!("removing local {:?} due to conflict with {:?}", remove, other);
1499             }
1500         }
1501
1502         // Count the number of variants in use. If only one of them is in use,
1503         // then it is impossible to overlap any locals in our layout. In this case it's
1504         // always better to make the remaining locals ineligible, so we can
1505         // lay them out with the other locals in the prefix and eliminate
1506         // unnecessary padding bytes.
1507         {
1508             let mut used_variants = BitSet::new_empty(info.variant_fields.len());
1509             for assignment in &assignments {
1510                 if let Assigned(idx) = assignment {
1511                     used_variants.insert(*idx);
1512                 }
1513             }
1514             if used_variants.count() < 2 {
1515                 for assignment in assignments.iter_mut() {
1516                     *assignment = Ineligible(None);
1517                 }
1518                 ineligible_locals.insert_all();
1519             }
1520         }
1521
1522         // Write down the order of our locals that will be promoted to the prefix.
1523         {
1524             for (idx, local) in ineligible_locals.iter().enumerate() {
1525                 assignments[local] = Ineligible(Some(idx as u32));
1526             }
1527         }
1528         debug!("generator saved local assignments: {:?}", assignments);
1529
1530         (ineligible_locals, assignments)
1531     }
1532
1533     /// Compute the full generator layout.
1534     fn generator_layout(
1535         &self,
1536         ty: Ty<'tcx>,
1537         def_id: hir::def_id::DefId,
1538         substs: SubstsRef<'tcx>,
1539     ) -> Result<&'tcx Layout, LayoutError<'tcx>> {
1540         use SavedLocalEligibility::*;
1541         let tcx = self.tcx;
1542         let subst_field = |ty: Ty<'tcx>| ty.subst(tcx, substs);
1543
1544         let info = match tcx.generator_layout(def_id) {
1545             None => return Err(LayoutError::Unknown(ty)),
1546             Some(info) => info,
1547         };
1548         let (ineligible_locals, assignments) = self.generator_saved_local_eligibility(&info);
1549
1550         // Build a prefix layout, including "promoting" all ineligible
1551         // locals as part of the prefix. We compute the layout of all of
1552         // these fields at once to get optimal packing.
1553         let tag_index = substs.as_generator().prefix_tys().count();
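        // In field-index order, the prefix is: the generator's `prefix_tys`
        // (the upvars), then the tag at `tag_index`, then the promoted
        // (ineligible) locals; see `prefix_layouts` below. (Memory order may
        // differ, since `univariant_uninterned` is free to reorder fields.)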
1554
1555         // `info.variant_fields` already accounts for the reserved variants, so no need to add them.
1556         let max_discr = (info.variant_fields.len() - 1) as u128;
1557         let discr_int = Integer::fit_unsigned(max_discr);
1558         let discr_int_ty = discr_int.to_ty(tcx, false);
1559         let tag = Scalar {
1560             value: Primitive::Int(discr_int, false),
1561             valid_range: WrappingRange { start: 0, end: max_discr },
1562         };
1563         let tag_layout = self.tcx.intern_layout(Layout::scalar(self, tag.clone()));
1564         let tag_layout = TyAndLayout { ty: discr_int_ty, layout: tag_layout };
1565
1566         let promoted_layouts = ineligible_locals
1567             .iter()
1568             .map(|local| subst_field(info.field_tys[local]))
1569             .map(|ty| tcx.mk_maybe_uninit(ty))
1570             .map(|ty| self.layout_of(ty));
1571         let prefix_layouts = substs
1572             .as_generator()
1573             .prefix_tys()
1574             .map(|ty| self.layout_of(ty))
1575             .chain(iter::once(Ok(tag_layout)))
1576             .chain(promoted_layouts)
1577             .collect::<Result<Vec<_>, _>>()?;
1578         let prefix = self.univariant_uninterned(
1579             ty,
1580             &prefix_layouts,
1581             &ReprOptions::default(),
1582             StructKind::AlwaysSized,
1583         )?;
1584
1585         let (prefix_size, prefix_align) = (prefix.size, prefix.align);
1586
1587         // Split the prefix layout into the "outer" fields (upvars and
1588         // discriminant) and the "promoted" fields. Promoted fields will
1589         // get included in each variant that requested them in
1590         // GeneratorLayout.
1591         debug!("prefix = {:#?}", prefix);
1592         let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
1593             FieldsShape::Arbitrary { mut offsets, memory_index } => {
1594                 let mut inverse_memory_index = invert_mapping(&memory_index);
1595
1596                 // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
1597                 // "outer" and "promoted" fields respectively.
1598                 let b_start = (tag_index + 1) as u32;
1599                 let offsets_b = offsets.split_off(b_start as usize);
1600                 let offsets_a = offsets;
1601
1602                 // Disentangle the "a" and "b" components of `inverse_memory_index`
1603                 // by preserving the order but keeping only one disjoint "half" each.
1604                 // FIXME(eddyb) build a better abstraction for permutations, if possible.
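                // A small worked example: with three fields whose `memory_index` is
                // `[2, 0, 1]`, the inverse is `[1, 2, 0]` (field 1 comes first in
                // memory). With `b_start == 2`, the "b" half keeps only entries >= 2
                // (rebased to `[0]`) and the "a" half retains `[1, 0]`; inverting
                // each half yields a valid per-half `memory_index`.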
1605                 let inverse_memory_index_b: Vec<_> =
1606                     inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
1607                 inverse_memory_index.retain(|&i| i < b_start);
1608                 let inverse_memory_index_a = inverse_memory_index;
1609
1610                 // Since `inverse_memory_index_{a,b}` each only refer to their
1611                 // respective fields, they can be safely inverted.
1612                 let memory_index_a = invert_mapping(&inverse_memory_index_a);
1613                 let memory_index_b = invert_mapping(&inverse_memory_index_b);
1614
1615                 let outer_fields =
1616                     FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
1617                 (outer_fields, offsets_b, memory_index_b)
1618             }
1619             _ => bug!(),
1620         };
1621
1622         let mut size = prefix.size;
1623         let mut align = prefix.align;
1624         let variants = info
1625             .variant_fields
1626             .iter_enumerated()
1627             .map(|(index, variant_fields)| {
1628                 // Only include overlap-eligible fields when we compute our variant layout.
1629                 let variant_only_tys = variant_fields
1630                     .iter()
1631                     .filter(|local| match assignments[**local] {
1632                         Unassigned => bug!(),
1633                         Assigned(v) if v == index => true,
1634                         Assigned(_) => bug!("assignment does not match variant"),
1635                         Ineligible(_) => false,
1636                     })
1637                     .map(|local| subst_field(info.field_tys[*local]));
1638
1639                 let mut variant = self.univariant_uninterned(
1640                     ty,
1641                     &variant_only_tys
1642                         .map(|ty| self.layout_of(ty))
1643                         .collect::<Result<Vec<_>, _>>()?,
1644                     &ReprOptions::default(),
1645                     StructKind::Prefixed(prefix_size, prefix_align.abi),
1646                 )?;
1647                 variant.variants = Variants::Single { index };
1648
1649                 let (offsets, memory_index) = match variant.fields {
1650                     FieldsShape::Arbitrary { offsets, memory_index } => (offsets, memory_index),
1651                     _ => bug!(),
1652                 };
1653
1654                 // Now, stitch the promoted and variant-only fields back together in
1655                 // the order they are mentioned by our GeneratorLayout.
1656                 // Because we only use some subset (that can differ between variants)
1657                 // of the promoted fields, we can't just pick those elements of the
1658                 // `promoted_memory_index` (as we'd end up with gaps).
1659                 // So instead, we build an "inverse memory_index", as if all of the
1660                 // promoted fields were being used, but leave the elements not in the
1661                 // subset as `INVALID_FIELD_IDX`, which we can filter out later to
1662                 // obtain a valid (bijective) mapping.
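                // (E.g. with two promoted fields and one variant-only field, the
                // combined index space has three slots; slots for promoted fields
                // this variant doesn't use remain `INVALID_FIELD_IDX` and are
                // filtered out afterwards.)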
1663                 const INVALID_FIELD_IDX: u32 = !0;
1664                 let mut combined_inverse_memory_index =
1665                     vec![INVALID_FIELD_IDX; promoted_memory_index.len() + memory_index.len()];
1666                 let mut offsets_and_memory_index = iter::zip(offsets, memory_index);
1667                 let combined_offsets = variant_fields
1668                     .iter()
1669                     .enumerate()
1670                     .map(|(i, local)| {
1671                         let (offset, memory_index) = match assignments[*local] {
1672                             Unassigned => bug!(),
1673                             Assigned(_) => {
1674                                 let (offset, memory_index) =
1675                                     offsets_and_memory_index.next().unwrap();
1676                                 (offset, promoted_memory_index.len() as u32 + memory_index)
1677                             }
1678                             Ineligible(field_idx) => {
1679                                 let field_idx = field_idx.unwrap() as usize;
1680                                 (promoted_offsets[field_idx], promoted_memory_index[field_idx])
1681                             }
1682                         };
1683                         combined_inverse_memory_index[memory_index as usize] = i as u32;
1684                         offset
1685                     })
1686                     .collect();
1687
1688                 // Remove the unused slots and invert the mapping to obtain the
1689                 // combined `memory_index` (also see previous comment).
1690                 combined_inverse_memory_index.retain(|&i| i != INVALID_FIELD_IDX);
1691                 let combined_memory_index = invert_mapping(&combined_inverse_memory_index);
1692
1693                 variant.fields = FieldsShape::Arbitrary {
1694                     offsets: combined_offsets,
1695                     memory_index: combined_memory_index,
1696                 };
1697
1698                 size = size.max(variant.size);
1699                 align = align.max(variant.align);
1700                 Ok(variant)
1701             })
1702             .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1703
1704         size = size.align_to(align.abi);
1705
1706         let abi = if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi.is_uninhabited())
1707         {
1708             Abi::Uninhabited
1709         } else {
1710             Abi::Aggregate { sized: true }
1711         };
1712
1713         let layout = tcx.intern_layout(Layout {
1714             variants: Variants::Multiple {
1715                 tag,
1716                 tag_encoding: TagEncoding::Direct,
1717                 tag_field: tag_index,
1718                 variants,
1719             },
1720             fields: outer_fields,
1721             abi,
1722             largest_niche: prefix.largest_niche,
1723             size,
1724             align,
1725         });
1726         debug!("generator layout ({:?}): {:#?}", ty, layout);
1727         Ok(layout)
1728     }
1729
1730     /// This is invoked by the `layout_of` query to record the final
1731     /// layout of each type.
1732     #[inline(always)]
1733     fn record_layout_for_printing(&self, layout: TyAndLayout<'tcx>) {
1734         // If we are running with `-Zprint-type-sizes`, maybe record layouts
1735         // for dumping later.
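        // (Enabled with `rustc -Zprint-type-sizes`, which reports each type's
        // size, alignment, and per-variant/per-field breakdown.)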
1736         if self.tcx.sess.opts.debugging_opts.print_type_sizes {
1737             self.record_layout_for_printing_outlined(layout)
1738         }
1739     }
1740
1741     fn record_layout_for_printing_outlined(&self, layout: TyAndLayout<'tcx>) {
1742         // Ignore layouts computed with non-empty environments, as well as
1743         // non-monomorphic layouts, as the user only wants to see the stuff
1744         // resulting from the final codegen session.
1745         if layout.ty.definitely_has_param_types_or_consts(self.tcx)
1746             || !self.param_env.caller_bounds().is_empty()
1747         {
1748             return;
1749         }
1750
1751         // (delay format until we actually need it)
1752         let record = |kind, packed, opt_discr_size, variants| {
1753             let type_desc = format!("{:?}", layout.ty);
1754             self.tcx.sess.code_stats.record_type_size(
1755                 kind,
1756                 type_desc,
1757                 layout.align.abi,
1758                 layout.size,
1759                 packed,
1760                 opt_discr_size,
1761                 variants,
1762             );
1763         };
1764
1765         let adt_def = match *layout.ty.kind() {
1766             ty::Adt(ref adt_def, _) => {
1767                 debug!("print-type-size t: `{:?}` process adt", layout.ty);
1768                 adt_def
1769             }
1770
1771             ty::Closure(..) => {
1772                 debug!("print-type-size t: `{:?}` record closure", layout.ty);
1773                 record(DataTypeKind::Closure, false, None, vec![]);
1774                 return;
1775             }
1776
1777             _ => {
1778                 debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
1779                 return;
1780             }
1781         };
1782
1783         let adt_kind = adt_def.adt_kind();
1784         let adt_packed = adt_def.repr.pack.is_some();
1785
1786         let build_variant_info = |n: Option<Ident>, flds: &[Symbol], layout: TyAndLayout<'tcx>| {
1787             let mut min_size = Size::ZERO;
1788             let field_info: Vec<_> = flds
1789                 .iter()
1790                 .enumerate()
1791                 .map(|(i, &name)| match layout.field(self, i) {
1792                     Err(err) => {
1793                         bug!("no layout found for field {}: `{:?}`", name, err);
1794                     }
1795                     Ok(field_layout) => {
1796                         let offset = layout.fields.offset(i);
1797                         let field_end = offset + field_layout.size;
1798                         if min_size < field_end {
1799                             min_size = field_end;
1800                         }
1801                         FieldInfo {
1802                             name: name.to_string(),
1803                             offset: offset.bytes(),
1804                             size: field_layout.size.bytes(),
1805                             align: field_layout.align.abi.bytes(),
1806                         }
1807                     }
1808                 })
1809                 .collect();
1810
1811             VariantInfo {
1812                 name: n.map(|n| n.to_string()),
1813                 kind: if layout.is_unsized() { SizeKind::Min } else { SizeKind::Exact },
1814                 align: layout.align.abi.bytes(),
1815                 size: if min_size.bytes() == 0 { layout.size.bytes() } else { min_size.bytes() },
1816                 fields: field_info,
1817             }
1818         };
1819
1820         match layout.variants {
1821             Variants::Single { index } => {
1822                 debug!("print-type-size `{:#?}` variant {}", layout, adt_def.variants[index].ident);
1823                 if !adt_def.variants.is_empty() {
1824                     let variant_def = &adt_def.variants[index];
1825                     let fields: Vec<_> = variant_def.fields.iter().map(|f| f.ident.name).collect();
1826                     record(
1827                         adt_kind.into(),
1828                         adt_packed,
1829                         None,
1830                         vec![build_variant_info(Some(variant_def.ident), &fields, layout)],
1831                     );
1832                 } else {
1833                     // (This case arises for *empty* enums; so give it
1834                     // zero variants.)
1835                     record(adt_kind.into(), adt_packed, None, vec![]);
1836                 }
1837             }
1838
1839             Variants::Multiple { ref tag, ref tag_encoding, .. } => {
1840                 debug!(
1841                     "print-type-size `{:#?}` adt general variants def {}",
1842                     layout.ty,
1843                     adt_def.variants.len()
1844                 );
1845                 let variant_infos: Vec<_> = adt_def
1846                     .variants
1847                     .iter_enumerated()
1848                     .map(|(i, variant_def)| {
1849                         let fields: Vec<_> =
1850                             variant_def.fields.iter().map(|f| f.ident.name).collect();
1851                         build_variant_info(
1852                             Some(variant_def.ident),
1853                             &fields,
1854                             layout.for_variant(self, i),
1855                         )
1856                     })
1857                     .collect();
1858                 record(
1859                     adt_kind.into(),
1860                     adt_packed,
1861                     match tag_encoding {
1862                         TagEncoding::Direct => Some(tag.value.size(self)),
1863                         _ => None,
1864                     },
1865                     variant_infos,
1866                 );
1867             }
1868         }
1869     }
1870 }
1871
1872 /// Type size "skeleton", i.e., the only information determining a type's size.
1873 /// While this is conservative (aside from constant sizes, only pointers,
1874 /// newtypes thereof, and null-pointer-optimized enums are allowed), it is
1875 /// enough to statically check common use cases of transmute.
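///
/// For example, for a type parameter `T`, both `&T` and `Option<&T>` reduce
/// to `SizeSkeleton::Pointer` with the same `tail`, so a transmute between
/// them can be accepted without knowing `T`'s layout.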
1876 #[derive(Copy, Clone, Debug)]
1877 pub enum SizeSkeleton<'tcx> {
1878     /// Any statically computable Layout.
1879     Known(Size),
1880
1881     /// A potentially-fat pointer.
1882     Pointer {
1883         /// If true, this pointer is never null.
1884         non_zero: bool,
1885         /// The type which determines the unsized metadata, if any,
1886         /// of this pointer. Either a type parameter or a projection
1887         /// depending on one, with regions erased.
1888         tail: Ty<'tcx>,
1889     },
1890 }
1891
1892 impl<'tcx> SizeSkeleton<'tcx> {
1893     pub fn compute(
1894         ty: Ty<'tcx>,
1895         tcx: TyCtxt<'tcx>,
1896         param_env: ty::ParamEnv<'tcx>,
1897     ) -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
1898         debug_assert!(!ty.has_infer_types_or_consts());
1899
1900         // First try computing a static layout.
1901         let err = match tcx.layout_of(param_env.and(ty)) {
1902             Ok(layout) => {
1903                 return Ok(SizeSkeleton::Known(layout.size));
1904             }
1905             Err(err) => err,
1906         };
1907
1908         match *ty.kind() {
1909             ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
1910                 let non_zero = !ty.is_unsafe_ptr();
1911                 let tail = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
1912                 match tail.kind() {
1913                     ty::Param(_) | ty::Projection(_) => {
1914                         debug_assert!(tail.definitely_has_param_types_or_consts(tcx));
1915                         Ok(SizeSkeleton::Pointer { non_zero, tail: tcx.erase_regions(tail) })
1916                     }
1917                     _ => bug!(
1918                         "SizeSkeleton::compute({}): layout errored ({}), yet \
1919                               tail `{}` is not a type parameter or a projection",
1920                         ty,
1921                         err,
1922                         tail
1923                     ),
1924                 }
1925             }
1926
1927             ty::Adt(def, substs) => {
1928                 // Only newtypes and enums w/ nullable pointer optimization.
1929                 if def.is_union() || def.variants.is_empty() || def.variants.len() > 2 {
1930                     return Err(err);
1931                 }
1932
1933                 // Get a zero-sized variant or a pointer newtype.
1934                 let zero_or_ptr_variant = |i| {
1935                     let i = VariantIdx::new(i);
1936                     let fields = def.variants[i]
1937                         .fields
1938                         .iter()
1939                         .map(|field| SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env));
1940                     let mut ptr = None;
1941                     for field in fields {
1942                         let field = field?;
1943                         match field {
1944                             SizeSkeleton::Known(size) => {
1945                                 if size.bytes() > 0 {
1946                                     return Err(err);
1947                                 }
1948                             }
1949                             SizeSkeleton::Pointer { .. } => {
1950                                 if ptr.is_some() {
1951                                     return Err(err);
1952                                 }
1953                                 ptr = Some(field);
1954                             }
1955                         }
1956                     }
1957                     Ok(ptr)
1958                 };
1959
1960                 let v0 = zero_or_ptr_variant(0)?;
1961                 // Newtype.
1962                 if def.variants.len() == 1 {
1963                     if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
1964                         return Ok(SizeSkeleton::Pointer {
1965                             non_zero: non_zero
1966                                 || match tcx.layout_scalar_valid_range(def.did) {
1967                                     (Bound::Included(start), Bound::Unbounded) => start > 0,
1968                                     (Bound::Included(start), Bound::Included(end)) => {
1969                                         0 < start && start < end
1970                                     }
1971                                     _ => false,
1972                                 },
1973                             tail,
1974                         });
1975                     } else {
1976                         return Err(err);
1977                     }
1978                 }
1979
1980                 let v1 = zero_or_ptr_variant(1)?;
1981                 // Nullable pointer enum optimization.
1982                 match (v0, v1) {
1983                     (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None)
1984                     | (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
1985                         Ok(SizeSkeleton::Pointer { non_zero: false, tail })
1986                     }
1987                     _ => Err(err),
1988                 }
1989             }
1990
1991             ty::Projection(_) | ty::Opaque(..) => {
1992                 let normalized = tcx.normalize_erasing_regions(param_env, ty);
1993                 if ty == normalized {
1994                     Err(err)
1995                 } else {
1996                     SizeSkeleton::compute(normalized, tcx, param_env)
1997                 }
1998             }
1999
2000             _ => Err(err),
2001         }
2002     }
2003
2004     pub fn same_size(self, other: SizeSkeleton<'_>) -> bool {
2005         match (self, other) {
2006             (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
2007             (SizeSkeleton::Pointer { tail: a, .. }, SizeSkeleton::Pointer { tail: b, .. }) => {
2008                 a == b
2009             }
2010             _ => false,
2011         }
2012     }
2013 }
2014
2015 pub trait HasTyCtxt<'tcx>: HasDataLayout {
2016     fn tcx(&self) -> TyCtxt<'tcx>;
2017 }
2018
2019 pub trait HasParamEnv<'tcx> {
2020     fn param_env(&self) -> ty::ParamEnv<'tcx>;
2021 }
2022
2023 impl<'tcx> HasDataLayout for TyCtxt<'tcx> {
2024     #[inline]
2025     fn data_layout(&self) -> &TargetDataLayout {
2026         &self.data_layout
2027     }
2028 }
2029
2030 impl<'tcx> HasTyCtxt<'tcx> for TyCtxt<'tcx> {
2031     #[inline]
2032     fn tcx(&self) -> TyCtxt<'tcx> {
2033         *self
2034     }
2035 }
2036
2037 impl<'tcx, C> HasParamEnv<'tcx> for LayoutCx<'tcx, C> {
2038     fn param_env(&self) -> ty::ParamEnv<'tcx> {
2039         self.param_env
2040     }
2041 }
2042
2043 impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
2044     fn data_layout(&self) -> &TargetDataLayout {
2045         self.tcx.data_layout()
2046     }
2047 }
2048
2049 impl<'tcx, T: HasTyCtxt<'tcx>> HasTyCtxt<'tcx> for LayoutCx<'tcx, T> {
2050     fn tcx(&self) -> TyCtxt<'tcx> {
2051         self.tcx.tcx()
2052     }
2053 }
2054
2055 pub type TyAndLayout<'tcx> = rustc_target::abi::TyAndLayout<'tcx, Ty<'tcx>>;
2056
2057 impl LayoutOf<'tcx> for LayoutCx<'tcx, TyCtxt<'tcx>> {
2058     type Ty = Ty<'tcx>;
2059     type TyAndLayout = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
2060
2061     /// Computes the layout of a type. Note that this implicitly
2062     /// executes in "reveal all" mode, and will normalize the input type.
2063     #[inline]
2064     fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout {
2065         self.tcx.layout_of(self.param_env.and(ty))
2066     }
2067 }
2068
2069 impl LayoutOf<'tcx> for LayoutCx<'tcx, ty::query::TyCtxtAt<'tcx>> {
2070     type Ty = Ty<'tcx>;
2071     type TyAndLayout = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
2072
2073     /// Computes the layout of a type. Note that this implicitly
2074     /// executes in "reveal all" mode, and will normalize the input type.
2075     #[inline]
2076     fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout {
2077         self.tcx.layout_of(self.param_env.and(ty))
2078     }
2079 }
2080
2081 impl<'tcx, C> TyAbiInterface<'tcx, C> for Ty<'tcx>
2082 where
2083     C: LayoutOf<'tcx, Ty = Ty<'tcx>> + HasTyCtxt<'tcx> + HasParamEnv<'tcx>,
2084 {
2085     fn ty_and_layout_for_variant(
2086         this: TyAndLayout<'tcx>,
2087         cx: &C,
2088         variant_index: VariantIdx,
2089     ) -> TyAndLayout<'tcx> {
2090         let layout = match this.variants {
2091             Variants::Single { index }
2092                 // If all variants but one are uninhabited, the variant layout is the enum layout.
2093                 if index == variant_index &&
2094                 // Don't confuse variants of uninhabited enums with the enum itself.
2095                 // For more details see https://github.com/rust-lang/rust/issues/69763.
2096                 this.fields != FieldsShape::Primitive =>
2097             {
2098                 this.layout
2099             }
2100
2101             Variants::Single { index } => {
2102                 // Deny calling for_variant more than once for non-Single enums.
2103                 if let Ok(original_layout) = cx.layout_of(this.ty).to_result() {
2104                     assert_eq!(original_layout.variants, Variants::Single { index });
2105                 }
2106
2107                 let fields = match this.ty.kind() {
2108                     ty::Adt(def, _) if def.variants.is_empty() =>
2109                         bug!("for_variant called on zero-variant enum"),
2110                     ty::Adt(def, _) => def.variants[variant_index].fields.len(),
2111                     _ => bug!(),
2112                 };
2113                 let tcx = cx.tcx();
2114                 tcx.intern_layout(Layout {
2115                     variants: Variants::Single { index: variant_index },
2116                     fields: match NonZeroUsize::new(fields) {
2117                         Some(fields) => FieldsShape::Union(fields),
2118                         None => FieldsShape::Arbitrary { offsets: vec![], memory_index: vec![] },
2119                     },
2120                     abi: Abi::Uninhabited,
2121                     largest_niche: None,
2122                     align: tcx.data_layout.i8_align,
2123                     size: Size::ZERO,
2124                 })
2125             }
2126
2127             Variants::Multiple { ref variants, .. } => &variants[variant_index],
2128         };
2129
2130         assert_eq!(layout.variants, Variants::Single { index: variant_index });
2131
2132         TyAndLayout { ty: this.ty, layout }
2133     }
2134
2135     fn ty_and_layout_field(this: TyAndLayout<'tcx>, cx: &C, i: usize) -> C::TyAndLayout {
2136         enum TyMaybeWithLayout<'tcx, C: LayoutOf<'tcx>> {
2137             Ty(C::Ty),
2138             TyAndLayout(C::TyAndLayout),
2139         }
2140
2141         fn ty_and_layout_kind<
2142             C: LayoutOf<'tcx, Ty = Ty<'tcx>> + HasTyCtxt<'tcx> + HasParamEnv<'tcx>,
2143         >(
2144             this: TyAndLayout<'tcx>,
2145             cx: &C,
2146             i: usize,
2147             ty: C::Ty,
2148         ) -> TyMaybeWithLayout<'tcx, C> {
2149             let tcx = cx.tcx();
2150             let tag_layout = |tag: &Scalar| -> C::TyAndLayout {
2151                 let layout = Layout::scalar(cx, tag.clone());
2152                 MaybeResult::from(Ok(TyAndLayout {
2153                     layout: tcx.intern_layout(layout),
2154                     ty: tag.value.to_ty(tcx),
2155                 }))
2156             };
2157
2158             match *ty.kind() {
2159                 ty::Bool
2160                 | ty::Char
2161                 | ty::Int(_)
2162                 | ty::Uint(_)
2163                 | ty::Float(_)
2164                 | ty::FnPtr(_)
2165                 | ty::Never
2166                 | ty::FnDef(..)
2167                 | ty::GeneratorWitness(..)
2168                 | ty::Foreign(..)
2169                 | ty::Dynamic(..) => bug!("TyAndLayout::field_type({:?}): not applicable", this),
2170
2171                 // Potentially-fat pointers.
2172                 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
2173                     assert!(i < this.fields.count());
2174
2175                     // Reuse the fat `*T` type as its own thin pointer data field.
2176                     // This provides information about, e.g., DST struct pointees
2177                     // (which may have no non-DST form), and will work as long
2178                     // as the `Abi` or `FieldsShape` is checked by users.
2179                     if i == 0 {
2180                         let nil = tcx.mk_unit();
2181                         let ptr_ty = if ty.is_unsafe_ptr() {
2182                             tcx.mk_mut_ptr(nil)
2183                         } else {
2184                             tcx.mk_mut_ref(tcx.lifetimes.re_static, nil)
2185                         };
2186                         return TyMaybeWithLayout::TyAndLayout(MaybeResult::from(
2187                             cx.layout_of(ptr_ty).to_result().map(|mut ptr_layout| {
2188                                 ptr_layout.ty = ty;
2189                                 ptr_layout
2190                             }),
2191                         ));
2192                     }
2193
2194                     match tcx.struct_tail_erasing_lifetimes(pointee, cx.param_env()).kind() {
2195                         ty::Slice(_) | ty::Str => TyMaybeWithLayout::Ty(tcx.types.usize),
2196                         ty::Dynamic(_, _) => {
2197                             TyMaybeWithLayout::Ty(tcx.mk_imm_ref(
2198                                 tcx.lifetimes.re_static,
2199                                 tcx.mk_array(tcx.types.usize, 3),
2200                             ))
2201                             /* FIXME: use actual fn pointers
2202                             Warning: naively computing the number of entries in the
2203                             vtable by counting the methods on the trait + methods on
2204                             all parent traits does not work, because some methods can
2205                             be not object safe and thus excluded from the vtable.
2206                             Increase this counter if you tried to implement this but
2207                             failed to do it without duplicating a lot of code from
2208                             other places in the compiler: 2
2209                             tcx.mk_tup(&[
2210                                 tcx.mk_array(tcx.types.usize, 3),
2211                                 tcx.mk_array(Option<fn()>),
2212                             ])
2213                             */
2214                         }
2215                         _ => bug!("TyAndLayout::field_type({:?}): not applicable", this),
2216                     }
2217                 }
2218
2219                 // Arrays and slices.
2220                 ty::Array(element, _) | ty::Slice(element) => TyMaybeWithLayout::Ty(element),
2221                 ty::Str => TyMaybeWithLayout::Ty(tcx.types.u8),
2222
2223                 // Tuples, generators and closures.
2224                 ty::Closure(_, ref substs) => {
2225                     ty_and_layout_kind(this, cx, i, substs.as_closure().tupled_upvars_ty())
2226                 }
2227
2228                 ty::Generator(def_id, ref substs, _) => match this.variants {
2229                     Variants::Single { index } => TyMaybeWithLayout::Ty(
2230                         substs
2231                             .as_generator()
2232                             .state_tys(def_id, tcx)
2233                             .nth(index.as_usize())
2234                             .unwrap()
2235                             .nth(i)
2236                             .unwrap(),
2237                     ),
2238                     Variants::Multiple { ref tag, tag_field, .. } => {
2239                         if i == tag_field {
2240                             return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
2241                         }
2242                         TyMaybeWithLayout::Ty(substs.as_generator().prefix_tys().nth(i).unwrap())
2243                     }
2244                 },
2245
2246                 ty::Tuple(tys) => TyMaybeWithLayout::Ty(tys[i].expect_ty()),
2247
2248                 // ADTs.
2249                 ty::Adt(def, substs) => {
2250                     match this.variants {
2251                         Variants::Single { index } => {
2252                             TyMaybeWithLayout::Ty(def.variants[index].fields[i].ty(tcx, substs))
2253                         }
2254
2255                         // Discriminant field for enums (where applicable).
2256                         Variants::Multiple { ref tag, .. } => {
2257                             assert_eq!(i, 0);
2258                             return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
2259                         }
2260                     }
2261                 }
2262
2263                 ty::Projection(_)
2264                 | ty::Bound(..)
2265                 | ty::Placeholder(..)
2266                 | ty::Opaque(..)
2267                 | ty::Param(_)
2268                 | ty::Infer(_)
2269                 | ty::Error(_) => bug!("TyAndLayout::field_type: unexpected type `{}`", this.ty),
2270             }
2271         }
2272
2273         cx.layout_of(match ty_and_layout_kind(this, cx, i, this.ty) {
2274             TyMaybeWithLayout::Ty(result) => result,
2275             TyMaybeWithLayout::TyAndLayout(result) => return result,
2276         })
2277     }
2278
2279     fn ty_and_layout_pointee_info_at(
2280         this: TyAndLayout<'tcx>,
2281         cx: &C,
2282         offset: Size,
2283     ) -> Option<PointeeInfo> {
2284         let addr_space_of_ty = |ty: Ty<'tcx>| {
2285             if ty.is_fn() { cx.data_layout().instruction_address_space } else { AddressSpace::DATA }
2286         };
2287
2288         let pointee_info = match *this.ty.kind() {
2289             ty::RawPtr(mt) if offset.bytes() == 0 => {
2290                 cx.layout_of(mt.ty).to_result().ok().map(|layout| PointeeInfo {
2291                     size: layout.size,
2292                     align: layout.align.abi,
2293                     safe: None,
2294                     address_space: addr_space_of_ty(mt.ty),
2295                 })
2296             }
2297             ty::FnPtr(fn_sig) if offset.bytes() == 0 => {
2298                 cx.layout_of(cx.tcx().mk_fn_ptr(fn_sig)).to_result().ok().map(|layout| {
2299                     PointeeInfo {
2300                         size: layout.size,
2301                         align: layout.align.abi,
2302                         safe: None,
2303                         address_space: cx.data_layout().instruction_address_space,
2304                     }
2305                 })
2306             }
2307             ty::Ref(_, ty, mt) if offset.bytes() == 0 => {
2308                 let address_space = addr_space_of_ty(ty);
2309                 let tcx = cx.tcx();
2310                 let kind = if tcx.sess.opts.optimize == OptLevel::No {
2311                     // Use conservative pointer kind if not optimizing. This saves us the
2312                     // Freeze/Unpin queries, and can save time in the codegen backend (noalias
2313                     // attributes in LLVM have compile-time cost even in unoptimized builds).
2314                     PointerKind::Shared
2315                 } else {
2316                     match mt {
2317                         hir::Mutability::Not => {
2318                             if ty.is_freeze(tcx.at(DUMMY_SP), cx.param_env()) {
2319                                 PointerKind::Frozen
2320                             } else {
2321                                 PointerKind::Shared
2322                             }
2323                         }
2324                         hir::Mutability::Mut => {
2325                             // References to self-referential structures should not be considered
2326                             // noalias, as another pointer to the structure can be obtained that
2327                             // is not based-on the original reference. We consider all !Unpin
2328                             // types to be potentially self-referential here.
2329                             if ty.is_unpin(tcx.at(DUMMY_SP), cx.param_env()) {
2330                                 PointerKind::UniqueBorrowed
2331                             } else {
2332                                 PointerKind::Shared
2333                             }
2334                         }
2335                     }
2336                 };
2337
2338                 cx.layout_of(ty).to_result().ok().map(|layout| PointeeInfo {
2339                     size: layout.size,
2340                     align: layout.align.abi,
2341                     safe: Some(kind),
2342                     address_space,
2343                 })
2344             }
2345
2346             _ => {
2347                 let mut data_variant = match this.variants {
2348                     // Within the discriminant field, only the niche itself is
2349                     // always initialized, so we only check for a pointer at its
2350                     // offset.
2351                     //
2352                     // If the niche is a pointer, it's either valid (according
2353                     // to its type), or null (which the niche field's scalar
2354                     // validity range encodes). This allows using
2355                     // `dereferenceable_or_null` for, e.g., `Option<&T>`, and
2356                     // this will continue to work as long as we don't start
2357                     // using more niches than just null (e.g., the first page of
2358                     // the address space, or unaligned pointers).
2359                     Variants::Multiple {
2360                         tag_encoding: TagEncoding::Niche { dataful_variant, .. },
2361                         tag_field,
2362                         ..
2363                     } if this.fields.offset(tag_field) == offset => {
2364                         Some(this.for_variant(cx, dataful_variant))
2365                     }
2366                     _ => Some(this),
2367                 };
2368
2369                 if let Some(variant) = data_variant {
2370                     // We're not interested in any unions.
2371                     if let FieldsShape::Union(_) = variant.fields {
2372                         data_variant = None;
2373                     }
2374                 }
2375
2376                 let mut result = None;
2377
2378                 if let Some(variant) = data_variant {
2379                     let ptr_end = offset + Pointer.size(cx);
2380                     for i in 0..variant.fields.count() {
2381                         let field_start = variant.fields.offset(i);
2382                         if field_start <= offset {
2383                             let field = variant.field(cx, i);
2384                             result = field.to_result().ok().and_then(|field| {
2385                                 if ptr_end <= field_start + field.size {
2386                                     // We found the right field, look inside it.
2387                                     field.pointee_info_at(cx, offset - field_start)
2390                                 } else {
2391                                     None
2392                                 }
2393                             });
2394                             if result.is_some() {
2395                                 break;
2396                             }
2397                         }
2398                     }
2399                 }
2400
2401                 // FIXME(eddyb) This should be for `ptr::Unique<T>`, not `Box<T>`.
2402                 if let Some(ref mut pointee) = result {
2403                     if let ty::Adt(def, _) = this.ty.kind() {
2404                         if def.is_box() && offset.bytes() == 0 {
2405                             pointee.safe = Some(PointerKind::UniqueOwned);
2406                         }
2407                     }
2408                 }
2409
2410                 result
2411             }
2412         };
2413
2414         debug!(
2415             "pointee_info_at (offset={:?}, type kind: {:?}) => {:?}",
2416             offset,
2417             this.ty.kind(),
2418             pointee_info
2419         );
2420
2421         pointee_info
2422     }
2423 }
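
// A minimal sketch (not part of this file; hypothetical names) of the
// classifications above. `!Unpin` is what demotes `&mut T` from
// `UniqueBorrowed` to `Shared`, since self-referential values may hold
// internal pointers that alias the reference; `PhantomPinned` is the usual
// way to opt out of `Unpin`:
#[allow(dead_code)]
mod _pointer_kind_sketch {
    use std::marker::PhantomPinned;

    struct SelfReferential {
        _data: u8,
        _pin: PhantomPinned, // makes the type `!Unpin`
    }

    // The null niche is also why `Option<&T>` stays pointer-sized, which
    // enables the `dereferenceable_or_null` treatment described above:
    fn _niche_demo() {
        assert_eq!(std::mem::size_of::<Option<&u8>>(), std::mem::size_of::<&u8>());
    }
}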
2424
2425 impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for LayoutError<'tcx> {
2426     #[inline]
2427     fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
2428         use crate::ty::layout::LayoutError::*;
2429         mem::discriminant(self).hash_stable(hcx, hasher);
2430
2431         match *self {
2432             Unknown(t) | SizeOverflow(t) => t.hash_stable(hcx, hasher),
2433         }
2434     }
2435 }
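
// A sketch of the hashing pattern above in plain `std` terms (hypothetical
// names, not part of this file): hash the enum discriminant first so that
// different variants with identical payloads still hash differently, then
// hash the payload itself.
#[allow(dead_code)]
mod _discriminant_hash_sketch {
    use std::hash::{Hash, Hasher};
    use std::mem;

    enum Error<'a> {
        Unknown(&'a str),
        SizeOverflow(&'a str),
    }

    fn hash_error<H: Hasher>(e: &Error<'_>, hasher: &mut H) {
        mem::discriminant(e).hash(hasher);
        match *e {
            Error::Unknown(t) | Error::SizeOverflow(t) => t.hash(hasher),
        }
    }
}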
2436
2437 impl<'tcx> ty::Instance<'tcx> {
2438     // NOTE(eddyb) this is private to avoid using it from outside of
2439     // `FnAbi::of_instance` - any other uses are either too high-level
2440     // for `Instance` (e.g. typeck would use `Ty::fn_sig` instead),
2441     // or should go through `FnAbi` instead, to avoid losing any
2442     // adjustments `FnAbi::of_instance` might be performing.
2443     fn fn_sig_for_fn_abi(&self, tcx: TyCtxt<'tcx>) -> ty::PolyFnSig<'tcx> {
2444         // FIXME(davidtwco,eddyb): A `ParamEnv` should be passed through to this function.
2445         let ty = self.ty(tcx, ty::ParamEnv::reveal_all());
2446         match *ty.kind() {
2447             ty::FnDef(..) => {
2448                 // HACK(davidtwco,eddyb): This is a workaround for polymorphization considering
2449                 // parameters unused if they show up in the signature, but not in the `mir::Body`
2450                 // (i.e. due to being inside a projection that got normalized, see
2451                 // `src/test/ui/polymorphization/normalized_sig_types.rs`), and codegen not keeping
2452                 // track of a polymorphization `ParamEnv` to allow normalizing later.
2453                 let mut sig = match *ty.kind() {
2454                     ty::FnDef(def_id, substs) => tcx
2455                         .normalize_erasing_regions(tcx.param_env(def_id), tcx.fn_sig(def_id))
2456                         .subst(tcx, substs),
2457                     _ => unreachable!(),
2458                 };
2459
2460                 if let ty::InstanceDef::VtableShim(..) = self.def {
2461                     // Modify `fn(self, ...)` to `fn(self: *mut Self, ...)`.
2462                     sig = sig.map_bound(|mut sig| {
2463                         let mut inputs_and_output = sig.inputs_and_output.to_vec();
2464                         inputs_and_output[0] = tcx.mk_mut_ptr(inputs_and_output[0]);
2465                         sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);
2466                         sig
2467                     });
2468                 }
2469                 sig
2470             }
2471             ty::Closure(def_id, substs) => {
2472                 let sig = substs.as_closure().sig();
2473
2474                 let bound_vars = tcx.mk_bound_variable_kinds(
2475                     sig.bound_vars()
2476                         .iter()
2477                         .chain(iter::once(ty::BoundVariableKind::Region(ty::BrEnv))),
2478                 );
2479                 let br = ty::BoundRegion {
2480                     var: ty::BoundVar::from_usize(bound_vars.len() - 1),
2481                     kind: ty::BoundRegionKind::BrEnv,
2482                 };
2483                 let env_region = ty::ReLateBound(ty::INNERMOST, br);
2484                 let env_ty = tcx.closure_env_ty(def_id, substs, env_region).unwrap();
2485
2486                 let sig = sig.skip_binder();
2487                 ty::Binder::bind_with_vars(
2488                     tcx.mk_fn_sig(
2489                         iter::once(env_ty).chain(sig.inputs().iter().cloned()),
2490                         sig.output(),
2491                         sig.c_variadic,
2492                         sig.unsafety,
2493                         sig.abi,
2494                     ),
2495                     bound_vars,
2496                 )
2497             }
2498             ty::Generator(_, substs, _) => {
2499                 let sig = substs.as_generator().poly_sig();
2500
2501                 let bound_vars = tcx.mk_bound_variable_kinds(
2502                     sig.bound_vars()
2503                         .iter()
2504                         .chain(iter::once(ty::BoundVariableKind::Region(ty::BrEnv))),
2505                 );
2506                 let br = ty::BoundRegion {
2507                     var: ty::BoundVar::from_usize(bound_vars.len() - 1),
2508                     kind: ty::BoundRegionKind::BrEnv,
2509                 };
2510                 let env_region = ty::ReLateBound(ty::INNERMOST, br);
2511                 let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty);
2512
2513                 let pin_did = tcx.require_lang_item(LangItem::Pin, None);
2514                 let pin_adt_ref = tcx.adt_def(pin_did);
2515                 let pin_substs = tcx.intern_substs(&[env_ty.into()]);
2516                 let env_ty = tcx.mk_adt(pin_adt_ref, pin_substs);
2517
2518                 let sig = sig.skip_binder();
2519                 let state_did = tcx.require_lang_item(LangItem::GeneratorState, None);
2520                 let state_adt_ref = tcx.adt_def(state_did);
2521                 let state_substs = tcx.intern_substs(&[sig.yield_ty.into(), sig.return_ty.into()]);
2522                 let ret_ty = tcx.mk_adt(state_adt_ref, state_substs);
2523                 ty::Binder::bind_with_vars(
2524                     tcx.mk_fn_sig(
2525                         [env_ty, sig.resume_ty].iter(),
2526                         &ret_ty,
2527                         false,
2528                         hir::Unsafety::Normal,
2529                         rustc_target::spec::abi::Abi::Rust,
2530                     ),
2531                     bound_vars,
2532                 )
2533             }
2534             _ => bug!("unexpected type {:?} in Instance::fn_sig", ty),
2535         }
2536     }
2537 }
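
// Sketches (not part of this file; hypothetical names) of the signature
// shapes constructed above. A closure's ABI signature prepends the captured
// environment as an extra first input:
#[allow(dead_code)]
fn _closure_env_sketch() {
    let base = 10;
    // Compiled shape is roughly
    // `extern "rust-call" fn(&{closure env}, (i32,)) -> i32`.
    let add = |x: i32| x + base;
    assert_eq!(add(5), 15);
}

// A generator resumes through `Pin<&mut Self>` and returns a
// `GeneratorState<Yield, Return>`-style enum; stable `Future::poll` has
// exactly this shape, with `Poll` playing the role of `GeneratorState`:
#[allow(dead_code)]
fn _poll_once<F: std::future::Future>(
    fut: std::pin::Pin<&mut F>,
    cx: &mut std::task::Context<'_>,
) -> std::task::Poll<F::Output> {
    fut.poll(cx)
}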
2538
2539 pub trait FnAbiExt<'tcx, C>
2540 where
2541     C: LayoutOf<'tcx, Ty = Ty<'tcx>, TyAndLayout = TyAndLayout<'tcx>>
2542         + HasDataLayout
2543         + HasTargetSpec
2544         + HasTyCtxt<'tcx>
2545         + HasParamEnv<'tcx>,
2546 {
2547     /// Compute a `FnAbi` suitable for indirect calls, i.e. to `fn` pointers.
2548     ///
2549     /// NB: this doesn't handle virtual calls - those should use `FnAbi::of_instance`
2550     /// instead, where the instance is an `InstanceDef::Virtual`.
2551     fn of_fn_ptr(cx: &C, sig: ty::PolyFnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self;
2552
2553     /// Compute a `FnAbi` suitable for declaring/defining an `fn` instance, and for
2554     /// direct calls to an `fn`.
2555     ///
2556     /// NB: that includes virtual calls, which are represented by "direct calls"
2557     /// to an `InstanceDef::Virtual` instance (of `<dyn Trait as Trait>::fn`).
2558     fn of_instance(cx: &C, instance: ty::Instance<'tcx>, extra_args: &[Ty<'tcx>]) -> Self;
2559
2560     fn new_internal(
2561         cx: &C,
2562         sig: ty::PolyFnSig<'tcx>,
2563         extra_args: &[Ty<'tcx>],
2564         caller_location: Option<Ty<'tcx>>,
2565         codegen_fn_attr_flags: CodegenFnAttrFlags,
2566         make_self_ptr_thin: bool,
2567     ) -> Self;
2568     fn adjust_for_abi(&mut self, cx: &C, abi: SpecAbi);
2569 }
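
// At the source level (hypothetical example, not part of this file), the
// two entry points above correspond to indirect calls through `fn` pointers
// vs. direct calls to a known instance:
#[allow(dead_code)]
fn _double(x: i32) -> i32 {
    x * 2
}

#[allow(dead_code)]
fn _call_both() -> (i32, i32) {
    let ptr: fn(i32) -> i32 = _double;
    // `ptr(3)` takes the `of_fn_ptr` path; `_double(3)` the `of_instance` path.
    (ptr(3), _double(3))
}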
2570
2571 /// Calculates whether a function's ABI can unwind or not.
2572 ///
2573 /// This takes two primary parameters:
2574 ///
2575 /// * `codegen_fn_attr_flags` - these are flags calculated as part of the
2576 ///   codegen attrs for a defined function. For function pointers this set of
2577 ///   flags is the empty set. This is only applicable for Rust-defined
2578 ///   functions, and generally isn't needed except for small optimizations where
2579 ///   we try to say a function which otherwise might look like it could unwind
2580 ///   doesn't actually unwind (such as for intrinsics and such).
2581 ///
2582 /// * `abi` - this is the ABI that the function is defined with. This is the
2583 ///   primary factor for determining whether a function can unwind or not.
2584 ///
2585 /// Note that in this case unwinding is not necessarily the same as panicking in
2586 /// Rust. Rust panics are implemented with unwinds on most platforms (when
2587 /// `-Cpanic=unwind`), but this also accounts for `-Cpanic=abort` build modes.
2588 /// Notably, unwinding is disallowed for most non-Rust ABIs unless it's
2589 /// specifically in the name (e.g. `"C-unwind"`). What unwinding means is
2590 /// defined individually for each ABI, but it always corresponds to some form of
2591 /// stack-based unwinding (the exact mechanism of which varies
2592 /// platform-by-platform).
2593 ///
2594 /// Rust functions are classified as able to unwind or not based on the
2595 /// active "panic strategy". In other words Rust functions are considered to
2596 /// unwind in `-Cpanic=unwind` mode and cannot unwind in `-Cpanic=abort` mode.
2597 /// Note that Rust supports intermingling panic=abort and panic=unwind code, but
2598 /// only if the final panic mode is panic=abort. In this scenario any code
2599 /// previously compiled assuming that a function can unwind is still correct; it
2600 /// just never happens to actually unwind at runtime.
2601 ///
2602 /// This function's answer to whether or not a function can unwind is quite
2603 /// impactful throughout the compiler. This affects things like:
2604 ///
2605 /// * Calling a function which can't unwind means codegen simply ignores any
2606 ///   associated unwinding cleanup.
2607 /// * Calling a function which can unwind from a function which can't unwind
2608 ///   causes the `abort_unwinding_calls` MIR pass to insert a landing pad that
2609 ///   aborts the process.
2610 /// * This affects whether functions have the LLVM `nounwind` attribute, which
2611 ///   affects various optimizations and codegen.
2612 ///
2613 /// FIXME: this is actually buggy with respect to Rust functions. Rust functions
2614 /// compiled with `-Cpanic=unwind` and referenced from another crate compiled
2615 /// with `-Cpanic=abort` will look like they can't unwind when in fact they
2616 /// might (from a foreign exception or similar).
2617 #[inline]
2618 pub fn fn_can_unwind(
2619     tcx: TyCtxt<'tcx>,
2620     codegen_fn_attr_flags: CodegenFnAttrFlags,
2621     abi: SpecAbi,
2622 ) -> bool {
2623     // Special attribute for functions which can't unwind.
2624     if codegen_fn_attr_flags.contains(CodegenFnAttrFlags::NEVER_UNWIND) {
2625         return false;
2626     }
2627
2628     // Otherwise if this isn't special then unwinding is generally determined by
2629     // the ABI of the function itself. ABIs like `C` have variants which also
2630     // specifically allow unwinding (`C-unwind`), but not all platform-specific
2631     // ABIs have such an option. Otherwise the only other thing here is Rust
2632     // itself, and those ABIs are determined by the panic strategy configured
2633     // for this compilation.
2634     //
2635     // Unfortunately at this time there's also another caveat. Rust [RFC
2636     // 2945][rfc] has been accepted and is in the process of being implemented
2637     // and stabilized. In this interim state we need to deal with historical
2638     // rustc behavior as well as plan for future rustc behavior.
2639     //
2640     // Historically functions declared with `extern "C"` were marked at the
2641     // codegen layer as `nounwind`. This happened regardless of `panic=unwind`
2642     // or not. This is UB for functions in `panic=unwind` mode that then
2643     // actually panic and unwind. Note that this behavior is true for both
2644     // externally declared functions as well as Rust-defined functions.
2645     //
2646     // To fix this UB, rustc would like to change in the future to catch unwinds
2647     // from function calls that may unwind within a Rust-defined `extern "C"`
2648     // function and forcibly abort the process, thereby respecting the
2649     // `nounwind` attribute emitted for `extern "C"`. This behavior change isn't
2650     // ready to roll out, so determining whether or not the `C` family of ABIs
2651     // unwinds is conditional not only on their definition but also on whether
2652     // the `#![feature(c_unwind)]` feature gate is active.
2653     //
2654     // Note that this means that, unlike historical compilers, rustc now, by
2655     // default, unconditionally thinks that the `C` ABI may unwind. This will
2656     // prevent some optimization opportunities, however, so we try to scope this
2657     // change and only assume that `C` unwinds with `panic=unwind` (as opposed
2658     // to `panic=abort`).
2659     //
2660     // Eventually the check against `c_unwind` here will ideally get removed and
2661     // this'll be a little cleaner as it'll be a straightforward check of the
2662     // ABI.
2663     //
2664     // [rfc]: https://github.com/rust-lang/rfcs/blob/master/text/2945-c-unwind-abi.md
2665     use SpecAbi::*;
2666     match abi {
2667         C { unwind } | Stdcall { unwind } | System { unwind } | Thiscall { unwind } => {
2668             unwind
2669                 || (!tcx.features().c_unwind && tcx.sess.panic_strategy() == PanicStrategy::Unwind)
2670         }
2671         Cdecl
2672         | Fastcall
2673         | Vectorcall
2674         | Aapcs
2675         | Win64
2676         | SysV64
2677         | PtxKernel
2678         | Msp430Interrupt
2679         | X86Interrupt
2680         | AmdGpuKernel
2681         | EfiApi
2682         | AvrInterrupt
2683         | AvrNonBlockingInterrupt
2684         | CCmseNonSecureCall
2685         | Wasm
2686         | RustIntrinsic
2687         | PlatformIntrinsic
2688         | Unadjusted => false,
2689         Rust | RustCall => tcx.sess.panic_strategy() == PanicStrategy::Unwind,
2690     }
2691 }
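
// A sketch of what the `unwind` flag above corresponds to in source code
// (assumes a nightly compiler with `#![feature(c_unwind)]` enabled, per the
// comment above): the `-unwind` suffix is what sets `unwind == true`.
#[allow(dead_code)]
extern "C" fn _never_unwinds() {} // `unwind == false`: eligible for `nounwind`

#[allow(dead_code)]
extern "C-unwind" fn _may_unwind() {} // `unwind == true`: never `nounwind`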
2692
2693 #[inline]
2694 pub fn conv_from_spec_abi(tcx: TyCtxt<'_>, abi: SpecAbi) -> Conv {
2695     use rustc_target::spec::abi::Abi::*;
2696     match tcx.sess.target.adjust_abi(abi) {
2697         RustIntrinsic | PlatformIntrinsic | Rust | RustCall => Conv::Rust,
2698
2699         // It's the ABI's job to select this, not ours.
2700         System { .. } => bug!("system abi should be selected elsewhere"),
2701         EfiApi => bug!("eficall abi should be selected elsewhere"),
2702
2703         Stdcall { .. } => Conv::X86Stdcall,
2704         Fastcall => Conv::X86Fastcall,
2705         Vectorcall => Conv::X86VectorCall,
2706         Thiscall { .. } => Conv::X86ThisCall,
2707         C { .. } => Conv::C,
2708         Unadjusted => Conv::C,
2709         Win64 => Conv::X86_64Win64,
2710         SysV64 => Conv::X86_64SysV,
2711         Aapcs => Conv::ArmAapcs,
2712         CCmseNonSecureCall => Conv::CCmseNonSecureCall,
2713         PtxKernel => Conv::PtxKernel,
2714         Msp430Interrupt => Conv::Msp430Intr,
2715         X86Interrupt => Conv::X86Intr,
2716         AmdGpuKernel => Conv::AmdGpuKernel,
2717         AvrInterrupt => Conv::AvrInterrupt,
2718         AvrNonBlockingInterrupt => Conv::AvrNonBlockingInterrupt,
2719         Wasm => Conv::C,
2720
2721         // These ABI constants ought to be more specific...
2722         Cdecl => Conv::C,
2723     }
2724 }
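
// Illustration (hypothetical item, not part of this file): the declared ABI
// string is what drives the mapping above; this one selects `Conv::C`, the
// platform's default C calling convention.
#[allow(dead_code)]
extern "C" fn _c_convention(x: u32) -> u32 {
    x.wrapping_add(1)
}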
2725
2726 impl<'tcx, C> FnAbiExt<'tcx, C> for call::FnAbi<'tcx, Ty<'tcx>>
2727 where
2728     C: LayoutOf<'tcx, Ty = Ty<'tcx>, TyAndLayout = TyAndLayout<'tcx>>
2729         + HasDataLayout
2730         + HasTargetSpec
2731         + HasTyCtxt<'tcx>
2732         + HasParamEnv<'tcx>,
2733 {
2734     fn of_fn_ptr(cx: &C, sig: ty::PolyFnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
2735         call::FnAbi::new_internal(cx, sig, extra_args, None, CodegenFnAttrFlags::empty(), false)
2736     }
2737
2738     fn of_instance(cx: &C, instance: ty::Instance<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
2739         let sig = instance.fn_sig_for_fn_abi(cx.tcx());
2740
2741         let caller_location = if instance.def.requires_caller_location(cx.tcx()) {
2742             Some(cx.tcx().caller_location_ty())
2743         } else {
2744             None
2745         };
2746
2747         let attrs = cx.tcx().codegen_fn_attrs(instance.def_id()).flags;
2748
2749         call::FnAbi::new_internal(
2750             cx,
2751             sig,
2752             extra_args,
2753             caller_location,
2754             attrs,
2755             matches!(instance.def, ty::InstanceDef::Virtual(..)),
2756         )
2757     }
2758
2759     fn new_internal(
2760         cx: &C,
2761         sig: ty::PolyFnSig<'tcx>,
2762         extra_args: &[Ty<'tcx>],
2763         caller_location: Option<Ty<'tcx>>,
2764         codegen_fn_attr_flags: CodegenFnAttrFlags,
2765         force_thin_self_ptr: bool,
2766     ) -> Self {
2767         debug!("FnAbi::new_internal({:?}, {:?})", sig, extra_args);
2768
2769         let sig = cx.tcx().normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), sig);
2770
2771         let conv = conv_from_spec_abi(cx.tcx(), sig.abi);
2772
2773         let mut inputs = sig.inputs();
2774         let extra_args = if sig.abi == RustCall {
2775             assert!(!sig.c_variadic && extra_args.is_empty());
2776
2777             if let Some(input) = sig.inputs().last() {
2778                 if let ty::Tuple(tupled_arguments) = input.kind() {
2779                     inputs = &sig.inputs()[0..sig.inputs().len() - 1];
2780                     tupled_arguments.iter().map(|k| k.expect_ty()).collect()
2781                 } else {
2782                     bug!(
2783                         "argument to function with \"rust-call\" ABI \
2784                             is not a tuple"
2785                     );
2786                 }
2787             } else {
2788                 bug!(
2789                     "argument to function with \"rust-call\" ABI \
2790                         is not a tuple"
2791                 );
2792             }
2793         } else {
2794             assert!(sig.c_variadic || extra_args.is_empty());
2795             extra_args.to_vec()
2796         };
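
        // Illustration of the untupling above (hypothetical name, not part
        // of this file): a "rust-call" function receives its formal
        // arguments as one tuple, which is flattened back into individual
        // ABI arguments here. `Fn*` trait calls are the source-level origin
        // of this shape:
        #[allow(dead_code)]
        fn _rust_call_sketch<F: FnOnce(i32, i32) -> i32>(f: F) -> i32 {
            // Desugars to `FnOnce::call_once(f, (1, 2))`; the arguments
            // travel as the single tuple `(1, 2)`.
            f(1, 2)
        }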
2797
2798         let target = &cx.tcx().sess.target;
2799         let target_env_gnu_like = matches!(&target.env[..], "gnu" | "musl");
2800         let win_x64_gnu = target.os == "windows" && target.arch == "x86_64" && target.env == "gnu";
2801         let linux_s390x_gnu_like =
2802             target.os == "linux" && target.arch == "s390x" && target_env_gnu_like;
2803         let linux_sparc64_gnu_like =
2804             target.os == "linux" && target.arch == "sparc64" && target_env_gnu_like;
2805         let linux_powerpc_gnu_like =
2806             target.os == "linux" && target.arch == "powerpc" && target_env_gnu_like;
2807         use SpecAbi::*;
2808         let rust_abi = matches!(sig.abi, RustIntrinsic | PlatformIntrinsic | Rust | RustCall);
2809
2810         // Handle safe Rust thin and fat pointers.
2811         let adjust_for_rust_scalar = |attrs: &mut ArgAttributes,
2812                                       scalar: &Scalar,
2813                                       layout: TyAndLayout<'tcx>,
2814                                       offset: Size,
2815                                       is_return: bool| {
2816             // Booleans are always an i1 that needs to be zero-extended.
2817             if scalar.is_bool() {
2818                 attrs.ext(ArgExtension::Zext);
2819                 return;
2820             }
2821
2822             // Only pointer types are handled below.
2823             if scalar.value != Pointer {
2824                 return;
2825             }
2826
2827             if !scalar.valid_range.contains_zero() {
2828                 attrs.set(ArgAttribute::NonNull);
2829             }
2830
2831             if let Some(pointee) = layout.pointee_info_at(cx, offset) {
2832                 if let Some(kind) = pointee.safe {
2833                     attrs.pointee_align = Some(pointee.align);
2834
2835                     // `Box` (`UniqueOwned`) pointers are not necessarily dereferenceable
2836                     // for the entire duration of the function as they can be deallocated
2837                     // at any time. Set their valid size to 0.
2838                     attrs.pointee_size = match kind {
2839                         PointerKind::UniqueOwned => Size::ZERO,
2840                         _ => pointee.size,
2841                     };
2842
2843                     // `Box` pointer parameters never alias because ownership is transferred.
2844                     // `&mut` pointer parameters never alias other parameters
2845                     // or mutable global data.
2846                     //
2847                     // `&T` where `T` contains no `UnsafeCell<U>` is immutable,
2848                     // and can be marked as both `readonly` and `noalias`, as
2849                     // LLVM's definition of `noalias` is based solely on memory
2850                     // dependencies rather than pointer equality
2851                     //
2852                     // Due to miscompiles in LLVM < 12, we apply a separate NoAliasMutRef attribute
2853                     // for UniqueBorrowed arguments, so that the codegen backend can decide
2854                     // whether or not to actually emit the attribute.
2855                     let no_alias = match kind {
2856                         PointerKind::Shared | PointerKind::UniqueBorrowed => false,
2857                         PointerKind::UniqueOwned => true,
2858                         PointerKind::Frozen => !is_return,
2859                     };
2860                     if no_alias {
2861                         attrs.set(ArgAttribute::NoAlias);
2862                     }
2863
2864                     if kind == PointerKind::Frozen && !is_return {
2865                         attrs.set(ArgAttribute::ReadOnly);
2866                     }
2867
2868                     if kind == PointerKind::UniqueBorrowed && !is_return {
2869                         attrs.set(ArgAttribute::NoAliasMutRef);
2870                     }
2871                 }
2872             }
2873         };
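
        // A sketch of the `Freeze` distinction used above (hypothetical
        // name, not part of this file): a type containing `UnsafeCell` is
        // not `Freeze`, so a `&T` pointing to it stays `Shared` and cannot
        // be marked `readonly`, because writes through a shared reference
        // are legal:
        #[allow(dead_code)]
        fn _interior_mutability_sketch(flag: &std::cell::Cell<bool>) {
            // A write through `&Cell<bool>`; this is why `Frozen` requires
            // the pointee to be `Freeze`.
            flag.set(true);
        }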
2874
2875         let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| {
2876             let is_return = arg_idx.is_none();
2877
2878             let layout = cx.layout_of(ty);
2879             let layout = if force_thin_self_ptr && arg_idx == Some(0) {
2880                 // Don't pass the vtable, it's not an argument of the virtual fn.
2881                 // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
2882                 // or `&/&mut dyn Trait` because this is special-cased elsewhere in codegen
2883                 make_thin_self_ptr(cx, layout)
2884             } else {
2885                 layout
2886             };
2887
2888             let mut arg = ArgAbi::new(cx, layout, |layout, scalar, offset| {
2889                 let mut attrs = ArgAttributes::new();
2890                 adjust_for_rust_scalar(&mut attrs, scalar, *layout, offset, is_return);
2891                 attrs
2892             });
2893
2894             if arg.layout.is_zst() {
2895                 // For some forsaken reason, x86_64-pc-windows-gnu
2896                 // doesn't ignore zero-sized struct arguments.
2897                 // The same is true for {s390x,sparc64,powerpc}-unknown-linux-{gnu,musl}.
2898                 if is_return
2899                     || rust_abi
2900                     || (!win_x64_gnu
2901                         && !linux_s390x_gnu_like
2902                         && !linux_sparc64_gnu_like
2903                         && !linux_powerpc_gnu_like)
2904                 {
2905                     arg.mode = PassMode::Ignore;
2906                 }
2907             }
2908
2909             arg
2910         };
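
        // Illustration of the ZST case above (hypothetical name, not part
        // of this file): an empty struct occupies no storage, which is why
        // `PassMode::Ignore` is sound wherever the platform ABI permits it
        // (the gnu-like targets above are the exceptions):
        #[allow(dead_code)]
        fn _zst_sketch() {
            struct Empty;
            assert_eq!(std::mem::size_of::<Empty>(), 0);
        }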
2911
2912         let mut fn_abi = FnAbi {
2913             ret: arg_of(sig.output(), None),
2914             args: inputs
2915                 .iter()
2916                 .cloned()
2917                 .chain(extra_args)
2918                 .chain(caller_location)
2919                 .enumerate()
2920                 .map(|(i, ty)| arg_of(ty, Some(i)))
2921                 .collect(),
2922             c_variadic: sig.c_variadic,
2923             fixed_count: inputs.len(),
2924             conv,
2925             can_unwind: fn_can_unwind(cx.tcx(), codegen_fn_attr_flags, sig.abi),
2926         };
2927         fn_abi.adjust_for_abi(cx, sig.abi);
2928         debug!("FnAbi::new_internal = {:?}", fn_abi);
2929         fn_abi
2930     }
2931
2932     fn adjust_for_abi(&mut self, cx: &C, abi: SpecAbi) {
2933         if abi == SpecAbi::Unadjusted {
2934             return;
2935         }
2936
2937         if abi == SpecAbi::Rust
2938             || abi == SpecAbi::RustCall
2939             || abi == SpecAbi::RustIntrinsic
2940             || abi == SpecAbi::PlatformIntrinsic
2941         {
2942             let fixup = |arg: &mut ArgAbi<'tcx, Ty<'tcx>>| {
2943                 if arg.is_ignore() {
2944                     return;
2945                 }
2946
2947                 match arg.layout.abi {
2948                     Abi::Aggregate { .. } => {}
2949
2950                     // This is a fun case! The gist of what this is doing is
2951                     // that we want callers and callees to always agree on the
2952                     // ABI of how they pass SIMD arguments. If we were to *not*
2953                     // make these arguments indirect then they'd be immediates
2954                     // in LLVM, which means that they'd use whatever the
2955                     // appropriate ABI is for the callee and the caller. That
2956                     // means, for example, if the caller doesn't have AVX
2957                     // enabled but the callee does, then passing an AVX argument
2958                     // across this boundary would cause corrupt data to show up.
2959                     //
2960                     // This problem is fixed by unconditionally passing SIMD
2961                     // arguments through memory between callers and callees
2962                     // which should get them all to agree on ABI regardless of
2963                     // target feature sets. Some more information about this
2964                     // issue can be found in #44367.
2965                     //
2966                     // Note that the platform intrinsic ABI is exempt here as
2967                     // that's how we connect up to LLVM and it's unstable
2968                     // anyway; we control all calls to it in libstd.
2969                     Abi::Vector { .. }
2970                         if abi != SpecAbi::PlatformIntrinsic
2971                             && cx.tcx().sess.target.simd_types_indirect =>
2972                     {
2973                         arg.make_indirect();
2974                         return;
2975                     }
2976
2977                     _ => return,
2978                 }
2979
2980                 // Pass and return structures up to 2 pointers in size by value, matching `ScalarPair`.
2981                 // LLVM will usually pass these in 2 registers, which is more efficient than by-ref.
2982                 let max_by_val_size = Pointer.size(cx) * 2;
2983                 let size = arg.layout.size;
2984
2985                 if arg.layout.is_unsized() || size > max_by_val_size {
2986                     arg.make_indirect();
2987                 } else {
2988                     // We want to pass small aggregates as immediates, but using
2989                     // an LLVM aggregate type for this leads to bad optimizations,
2990                     // so we pick an appropriately sized integer type instead.
2991                     arg.cast_to(Reg { kind: RegKind::Integer, size });
2992                 }
2993             };
2994             fixup(&mut self.ret);
2995             for arg in &mut self.args {
2996                 fixup(arg);
2997             }
2998             return;
2999         }
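
        // Sketches of the two `fixup` rules above (hypothetical names, not
        // part of this file). A SIMD value passed between functions with
        // mismatched target features is the hazard that forces indirection:
        #[allow(dead_code)]
        #[cfg(target_arch = "x86_64")]
        mod _simd_sketch {
            // If a non-AVX caller passed `__m256` to this callee in
            // registers, the two sides would disagree on the ABI; passing
            // through memory sidesteps that.
            #[target_feature(enable = "avx")]
            pub unsafe fn _avx_callee(_v: std::arch::x86_64::__m256) {}
        }

        // And a two-pointer-sized aggregate is small enough to be cast to
        // an integer rather than passed indirectly:
        #[allow(dead_code)]
        fn _small_aggregate_sketch(x: (usize, usize)) -> usize {
            // `size_of::<(usize, usize)>()` equals `2 * Pointer.size(cx)`,
            // exactly the by-value limit computed above.
            x.0 + x.1
        }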
3000
3001         if let Err(msg) = self.adjust_for_cabi(cx, abi) {
3002             cx.tcx().sess.fatal(&msg);
3003         }
3004     }
3005 }
3006
3007 fn make_thin_self_ptr<'tcx, C>(cx: &C, mut layout: TyAndLayout<'tcx>) -> TyAndLayout<'tcx>
3008 where
3009     C: LayoutOf<'tcx, Ty = Ty<'tcx>, TyAndLayout = TyAndLayout<'tcx>>
3010         + HasTyCtxt<'tcx>
3011         + HasParamEnv<'tcx>,
3012 {
3013     let fat_pointer_ty = if layout.is_unsized() {
3014         // unsized `self` is passed as a pointer to `self`
3015         // FIXME (mikeyhew) change this to use &own if it is ever added to the language
3016         cx.tcx().mk_mut_ptr(layout.ty)
3017     } else {
3018         match layout.abi {
3019             Abi::ScalarPair(..) => (),
3020             _ => bug!("receiver type has unsupported layout: {:?}", layout),
3021         }
3022
3023         // In the case of Rc<Self>, we need to explicitly pass a *mut RcBox<Self>
3024         // with a Scalar (not ScalarPair) ABI. This is a hack that is understood
3025         // elsewhere in the compiler as a method on a `dyn Trait`.
3026         // To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes until we
3027         // get a built-in pointer type
3028         let mut fat_pointer_layout = layout;
3029         'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr()
3030             && !fat_pointer_layout.ty.is_region_ptr()
3031         {
3032             for i in 0..fat_pointer_layout.fields.count() {
3033                 let field_layout = fat_pointer_layout.field(cx, i);
3034
3035                 if !field_layout.is_zst() {
3036                     fat_pointer_layout = field_layout;
3037                     continue 'descend_newtypes;
3038                 }
3039             }
3040
3041             bug!("receiver has no non-zero-sized fields {:?}", fat_pointer_layout);
3042         }
3043
3044         fat_pointer_layout.ty
3045     };
3046
3047     // We now have a type like `*mut RcBox<dyn Trait>`. Change its layout to
3048     // that of `*mut ()`, a thin pointer, but keep the same type; this is
3049     // understood as a special case elsewhere in the compiler.
3050     let unit_pointer_ty = cx.tcx().mk_mut_ptr(cx.tcx().mk_unit());
3051     layout = cx.layout_of(unit_pointer_ty);
3052     layout.ty = fat_pointer_ty;
3053     layout
3054 }
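
// A sketch (not part of this file; hypothetical names) of the receiver
// shape handled above: a `dyn Trait` method with an `Rc<Self>` receiver is
// called with only the data pointer, and `make_thin_self_ptr` gives that
// argument the layout of `*mut ()` while keeping its original type.
#[allow(dead_code)]
mod _thin_self_ptr_sketch {
    use std::rc::Rc;

    trait Speak {
        fn speak(self: Rc<Self>) -> &'static str;
    }

    struct Dog;

    impl Speak for Dog {
        fn speak(self: Rc<Self>) -> &'static str {
            "woof"
        }
    }

    fn _virtual_call(d: Rc<dyn Speak>) -> &'static str {
        // Virtual call: `self` is passed as a thin pointer to the
        // allocation, with the vtable coming from the trait object.
        d.speak()
    }
}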