compiler/rustc_middle/src/ty/layout.rs
use crate::ich::StableHashingContext;
use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use crate::mir::{GeneratorLayout, GeneratorSavedLocal};
use crate::ty::subst::Subst;
use crate::ty::{self, subst::SubstsRef, ReprOptions, Ty, TyCtxt, TypeFoldable};

use rustc_ast as ast;
use rustc_attr as attr;
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_hir as hir;
use rustc_hir::lang_items::LangItem;
use rustc_index::bit_set::BitSet;
use rustc_index::vec::{Idx, IndexVec};
use rustc_session::{config::OptLevel, DataTypeKind, FieldInfo, SizeKind, VariantInfo};
use rustc_span::symbol::{Ident, Symbol};
use rustc_span::DUMMY_SP;
use rustc_target::abi::call::{
    ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, Conv, FnAbi, PassMode, Reg, RegKind,
};
use rustc_target::abi::*;
use rustc_target::spec::{abi::Abi as SpecAbi, HasTargetSpec, PanicStrategy};

use std::cmp;
use std::fmt;
use std::iter;
use std::mem;
use std::num::NonZeroUsize;
use std::ops::Bound;

pub trait IntegerExt {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx>;
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
    fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer;
    fn from_uint_ty<C: HasDataLayout>(cx: &C, uty: ty::UintTy) -> Integer;
    fn repr_discr<'tcx>(
        tcx: TyCtxt<'tcx>,
        ty: Ty<'tcx>,
        repr: &ReprOptions,
        min: i128,
        max: i128,
    ) -> (Integer, bool);
}

impl IntegerExt for Integer {
    #[inline]
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx> {
        match (*self, signed) {
            (I8, false) => tcx.types.u8,
            (I16, false) => tcx.types.u16,
            (I32, false) => tcx.types.u32,
            (I64, false) => tcx.types.u64,
            (I128, false) => tcx.types.u128,
            (I8, true) => tcx.types.i8,
            (I16, true) => tcx.types.i16,
            (I32, true) => tcx.types.i32,
            (I64, true) => tcx.types.i64,
            (I128, true) => tcx.types.i128,
        }
    }

    /// Gets the Integer type from an attr::IntType.
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
        let dl = cx.data_layout();

        match ity {
            attr::SignedInt(ast::IntTy::I8) | attr::UnsignedInt(ast::UintTy::U8) => I8,
            attr::SignedInt(ast::IntTy::I16) | attr::UnsignedInt(ast::UintTy::U16) => I16,
            attr::SignedInt(ast::IntTy::I32) | attr::UnsignedInt(ast::UintTy::U32) => I32,
            attr::SignedInt(ast::IntTy::I64) | attr::UnsignedInt(ast::UintTy::U64) => I64,
            attr::SignedInt(ast::IntTy::I128) | attr::UnsignedInt(ast::UintTy::U128) => I128,
            attr::SignedInt(ast::IntTy::Isize) | attr::UnsignedInt(ast::UintTy::Usize) => {
                dl.ptr_sized_integer()
            }
        }
    }

    fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer {
        match ity {
            ty::IntTy::I8 => I8,
            ty::IntTy::I16 => I16,
            ty::IntTy::I32 => I32,
            ty::IntTy::I64 => I64,
            ty::IntTy::I128 => I128,
            ty::IntTy::Isize => cx.data_layout().ptr_sized_integer(),
        }
    }
    fn from_uint_ty<C: HasDataLayout>(cx: &C, ity: ty::UintTy) -> Integer {
        match ity {
            ty::UintTy::U8 => I8,
            ty::UintTy::U16 => I16,
            ty::UintTy::U32 => I32,
            ty::UintTy::U64 => I64,
            ty::UintTy::U128 => I128,
            ty::UintTy::Usize => cx.data_layout().ptr_sized_integer(),
        }
    }

    /// Finds the appropriate Integer type and signedness for the given
    /// signed discriminant range and `#[repr]` attribute.
    /// N.B.: `u128` values above `i128::MAX` will be treated as signed, but
    /// that shouldn't affect anything, other than maybe debuginfo.
    fn repr_discr<'tcx>(
        tcx: TyCtxt<'tcx>,
        ty: Ty<'tcx>,
        repr: &ReprOptions,
        min: i128,
        max: i128,
    ) -> (Integer, bool) {
        // Theoretically, negative values could be larger in unsigned representation
        // than the unsigned representation of the signed minimum. However, if there
        // are any negative values, the only valid unsigned representation is u128
        // which can fit all i128 values, so the result remains unaffected.
        let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
        let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));

        if let Some(ity) = repr.int {
            let discr = Integer::from_attr(&tcx, ity);
            let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
            if discr < fit {
                bug!(
                    "Integer::repr_discr: `#[repr]` hint too small for \
                      discriminant range of enum `{}`",
                    ty
                )
            }
            return (discr, ity.is_signed());
        }

        let at_least = if repr.c() {
            // This is usually I32, however it can be different on some platforms,
            // notably hexagon and arm-none/thumb-none
            tcx.data_layout().c_enum_min_size
        } else {
            // repr(Rust) enums try to be as small as possible
            I8
        };

        // If there are no negative values, we can use the unsigned fit.
        if min >= 0 {
            (cmp::max(unsigned_fit, at_least), false)
        } else {
            (cmp::max(signed_fit, at_least), true)
        }
    }
}
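
// Worked example (an illustration, not drawn from this file): for an enum whose
// inhabited discriminants span -1..=100 with no `#[repr]` hint, `signed_fit` is
// I8 while `unsigned_fit` is I128 (because `-1 as u128` wraps to `u128::MAX`),
// and since `min < 0` the signed branch is taken, so `repr_discr` returns
// `(I8, true)`: an 8-bit signed tag.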

pub trait PrimitiveExt {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
    fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
}

impl PrimitiveExt for Primitive {
    #[inline]
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
        match *self {
            Int(i, signed) => i.to_ty(tcx, signed),
            F32 => tcx.types.f32,
            F64 => tcx.types.f64,
            Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
        }
    }

    /// Return an *integer* type matching this primitive.
    /// Useful in particular when dealing with enum discriminants.
    #[inline]
    fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
        match *self {
            Int(i, signed) => i.to_ty(tcx, signed),
            Pointer => tcx.types.usize,
            F32 | F64 => bug!("floats do not have an int type"),
        }
    }
}

/// The first half of a fat pointer.
///
/// - For a trait object, this is the address of the box.
/// - For a slice, this is the base address.
pub const FAT_PTR_ADDR: usize = 0;

/// The second half of a fat pointer.
///
/// - For a trait object, this is the address of the vtable.
/// - For a slice, this is the length.
pub const FAT_PTR_EXTRA: usize = 1;
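
// Illustration: for `&[u8]`, field FAT_PTR_ADDR is the data pointer and field
// FAT_PTR_EXTRA is the `usize` length; for `&dyn Trait`, FAT_PTR_EXTRA is the
// vtable pointer instead.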

/// The maximum supported number of lanes in a SIMD vector.
///
/// This value is selected based on backend support:
/// * LLVM does not appear to have a vector width limit.
/// * Cranelift stores the base-2 log of the lane count in a 4 bit integer.
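///
/// A 4-bit integer can hold a base-2 log of at most 15, hence the cap of
/// `1 << 0xF`, i.e. 2^15 = 32768 lanes.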
pub const MAX_SIMD_LANES: u64 = 1 << 0xF;

#[derive(Copy, Clone, Debug, TyEncodable, TyDecodable)]
pub enum LayoutError<'tcx> {
    Unknown(Ty<'tcx>),
    SizeOverflow(Ty<'tcx>),
}

impl<'tcx> fmt::Display for LayoutError<'tcx> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match *self {
            LayoutError::Unknown(ty) => write!(f, "the type `{}` has an unknown layout", ty),
            LayoutError::SizeOverflow(ty) => {
                write!(f, "values of the type `{}` are too big for the current architecture", ty)
            }
        }
    }
}

fn layout_of<'tcx>(
    tcx: TyCtxt<'tcx>,
    query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
    ty::tls::with_related_context(tcx, move |icx| {
        let (param_env, ty) = query.into_parts();

        if !tcx.recursion_limit().value_within_limit(icx.layout_depth) {
            tcx.sess.fatal(&format!("overflow representing the type `{}`", ty));
        }

        // Update the ImplicitCtxt to increase the layout_depth
        let icx = ty::tls::ImplicitCtxt { layout_depth: icx.layout_depth + 1, ..icx.clone() };

        ty::tls::enter_context(&icx, |_| {
            let param_env = param_env.with_reveal_all_normalized(tcx);
            let unnormalized_ty = ty;
            let ty = tcx.normalize_erasing_regions(param_env, ty);
            if ty != unnormalized_ty {
                // Ensure this layout is also cached for the normalized type.
                return tcx.layout_of(param_env.and(ty));
            }

            let cx = LayoutCx { tcx, param_env };

            let layout = cx.layout_of_uncached(ty)?;
            let layout = TyAndLayout { ty, layout };

            cx.record_layout_for_printing(layout);

            // Type-level uninhabitedness should always imply ABI uninhabitedness.
            if tcx.conservative_is_privately_uninhabited(param_env.and(ty)) {
                assert!(layout.abi.is_uninhabited());
            }

            Ok(layout)
        })
    })
}

pub fn provide(providers: &mut ty::query::Providers) {
    *providers = ty::query::Providers { layout_of, ..*providers };
}

pub struct LayoutCx<'tcx, C> {
    pub tcx: C,
    pub param_env: ty::ParamEnv<'tcx>,
}

#[derive(Copy, Clone, Debug)]
enum StructKind {
    /// A tuple, closure, or univariant which cannot be coerced to unsized.
    AlwaysSized,
    /// A univariant, the last field of which may be coerced to unsized.
    MaybeUnsized,
    /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
    Prefixed(Size, Align),
}

// Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
// This is used to go between `memory_index` (source field order to memory order)
// and `inverse_memory_index` (memory order to source field order).
// See also `FieldsShape::Arbitrary::memory_index` for more details.
// FIXME(eddyb) build a better abstraction for permutations, if possible.
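//
// Worked example (illustration): `invert_mapping(&[2, 0, 1])` returns
// `vec![1, 2, 0]`, because `map[0] = 2` puts source field 0 at memory
// position 2, so the inverse must record `inverse[2] = 0`, and so on.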
fn invert_mapping(map: &[u32]) -> Vec<u32> {
    let mut inverse = vec![0; map.len()];
    for i in 0..map.len() {
        inverse[map[i] as usize] = i as u32;
    }
    inverse
}

impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
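    // Worked example (an illustration, assuming a typical 64-bit data layout):
    // for `a` = a `u8` scalar (size 1, align 1) and `b` = a `u32` scalar
    // (size 4, align 4), `b_offset` is 4 bytes (1 rounded up to `b`'s
    // alignment) and the pair occupies 8 bytes at 4-byte ABI alignment.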
    fn scalar_pair(&self, a: Scalar, b: Scalar) -> Layout {
        let dl = self.data_layout();
        let b_align = b.value.align(dl);
        let align = a.value.align(dl).max(b_align).max(dl.aggregate_align);
        let b_offset = a.value.size(dl).align_to(b_align.abi);
        let size = (b_offset + b.value.size(dl)).align_to(align.abi);

        // HACK(nox): We iter on `b` and then `a` because `max_by_key`
        // returns the last maximum.
        let largest_niche = Niche::from_scalar(dl, b_offset, b.clone())
            .into_iter()
            .chain(Niche::from_scalar(dl, Size::ZERO, a.clone()))
            .max_by_key(|niche| niche.available(dl));

        Layout {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Arbitrary {
                offsets: vec![Size::ZERO, b_offset],
                memory_index: vec![0, 1],
            },
            abi: Abi::ScalarPair(a, b),
            largest_niche,
            align,
            size,
        }
    }

    fn univariant_uninterned(
        &self,
        ty: Ty<'tcx>,
        fields: &[TyAndLayout<'_>],
        repr: &ReprOptions,
        kind: StructKind,
    ) -> Result<Layout, LayoutError<'tcx>> {
        let dl = self.data_layout();
        let pack = repr.pack;
        if pack.is_some() && repr.align.is_some() {
            self.tcx.sess.delay_span_bug(DUMMY_SP, "struct cannot be packed and aligned");
            return Err(LayoutError::Unknown(ty));
        }

        let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };

        let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();

        let optimize = !repr.inhibit_struct_field_reordering_opt();
        if optimize {
            let end =
                if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
            let optimizing = &mut inverse_memory_index[..end];
            let field_align = |f: &TyAndLayout<'_>| {
                if let Some(pack) = pack { f.align.abi.min(pack) } else { f.align.abi }
            };
            match kind {
                StructKind::AlwaysSized | StructKind::MaybeUnsized => {
                    optimizing.sort_by_key(|&x| {
                        // Place ZSTs first to avoid "interesting offsets",
                        // especially with only one or two non-ZST fields.
                        let f = &fields[x as usize];
                        (!f.is_zst(), cmp::Reverse(field_align(f)))
                    });
                }
                StructKind::Prefixed(..) => {
                    // Sort in ascending alignment so that the layout stays optimal
                    // regardless of the prefix
                    optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
                }
            }
        }

        // inverse_memory_index holds field indices by increasing memory offset.
        // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
        // We now write field offsets to the corresponding offset slot;
        // field 5 with offset 0 puts 0 in offsets[5].
        // At the bottom of this function, we invert `inverse_memory_index` to
        // produce `memory_index` (see `invert_mapping`).
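        //
        // Illustration (assuming the decreasing-alignment sort above): for
        // fields (u8, u32, u16), the memory order is u32, u16, u8, so
        // inverse_memory_index = [1, 2, 0], and inverting it at the bottom
        // yields memory_index = [2, 0, 1].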

        let mut sized = true;
        let mut offsets = vec![Size::ZERO; fields.len()];
        let mut offset = Size::ZERO;
        let mut largest_niche = None;
        let mut largest_niche_available = 0;

        if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
            let prefix_align =
                if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
            align = align.max(AbiAndPrefAlign::new(prefix_align));
            offset = prefix_size.align_to(prefix_align);
        }

        for &i in &inverse_memory_index {
            let field = fields[i as usize];
            if !sized {
                self.tcx.sess.delay_span_bug(
                    DUMMY_SP,
                    &format!(
                        "univariant: field #{} of `{}` comes after unsized field",
                        offsets.len(),
                        ty
                    ),
                );
            }

            if field.is_unsized() {
                sized = false;
            }

            // Invariant: offset < dl.obj_size_bound() <= 1<<61
            let field_align = if let Some(pack) = pack {
                field.align.min(AbiAndPrefAlign::new(pack))
            } else {
                field.align
            };
            offset = offset.align_to(field_align.abi);
            align = align.max(field_align);

            debug!("univariant offset: {:?} field: {:#?}", offset, field);
            offsets[i as usize] = offset;

            if !repr.hide_niche() {
                if let Some(mut niche) = field.largest_niche.clone() {
                    let available = niche.available(dl);
                    if available > largest_niche_available {
                        largest_niche_available = available;
                        niche.offset += offset;
                        largest_niche = Some(niche);
                    }
                }
            }

            offset = offset.checked_add(field.size, dl).ok_or(LayoutError::SizeOverflow(ty))?;
        }

        if let Some(repr_align) = repr.align {
            align = align.max(AbiAndPrefAlign::new(repr_align));
        }

        debug!("univariant min_size: {:?}", offset);
        let min_size = offset;

        // As stated above, inverse_memory_index holds field indices by increasing offset.
        // This makes it an already-sorted view of the offsets vec.
        // To invert it, consider:
        // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
        // Field 5 would be the first element, so memory_index is i:
        // Note: if we didn't optimize, it's already right.

        let memory_index =
            if optimize { invert_mapping(&inverse_memory_index) } else { inverse_memory_index };

        let size = min_size.align_to(align.abi);
        let mut abi = Abi::Aggregate { sized };

        // Unpack newtype ABIs and find scalar pairs.
        if sized && size.bytes() > 0 {
            // All other fields must be ZSTs.
            let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());

            match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
                // We have exactly one non-ZST field.
                (Some((i, field)), None, None) => {
                    // Field fills the struct and it has a scalar or scalar pair ABI.
                    if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size
                    {
                        match field.abi {
                            // For plain scalars, or vectors of them, we can't unpack
                            // newtypes for `#[repr(C)]`, as that affects C ABIs.
                            Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
                                abi = field.abi.clone();
                            }
                            // But scalar pairs are Rust-specific and get
                            // treated as aggregates by C ABIs anyway.
                            Abi::ScalarPair(..) => {
                                abi = field.abi.clone();
                            }
                            _ => {}
                        }
                    }
                }

                // Two non-ZST fields, and they're both scalars.
                (
                    Some((i, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(ref a), .. }, .. })),
                    Some((j, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(ref b), .. }, .. })),
                    None,
                ) => {
                    // Order by the memory placement, not source order.
                    let ((i, a), (j, b)) =
                        if offsets[i] < offsets[j] { ((i, a), (j, b)) } else { ((j, b), (i, a)) };
                    let pair = self.scalar_pair(a.clone(), b.clone());
                    let pair_offsets = match pair.fields {
                        FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
                            assert_eq!(memory_index, &[0, 1]);
                            offsets
                        }
                        _ => bug!(),
                    };
                    if offsets[i] == pair_offsets[0]
                        && offsets[j] == pair_offsets[1]
                        && align == pair.align
                        && size == pair.size
                    {
                        // We can use `ScalarPair` only when it matches our
                        // already computed layout (including `#[repr(C)]`).
                        abi = pair.abi;
                    }
                }

                _ => {}
            }
        }

        if sized && fields.iter().any(|f| f.abi.is_uninhabited()) {
            abi = Abi::Uninhabited;
        }

        Ok(Layout {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Arbitrary { offsets, memory_index },
            abi,
            largest_niche,
            align,
            size,
        })
    }

    fn layout_of_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx Layout, LayoutError<'tcx>> {
        let tcx = self.tcx;
        let param_env = self.param_env;
        let dl = self.data_layout();
        let scalar_unit = |value: Primitive| {
            let bits = value.size(dl).bits();
            assert!(bits <= 128);
            Scalar { value, valid_range: WrappingRange { start: 0, end: (!0 >> (128 - bits)) } }
        };
        let scalar = |value: Primitive| tcx.intern_layout(Layout::scalar(self, scalar_unit(value)));

        let univariant = |fields: &[TyAndLayout<'_>], repr: &ReprOptions, kind| {
            Ok(tcx.intern_layout(self.univariant_uninterned(ty, fields, repr, kind)?))
        };
        debug_assert!(!ty.has_infer_types_or_consts());

        Ok(match *ty.kind() {
            // Basic scalars.
            ty::Bool => tcx.intern_layout(Layout::scalar(
                self,
                Scalar { value: Int(I8, false), valid_range: WrappingRange { start: 0, end: 1 } },
            )),
            ty::Char => tcx.intern_layout(Layout::scalar(
                self,
                Scalar {
                    value: Int(I32, false),
                    valid_range: WrappingRange { start: 0, end: 0x10FFFF },
                },
            )),
            ty::Int(ity) => scalar(Int(Integer::from_int_ty(dl, ity), true)),
            ty::Uint(ity) => scalar(Int(Integer::from_uint_ty(dl, ity), false)),
            ty::Float(fty) => scalar(match fty {
                ty::FloatTy::F32 => F32,
                ty::FloatTy::F64 => F64,
            }),
            ty::FnPtr(_) => {
                let mut ptr = scalar_unit(Pointer);
                ptr.valid_range = ptr.valid_range.with_start(1);
                tcx.intern_layout(Layout::scalar(self, ptr))
            }

            // The never type.
            ty::Never => tcx.intern_layout(Layout {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldsShape::Primitive,
                abi: Abi::Uninhabited,
                largest_niche: None,
                align: dl.i8_align,
                size: Size::ZERO,
            }),

            // Potentially-wide pointers.
            ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                let mut data_ptr = scalar_unit(Pointer);
                if !ty.is_unsafe_ptr() {
                    data_ptr.valid_range = data_ptr.valid_range.with_start(1);
                }

                let pointee = tcx.normalize_erasing_regions(param_env, pointee);
                if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
                    return Ok(tcx.intern_layout(Layout::scalar(self, data_ptr)));
                }

                let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
                let metadata = match unsized_part.kind() {
                    ty::Foreign(..) => {
                        return Ok(tcx.intern_layout(Layout::scalar(self, data_ptr)));
                    }
                    ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)),
                    ty::Dynamic(..) => {
                        let mut vtable = scalar_unit(Pointer);
                        vtable.valid_range = vtable.valid_range.with_start(1);
                        vtable
                    }
                    _ => return Err(LayoutError::Unknown(unsized_part)),
                };

                // Effectively a (ptr, meta) tuple.
                tcx.intern_layout(self.scalar_pair(data_ptr, metadata))
            }

            // Arrays and slices.
            ty::Array(element, mut count) => {
                if count.has_projections() {
                    count = tcx.normalize_erasing_regions(param_env, count);
                    if count.has_projections() {
                        return Err(LayoutError::Unknown(ty));
                    }
                }

                let count = count.try_eval_usize(tcx, param_env).ok_or(LayoutError::Unknown(ty))?;
                let element = self.layout_of(element)?;
                let size =
                    element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;

                let abi =
                    if count != 0 && tcx.conservative_is_privately_uninhabited(param_env.and(ty)) {
                        Abi::Uninhabited
                    } else {
                        Abi::Aggregate { sized: true }
                    };

                let largest_niche = if count != 0 { element.largest_niche.clone() } else { None };

                tcx.intern_layout(Layout {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldsShape::Array { stride: element.size, count },
                    abi,
                    largest_niche,
                    align: element.align,
                    size,
                })
            }
            ty::Slice(element) => {
                let element = self.layout_of(element)?;
                tcx.intern_layout(Layout {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldsShape::Array { stride: element.size, count: 0 },
                    abi: Abi::Aggregate { sized: false },
                    largest_niche: None,
                    align: element.align,
                    size: Size::ZERO,
                })
            }
            ty::Str => tcx.intern_layout(Layout {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
                abi: Abi::Aggregate { sized: false },
                largest_niche: None,
                align: dl.i8_align,
                size: Size::ZERO,
            }),

            // Odd unit types.
            ty::FnDef(..) => univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?,
            ty::Dynamic(..) | ty::Foreign(..) => {
                let mut unit = self.univariant_uninterned(
                    ty,
                    &[],
                    &ReprOptions::default(),
                    StructKind::AlwaysSized,
                )?;
                match unit.abi {
                    Abi::Aggregate { ref mut sized } => *sized = false,
                    _ => bug!(),
                }
                tcx.intern_layout(unit)
            }

            ty::Generator(def_id, substs, _) => self.generator_layout(ty, def_id, substs)?,

            ty::Closure(_, ref substs) => {
                let tys = substs.as_closure().upvar_tys();
                univariant(
                    &tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    StructKind::AlwaysSized,
                )?
            }

            ty::Tuple(tys) => {
                let kind =
                    if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };

                univariant(
                    &tys.iter()
                        .map(|k| self.layout_of(k.expect_ty()))
                        .collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    kind,
                )?
            }

            // SIMD vector types.
            ty::Adt(def, substs) if def.repr.simd() => {
                if !def.is_struct() {
                    // Should have yielded E0517 by now.
                    tcx.sess.delay_span_bug(
                        DUMMY_SP,
                        "#[repr(simd)] was applied to an ADT that is not a struct",
                    );
                    return Err(LayoutError::Unknown(ty));
                }

                // Supported SIMD vectors are homogeneous ADTs with at least one field:
                //
                // * #[repr(simd)] struct S(T, T, T, T);
                // * #[repr(simd)] struct S { x: T, y: T, z: T, w: T }
                // * #[repr(simd)] struct S([T; 4])
                //
                // where T is a primitive scalar (integer/float/pointer).

                // SIMD vectors with zero fields are not supported.
                // (should be caught by typeck)
                if def.non_enum_variant().fields.is_empty() {
                    tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
                }

                // Type of the first ADT field:
                let f0_ty = def.non_enum_variant().fields[0].ty(tcx, substs);

                // Heterogeneous SIMD vectors are not supported:
                // (should be caught by typeck)
                for fi in &def.non_enum_variant().fields {
                    if fi.ty(tcx, substs) != f0_ty {
                        tcx.sess.fatal(&format!("monomorphising heterogeneous SIMD type `{}`", ty));
                    }
                }

                // The element type and number of elements of the SIMD vector
                // are obtained from:
                //
                // * the element type and length of the single array field, if
                // the first field is of array type, or
                //
                // * the homogeneous field type and the number of fields.
                let (e_ty, e_len, is_array) = if let ty::Array(e_ty, _) = f0_ty.kind() {
                    // First ADT field is an array:

                    // SIMD vectors with multiple array fields are not supported:
                    // (should be caught by typeck)
                    if def.non_enum_variant().fields.len() != 1 {
                        tcx.sess.fatal(&format!(
                            "monomorphising SIMD type `{}` with more than one array field",
                            ty
                        ));
                    }

                    // Extract the number of elements from the layout of the array field:
                    let len = if let Ok(TyAndLayout {
                        layout: Layout { fields: FieldsShape::Array { count, .. }, .. },
                        ..
                    }) = self.layout_of(f0_ty)
                    {
                        count
                    } else {
                        return Err(LayoutError::Unknown(ty));
                    };

                    (*e_ty, *len, true)
                } else {
                    // First ADT field is not an array:
                    (f0_ty, def.non_enum_variant().fields.len() as _, false)
                };

                // SIMD vectors of zero length are not supported.
                // Additionally, lengths are capped at 2^16 as a fixed maximum backends must
                // support.
                //
                // Can't be caught in typeck if the array length is generic.
                if e_len == 0 {
                    tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
                } else if e_len > MAX_SIMD_LANES {
                    tcx.sess.fatal(&format!(
                        "monomorphising SIMD type `{}` of length greater than {}",
                        ty, MAX_SIMD_LANES,
                    ));
                }

                // Compute the ABI of the element type:
                let e_ly = self.layout_of(e_ty)?;
                let e_abi = if let Abi::Scalar(ref scalar) = e_ly.abi {
                    scalar.clone()
                } else {
                    // This error isn't caught in typeck, e.g., if
                    // the element type of the vector is generic.
                    tcx.sess.fatal(&format!(
                        "monomorphising SIMD type `{}` with a non-primitive-scalar \
                        (integer/float/pointer) element type `{}`",
                        ty, e_ty
                    ))
                };

                // Compute the size and alignment of the vector:
                let size = e_ly.size.checked_mul(e_len, dl).ok_or(LayoutError::SizeOverflow(ty))?;
                let align = dl.vector_align(size);
                let size = size.align_to(align.abi);

                // Compute the placement of the vector fields:
                let fields = if is_array {
                    FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] }
                } else {
                    FieldsShape::Array { stride: e_ly.size, count: e_len }
                };

                tcx.intern_layout(Layout {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields,
                    abi: Abi::Vector { element: e_abi, count: e_len },
                    largest_niche: e_ly.largest_niche.clone(),
                    size,
                    align,
                })
            }

            // ADTs.
            ty::Adt(def, substs) => {
                // Cache the field layouts.
                let variants = def
                    .variants
                    .iter()
                    .map(|v| {
                        v.fields
                            .iter()
                            .map(|field| self.layout_of(field.ty(tcx, substs)))
                            .collect::<Result<Vec<_>, _>>()
                    })
                    .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                if def.is_union() {
                    if def.repr.pack.is_some() && def.repr.align.is_some() {
                        self.tcx.sess.delay_span_bug(
                            tcx.def_span(def.did),
                            "union cannot be packed and aligned",
                        );
                        return Err(LayoutError::Unknown(ty));
                    }

                    let mut align =
                        if def.repr.pack.is_some() { dl.i8_align } else { dl.aggregate_align };

                    if let Some(repr_align) = def.repr.align {
                        align = align.max(AbiAndPrefAlign::new(repr_align));
                    }

                    let optimize = !def.repr.inhibit_union_abi_opt();
                    let mut size = Size::ZERO;
                    let mut abi = Abi::Aggregate { sized: true };
                    let index = VariantIdx::new(0);
                    for field in &variants[index] {
                        assert!(!field.is_unsized());
                        align = align.max(field.align);

                        // If all non-ZST fields have the same ABI, forward this ABI
                        if optimize && !field.is_zst() {
                            // Normalize scalar_unit to the maximal valid range
                            let field_abi = match &field.abi {
                                Abi::Scalar(x) => Abi::Scalar(scalar_unit(x.value)),
                                Abi::ScalarPair(x, y) => {
                                    Abi::ScalarPair(scalar_unit(x.value), scalar_unit(y.value))
                                }
                                Abi::Vector { element: x, count } => {
                                    Abi::Vector { element: scalar_unit(x.value), count: *count }
                                }
                                Abi::Uninhabited | Abi::Aggregate { .. } => {
                                    Abi::Aggregate { sized: true }
                                }
                            };

                            if size == Size::ZERO {
                                // first non ZST: initialize 'abi'
                                abi = field_abi;
                            } else if abi != field_abi {
                                // different fields have different ABI: reset to Aggregate
                                abi = Abi::Aggregate { sized: true };
                            }
                        }

                        size = cmp::max(size, field.size);
                    }

                    if let Some(pack) = def.repr.pack {
                        align = align.min(AbiAndPrefAlign::new(pack));
                    }

                    return Ok(tcx.intern_layout(Layout {
                        variants: Variants::Single { index },
                        fields: FieldsShape::Union(
                            NonZeroUsize::new(variants[index].len())
                                .ok_or(LayoutError::Unknown(ty))?,
                        ),
                        abi,
                        largest_niche: None,
                        align,
                        size: size.align_to(align.abi),
                    }));
                }

                // A variant is absent if it's uninhabited and only has ZST fields.
                // Present uninhabited variants only require space for their fields,
                // but *not* an encoding of the discriminant (e.g., a tag value).
                // See issue #49298 for more details on the need to leave space
                // for non-ZST uninhabited data (mostly partial initialization).
                let absent = |fields: &[TyAndLayout<'_>]| {
                    let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
                    let is_zst = fields.iter().all(|f| f.is_zst());
                    uninhabited && is_zst
                };
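                // Illustration: in `enum E { A(Void), B(u32) }` where `Void` is
                // an empty (uninhabited, zero-sized) enum, variant `A` is
                // absent, whereas an uninhabited variant with a non-ZST field
                // such as `C(u32, Void)` is still present and gets space for
                // its fields.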
                let (present_first, present_second) = {
                    let mut present_variants = variants
                        .iter_enumerated()
                        .filter_map(|(i, v)| if absent(v) { None } else { Some(i) });
                    (present_variants.next(), present_variants.next())
                };
                let present_first = match present_first {
                    Some(present_first) => present_first,
                    // Uninhabited because it has no variants, or only absent ones.
                    None if def.is_enum() => {
                        return Ok(tcx.layout_of(param_env.and(tcx.types.never))?.layout);
                    }
                    // If it's a struct, still compute a layout so that we can still compute the
                    // field offsets.
                    None => VariantIdx::new(0),
                };

                let is_struct = !def.is_enum() ||
                    // Only one variant is present.
                    (present_second.is_none() &&
                    // Representation optimizations are allowed.
                    !def.repr.inhibit_enum_layout_opt());
                if is_struct {
                    // Struct, or univariant enum equivalent to a struct.
                    // (Typechecking will reject discriminant-sizing attrs.)

                    let v = present_first;
                    let kind = if def.is_enum() || variants[v].is_empty() {
                        StructKind::AlwaysSized
                    } else {
                        let param_env = tcx.param_env(def.did);
                        let last_field = def.variants[v].fields.last().unwrap();
                        let always_sized =
                            tcx.type_of(last_field.did).is_sized(tcx.at(DUMMY_SP), param_env);
                        if !always_sized {
                            StructKind::MaybeUnsized
                        } else {
                            StructKind::AlwaysSized
                        }
                    };

                    let mut st = self.univariant_uninterned(ty, &variants[v], &def.repr, kind)?;
                    st.variants = Variants::Single { index: v };
                    let (start, end) = self.tcx.layout_scalar_valid_range(def.did);
                    match st.abi {
                        Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
                            // the asserts ensure that we are not using the
                            // `#[rustc_layout_scalar_valid_range(n)]`
                            // attribute to widen the range of anything as that would probably
                            // result in UB somewhere
                            // FIXME(eddyb) the asserts are probably not needed,
                            // as larger validity ranges would result in missed
                            // optimizations, *not* wrongly assuming the inner
                            // value is valid. e.g. unions enlarge validity ranges,
                            // because the values may be uninitialized.
                            if let Bound::Included(start) = start {
                                // FIXME(eddyb) this might be incorrect - it doesn't
                                // account for wrap-around (end < start) ranges.
                                assert!(scalar.valid_range.start <= start);
                                scalar.valid_range.start = start;
                            }
                            if let Bound::Included(end) = end {
                                // FIXME(eddyb) this might be incorrect - it doesn't
                                // account for wrap-around (end < start) ranges.
                                assert!(scalar.valid_range.end >= end);
                                scalar.valid_range.end = end;
                            }

                            // Update `largest_niche` if we have introduced a larger niche.
                            let niche = if def.repr.hide_niche() {
                                None
                            } else {
                                Niche::from_scalar(dl, Size::ZERO, scalar.clone())
                            };
                            if let Some(niche) = niche {
                                match &st.largest_niche {
                                    Some(largest_niche) => {
                                        // Replace the existing niche even if they're equal,
                                        // because this one is at a lower offset.
                                        if largest_niche.available(dl) <= niche.available(dl) {
                                            st.largest_niche = Some(niche);
                                        }
                                    }
                                    None => st.largest_niche = Some(niche),
                                }
                            }
                        }
                        _ => assert!(
                            start == Bound::Unbounded && end == Bound::Unbounded,
                            "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
                            def,
                            st,
                        ),
                    }

                    return Ok(tcx.intern_layout(st));
                }

                // At this point, we have handled all unions and
                // structs. (We have also handled univariant enums
                // that allow representation optimization.)
                assert!(def.is_enum());

                // The current code for niche-filling relies on variant indices
                // instead of actual discriminants, so dataful enums with
                // explicit discriminants (RFC #2363) would misbehave.
                let no_explicit_discriminants = def
                    .variants
                    .iter_enumerated()
                    .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i.as_u32()));

                let mut niche_filling_layout = None;

                // Niche-filling enum optimization.
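                // Illustration: `Option<&T>` is the classic case: `&T` can
                // never be all-zero bits, so that invalid bit pattern (the
                // niche) encodes `None`, and no separate tag field is needed.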
                if !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants {
                    let mut dataful_variant = None;
                    let mut niche_variants = VariantIdx::MAX..=VariantIdx::new(0);

                    // Find one non-ZST variant.
                    'variants: for (v, fields) in variants.iter_enumerated() {
                        if absent(fields) {
                            continue 'variants;
                        }
                        for f in fields {
                            if !f.is_zst() {
                                if dataful_variant.is_none() {
                                    dataful_variant = Some(v);
                                    continue 'variants;
                                } else {
                                    dataful_variant = None;
                                    break 'variants;
                                }
                            }
                        }
                        niche_variants = *niche_variants.start().min(&v)..=v;
                    }

                    if niche_variants.start() > niche_variants.end() {
                        dataful_variant = None;
                    }

                    if let Some(i) = dataful_variant {
                        let count = (niche_variants.end().as_u32()
                            - niche_variants.start().as_u32()
                            + 1) as u128;

                        // Find the field with the largest niche
                        let niche_candidate = variants[i]
                            .iter()
                            .enumerate()
                            .filter_map(|(j, &field)| Some((j, field.largest_niche.as_ref()?)))
                            .max_by_key(|(_, niche)| niche.available(dl));

                        if let Some((field_index, niche, (niche_start, niche_scalar))) =
                            niche_candidate.and_then(|(field_index, niche)| {
                                Some((field_index, niche, niche.reserve(self, count)?))
                            })
                        {
                            let mut align = dl.aggregate_align;
                            let st = variants
                                .iter_enumerated()
                                .map(|(j, v)| {
                                    let mut st = self.univariant_uninterned(
                                        ty,
                                        v,
                                        &def.repr,
                                        StructKind::AlwaysSized,
                                    )?;
                                    st.variants = Variants::Single { index: j };

                                    align = align.max(st.align);

                                    Ok(st)
                                })
                                .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                            let offset = st[i].fields.offset(field_index) + niche.offset;
                            let size = st[i].size;

                            let abi = if st.iter().all(|v| v.abi.is_uninhabited()) {
                                Abi::Uninhabited
                            } else {
                                match st[i].abi {
                                    Abi::Scalar(_) => Abi::Scalar(niche_scalar.clone()),
                                    Abi::ScalarPair(ref first, ref second) => {
                                        // We need to use scalar_unit to reset the
                                        // valid range to the maximal one for that
                                        // primitive, because only the niche is
                                        // guaranteed to be initialised, not the
                                        // other primitive.
                                        if offset.bytes() == 0 {
                                            Abi::ScalarPair(
                                                niche_scalar.clone(),
                                                scalar_unit(second.value),
                                            )
                                        } else {
                                            Abi::ScalarPair(
                                                scalar_unit(first.value),
                                                niche_scalar.clone(),
                                            )
                                        }
                                    }
                                    _ => Abi::Aggregate { sized: true },
                                }
                            };

                            let largest_niche =
                                Niche::from_scalar(dl, offset, niche_scalar.clone());

                            niche_filling_layout = Some(Layout {
                                variants: Variants::Multiple {
                                    tag: niche_scalar,
                                    tag_encoding: TagEncoding::Niche {
                                        dataful_variant: i,
                                        niche_variants,
                                        niche_start,
                                    },
                                    tag_field: 0,
                                    variants: st,
                                },
                                fields: FieldsShape::Arbitrary {
                                    offsets: vec![offset],
                                    memory_index: vec![0],
                                },
                                abi,
                                largest_niche,
                                size,
                                align,
                            });
                        }
                    }
                }

                let (mut min, mut max) = (i128::MAX, i128::MIN);
                let discr_type = def.repr.discr_type();
                let bits = Integer::from_attr(self, discr_type).size().bits();
                for (i, discr) in def.discriminants(tcx) {
                    if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
                        continue;
                    }
                    let mut x = discr.val as i128;
                    if discr_type.is_signed() {
                        // sign extend the raw representation to be an i128
                        x = (x << (128 - bits)) >> (128 - bits);
                    }
                    if x < min {
                        min = x;
                    }
                    if x > max {
                        max = x;
                    }
                }
                // We might have no inhabited variants, so pretend there's at least one.
                if (min, max) == (i128::MAX, i128::MIN) {
                    min = 0;
                    max = 0;
                }
                assert!(min <= max, "discriminant range is {}...{}", min, max);
                let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max);

                let mut align = dl.aggregate_align;
                let mut size = Size::ZERO;

                // We're interested in the smallest alignment, so start large.
                let mut start_align = Align::from_bytes(256).unwrap();
                assert_eq!(Integer::for_align(dl, start_align), None);

                // repr(C) on an enum tells us to make a (tag, union) layout,
                // so we need to grow the prefix alignment to be at least
                // the alignment of the union. (This value is used both for
                // determining the alignment of the overall enum, and for
                // determining the alignment of the payload after the tag.)
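                //
                // Illustration: `#[repr(C)] enum E { A(u8), B(u64) }` is laid
                // out like `struct { tag, union { A: u8, B: u64 } }`, so the
                // prefix (tag) alignment must grow to the union's 8-byte
                // alignment for the payload after the tag to stay aligned.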
                let mut prefix_align = min_ity.align(dl).abi;
                if def.repr.c() {
                    for fields in &variants {
                        for field in fields {
                            prefix_align = prefix_align.max(field.align.abi);
                        }
                    }
                }

                // Create the set of structs that represent each variant.
                let mut layout_variants = variants
                    .iter_enumerated()
                    .map(|(i, field_layouts)| {
                        let mut st = self.univariant_uninterned(
                            ty,
                            &field_layouts,
                            &def.repr,
                            StructKind::Prefixed(min_ity.size(), prefix_align),
                        )?;
                        st.variants = Variants::Single { index: i };
                        // Find the first field we can't move later
                        // to make room for a larger discriminant.
                        for field in
                            st.fields.index_by_increasing_offset().map(|j| field_layouts[j])
                        {
                            if !field.is_zst() || field.align.abi.bytes() != 1 {
                                start_align = start_align.min(field.align.abi);
                                break;
                            }
                        }
                        size = cmp::max(size, st.size);
                        align = align.max(st.align);
                        Ok(st)
                    })
                    .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                // Align the maximum variant size to the largest alignment.
                size = size.align_to(align.abi);

                if size.bytes() >= dl.obj_size_bound() {
                    return Err(LayoutError::SizeOverflow(ty));
                }

1213                 let typeck_ity = Integer::from_attr(dl, def.repr.discr_type());
1214                 if typeck_ity < min_ity {
1215                     // It is a bug if layout decided on a larger discriminant size than
1216                     // typeck did at this point (based on the values the discriminant
1217                     // can take on), because the discriminant will be loaded and then
1218                     // stored into a variable of the type computed by typeck. Consider
1219                     // this buggy case: typeck decided on a byte-sized discriminant, but
1220                     // layout thinks 16 bits are needed to store all the discriminant
1221                     // values. Then, in codegen, storing that 16-bit discriminant into an
1222                     // 8-bit temporary would have to discard some of the bits needed to
1223                     // represent it (or layout is wrong in thinking it needs 16 bits).
1224                     bug!(
1225                         "layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
1226                         min_ity,
1227                         typeck_ity
1228                     );
1229                     // However, it is fine to make the discr type however large (as an optimisation)
1230                     // after this point; we'll just truncate the value we load in codegen.
1231                 }
1232
1233                 // Check to see if we should use a different type for the
1234                 // discriminant. We can safely use a type with the same size
1235                 // as the alignment of the first field of each variant.
1236                 // We increase the size of the discriminant to avoid LLVM copying
1237                 // padding when it doesn't need to; copying padding normally causes
1238                 // unaligned loads/stores and excessive memcpy/memset operations. By using a
1239                 // bigger integer size, LLVM can be sure about its contents and
1240                 // won't be so conservative.
1241
1242                 // Use the initial field alignment
1243                 let mut ity = if def.repr.c() || def.repr.int.is_some() {
1244                     min_ity
1245                 } else {
1246                     Integer::for_align(dl, start_align).unwrap_or(min_ity)
1247                 };
1248
1249                 // If the alignment-derived integer is no larger than the minimum
1250                 // discriminant size, just keep using the minimum.
1251                 if ity <= min_ity {
1252                     ity = min_ity;
1253                 } else {
1254                     // Patch up the variants' first few fields.
1255                     let old_ity_size = min_ity.size();
1256                     let new_ity_size = ity.size();
1257                     for variant in &mut layout_variants {
1258                         match variant.fields {
1259                             FieldsShape::Arbitrary { ref mut offsets, .. } => {
1260                                 for i in offsets {
1261                                     if *i <= old_ity_size {
1262                                         assert_eq!(*i, old_ity_size);
1263                                         *i = new_ity_size;
1264                                     }
1265                                 }
1266                                 // We might be making the struct larger.
1267                                 if variant.size <= old_ity_size {
1268                                     variant.size = new_ity_size;
1269                                 }
1270                             }
1271                             _ => bug!(),
1272                         }
1273                     }
1274                 }
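                // Hedged illustration (`G` is an invented example): for
                //
                //     enum G { A(u32), B }
                //
                // `min_ity` is `I8`, but `A`'s payload already sits at offset 4
                // because of the `u32`'s alignment, so bytes 1..4 would be
                // padding. Widening `ity` to `I32` lets codegen load and store
                // the whole 4-byte tag without touching undef padding bytes.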
1275
1276                 let tag_mask = !0u128 >> (128 - ity.size().bits());
1277                 let tag = Scalar {
1278                     value: Int(ity, signed),
1279                     valid_range: WrappingRange {
1280                         start: (min as u128 & tag_mask),
1281                         end: (max as u128 & tag_mask),
1282                     },
1283                 };
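                // Sketch of the wrapping encoding (invented example): with
                // `ity == I8` and a discriminant range of `-1..=0`, `tag_mask`
                // is `0xff`, so the encoded range is `start: 255, end: 0`; a
                // signed range is stored in its unsigned bit pattern and is
                // allowed to wrap around the end of the integer.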
1284                 let mut abi = Abi::Aggregate { sized: true };
1285                 if tag.value.size(dl) == size {
1286                     abi = Abi::Scalar(tag.clone());
1287                 } else {
1288                     // Try to use a ScalarPair for all tagged enums.
1289                     let mut common_prim = None;
1290                     for (field_layouts, layout_variant) in iter::zip(&variants, &layout_variants) {
1291                         let offsets = match layout_variant.fields {
1292                             FieldsShape::Arbitrary { ref offsets, .. } => offsets,
1293                             _ => bug!(),
1294                         };
1295                         let mut fields =
1296                             iter::zip(field_layouts, offsets).filter(|p| !p.0.is_zst());
1297                         let (field, offset) = match (fields.next(), fields.next()) {
1298                             (None, None) => continue,
1299                             (Some(pair), None) => pair,
1300                             _ => {
1301                                 common_prim = None;
1302                                 break;
1303                             }
1304                         };
1305                         let prim = match field.abi {
1306                             Abi::Scalar(ref scalar) => scalar.value,
1307                             _ => {
1308                                 common_prim = None;
1309                                 break;
1310                             }
1311                         };
1312                         if let Some(pair) = common_prim {
1313                             // This is pretty conservative. We could go fancier
1314                             // by conflating things like i32 and u32, or even
1315                             // realising that (u8, u8) could just cohabit with
1316                             // u16 or even u32.
1317                             if pair != (prim, offset) {
1318                                 common_prim = None;
1319                                 break;
1320                             }
1321                         } else {
1322                             common_prim = Some((prim, offset));
1323                         }
1324                     }
1325                     if let Some((prim, offset)) = common_prim {
1326                         let pair = self.scalar_pair(tag.clone(), scalar_unit(prim));
1327                         let pair_offsets = match pair.fields {
1328                             FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
1329                                 assert_eq!(memory_index, &[0, 1]);
1330                                 offsets
1331                             }
1332                             _ => bug!(),
1333                         };
1334                         if pair_offsets[0] == Size::ZERO
1335                             && pair_offsets[1] == *offset
1336                             && align == pair.align
1337                             && size == pair.size
1338                         {
1339                             // We can use `ScalarPair` only when it matches our
1340                             // already computed layout (including `#[repr(C)]`).
1341                             abi = pair.abi;
1342                         }
1343                     }
1344                 }
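                // Hedged example of the `ScalarPair` case (`H` is invented):
                //
                //     enum H { A(u32), B(u32) }
                //
                // every inhabited variant has exactly one non-ZST field, a
                // scalar `u32` at the same offset in both variants, so the
                // whole enum can use `Abi::ScalarPair(tag, u32)` and be
                // handled as two scalar values rather than as memory.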
1345
1346                 if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
1347                     abi = Abi::Uninhabited;
1348                 }
1349
1350                 let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag.clone());
1351
1352                 let tagged_layout = Layout {
1353                     variants: Variants::Multiple {
1354                         tag,
1355                         tag_encoding: TagEncoding::Direct,
1356                         tag_field: 0,
1357                         variants: layout_variants,
1358                     },
1359                     fields: FieldsShape::Arbitrary {
1360                         offsets: vec![Size::ZERO],
1361                         memory_index: vec![0],
1362                     },
1363                     largest_niche,
1364                     abi,
1365                     align,
1366                     size,
1367                 };
1368
1369                 let best_layout = match (tagged_layout, niche_filling_layout) {
1370                     (tagged_layout, Some(niche_filling_layout)) => {
1371                         // Pick the smaller layout; otherwise,
1372                         // pick the layout with the larger niche; otherwise,
1373                         // pick tagged as it has simpler codegen.
1374                         cmp::min_by_key(tagged_layout, niche_filling_layout, |layout| {
1375                             let niche_size =
1376                                 layout.largest_niche.as_ref().map_or(0, |n| n.available(dl));
1377                             (layout.size, cmp::Reverse(niche_size))
1378                         })
1379                     }
1380                     (tagged_layout, None) => tagged_layout,
1381                 };
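                // Worked illustration (assumed sizes for a 64-bit target):
                // for `Option<&u8>` the niche-filling candidate is 8 bytes
                // (the null pointer encodes `None`), while the tagged
                // candidate is 16 bytes (tag plus pointer), so `min_by_key`
                // picks the niche layout; on a size tie, the layout with the
                // larger remaining niche wins, and the tagged layout wins
                // last because its codegen is simpler.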
1382
1383                 tcx.intern_layout(best_layout)
1384             }
1385
1386             // Types with no meaningful known layout.
1387             ty::Projection(_) | ty::Opaque(..) => {
1388                 // NOTE(eddyb) `layout_of` query should've normalized these away,
1389                 // if that was possible, so there's no reason to try again here.
1390                 return Err(LayoutError::Unknown(ty));
1391             }
1392
1393             ty::Placeholder(..) | ty::GeneratorWitness(..) | ty::Infer(_) => {
1394                 bug!("Layout::compute: unexpected type `{}`", ty)
1395             }
1396
1397             ty::Bound(..) | ty::Param(_) | ty::Error(_) => {
1398                 return Err(LayoutError::Unknown(ty));
1399             }
1400         })
1401     }
1402 }
1403
1404 /// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
1405 #[derive(Clone, Debug, PartialEq)]
1406 enum SavedLocalEligibility {
1407     Unassigned,
1408     Assigned(VariantIdx),
1409     // FIXME: Use newtype_index so we aren't wasting bytes
1410     Ineligible(Option<u32>),
1411 }
1412
1413 // When laying out generators, we divide our saved local fields into two
1414 // categories: overlap-eligible and overlap-ineligible.
1415 //
1416 // Those fields which are ineligible for overlap go in a "prefix" at the
1417 // beginning of the layout, and always have space reserved for them.
1418 //
1419 // Overlap-eligible fields are each assigned to only one variant, so we lay
1420 // those fields out for each variant and put them right after the
1421 // prefix.
1422 //
1423 // Finally, in the layout details, we point to the fields from the
1424 // variants they are assigned to. It is possible for some fields to be
1425 // included in multiple variants. No field ever "moves around" in the
1426 // layout; its offset is always the same.
1427 //
1428 // Also included in the layout are the upvars and the discriminant.
1429 // These are included as fields on the "outer" layout; they are not part
1430 // of any variant.
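//
// A hedged sketch of the resulting shape (the struct and field names are
// invented for illustration; no such type exists in the compiler):
//
//     struct GeneratorRepr {
//         upvar0: U0,        // "outer" fields: upvars...
//         discriminant: Tag, // ...and the tag, all at fixed offsets
//         promoted0: P0,     // prefix: overlap-ineligible locals,
//                            // with space always reserved
//         // ...followed, per variant, by that suspension point's
//         // overlap-eligible locals, which may reuse each other's bytes.
//     }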
1431 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
1432     /// Compute the eligibility and assignment of each local.
1433     fn generator_saved_local_eligibility(
1434         &self,
1435         info: &GeneratorLayout<'tcx>,
1436     ) -> (BitSet<GeneratorSavedLocal>, IndexVec<GeneratorSavedLocal, SavedLocalEligibility>) {
1437         use SavedLocalEligibility::*;
1438
1439         let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> =
1440             IndexVec::from_elem_n(Unassigned, info.field_tys.len());
1441
1442         // The saved locals not eligible for overlap. These will get
1443         // "promoted" to the prefix of our generator.
1444         let mut ineligible_locals = BitSet::new_empty(info.field_tys.len());
1445
1446         // Figure out which of our saved locals are fields in only
1447         // one variant. The rest are deemed ineligible for overlap.
1448         for (variant_index, fields) in info.variant_fields.iter_enumerated() {
1449             for local in fields {
1450                 match assignments[*local] {
1451                     Unassigned => {
1452                         assignments[*local] = Assigned(variant_index);
1453                     }
1454                     Assigned(idx) => {
1455                         // We've already seen this local at another suspension
1456                         // point, so it is no longer a candidate.
1457                         trace!(
1458                             "removing local {:?} in >1 variant ({:?}, {:?})",
1459                             local,
1460                             variant_index,
1461                             idx
1462                         );
1463                         ineligible_locals.insert(*local);
1464                         assignments[*local] = Ineligible(None);
1465                     }
1466                     Ineligible(_) => {}
1467                 }
1468             }
1469         }
1470
1471         // Next, check every pair of eligible locals to see if they
1472         // conflict.
1473         for local_a in info.storage_conflicts.rows() {
1474             let conflicts_a = info.storage_conflicts.count(local_a);
1475             if ineligible_locals.contains(local_a) {
1476                 continue;
1477             }
1478
1479             for local_b in info.storage_conflicts.iter(local_a) {
1480                 // local_a and local_b are storage-live at the same time, so they
1481                 // cannot overlap in the generator layout. The only way to guarantee
1482                 // this is if they are in the same variant, or one is ineligible
1483                 // (which means it is stored in every variant).
1484                 if ineligible_locals.contains(local_b)
1485                     || assignments[local_a] == assignments[local_b]
1486                 {
1487                     continue;
1488                 }
1489
1490                 // If they conflict, we will choose one to make ineligible.
1491                 // This is not always optimal; it's just a greedy heuristic that
1492                 // seems to produce good results most of the time.
1493                 let conflicts_b = info.storage_conflicts.count(local_b);
1494                 let (remove, other) =
1495                     if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
1496                 ineligible_locals.insert(remove);
1497                 assignments[remove] = Ineligible(None);
1498                 trace!("removing local {:?} due to conflict with {:?}", remove, other);
1499             }
1500         }
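        // Worked example of the heuristic (locals invented): if `a` conflicts
        // with both `b` and `c`, while `b` and `c` do not conflict with each
        // other, evicting `a` (the local with the most conflicts) lets `b`
        // and `c` still overlap; evicting `b` or `c` first would instead
        // leave an `a`/`c` or `a`/`b` conflict to resolve with a second
        // eviction.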
1501
1502         // Count the number of variants in use. If only one is in use, then it is
1503         // impossible to overlap any locals in our layout. In this case it's
1504         // always better to make the remaining locals ineligible, so we can
1505         // lay them out with the other locals in the prefix and eliminate
1506         // unnecessary padding bytes.
1507         {
1508             let mut used_variants = BitSet::new_empty(info.variant_fields.len());
1509             for assignment in &assignments {
1510                 if let Assigned(idx) = assignment {
1511                     used_variants.insert(*idx);
1512                 }
1513             }
1514             if used_variants.count() < 2 {
1515                 for assignment in assignments.iter_mut() {
1516                     *assignment = Ineligible(None);
1517                 }
1518                 ineligible_locals.insert_all();
1519             }
1520         }
1521
1522         // Write down the order of our locals that will be promoted to the prefix.
1523         {
1524             for (idx, local) in ineligible_locals.iter().enumerate() {
1525                 assignments[local] = Ineligible(Some(idx as u32));
1526             }
1527         }
1528         debug!("generator saved local assignments: {:?}", assignments);
1529
1530         (ineligible_locals, assignments)
1531     }
1532
1533     /// Compute the full generator layout.
1534     fn generator_layout(
1535         &self,
1536         ty: Ty<'tcx>,
1537         def_id: hir::def_id::DefId,
1538         substs: SubstsRef<'tcx>,
1539     ) -> Result<&'tcx Layout, LayoutError<'tcx>> {
1540         use SavedLocalEligibility::*;
1541         let tcx = self.tcx;
1542         let subst_field = |ty: Ty<'tcx>| ty.subst(tcx, substs);
1543
1544         let info = match tcx.generator_layout(def_id) {
1545             None => return Err(LayoutError::Unknown(ty)),
1546             Some(info) => info,
1547         };
1548         let (ineligible_locals, assignments) = self.generator_saved_local_eligibility(&info);
1549
1550         // Build a prefix layout, including "promoting" all ineligible
1551         // locals as part of the prefix. We compute the layout of all of
1552         // these fields at once to get optimal packing.
1553         let tag_index = substs.as_generator().prefix_tys().count();
1554
1555         // `info.variant_fields` already accounts for the reserved variants, so no need to add them.
1556         let max_discr = (info.variant_fields.len() - 1) as u128;
1557         let discr_int = Integer::fit_unsigned(max_discr);
1558         let discr_int_ty = discr_int.to_ty(tcx, false);
1559         let tag = Scalar {
1560             value: Primitive::Int(discr_int, false),
1561             valid_range: WrappingRange { start: 0, end: max_discr },
1562         };
1563         let tag_layout = self.tcx.intern_layout(Layout::scalar(self, tag.clone()));
1564         let tag_layout = TyAndLayout { ty: discr_int_ty, layout: tag_layout };
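        // Hedged illustration (assumed counts): a generator with three
        // suspension points has the three reserved variants (unresumed,
        // returned, panicked) plus three suspend variants, so
        // `max_discr == 5`, `Integer::fit_unsigned(5)` yields `I8`, and the
        // tag is a `u8` whose valid range is `0..=5`.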
1565
1566         let promoted_layouts = ineligible_locals
1567             .iter()
1568             .map(|local| subst_field(info.field_tys[local]))
1569             .map(|ty| tcx.mk_maybe_uninit(ty))
1570             .map(|ty| self.layout_of(ty));
1571         let prefix_layouts = substs
1572             .as_generator()
1573             .prefix_tys()
1574             .map(|ty| self.layout_of(ty))
1575             .chain(iter::once(Ok(tag_layout)))
1576             .chain(promoted_layouts)
1577             .collect::<Result<Vec<_>, _>>()?;
1578         let prefix = self.univariant_uninterned(
1579             ty,
1580             &prefix_layouts,
1581             &ReprOptions::default(),
1582             StructKind::AlwaysSized,
1583         )?;
1584
1585         let (prefix_size, prefix_align) = (prefix.size, prefix.align);
1586
1587         // Split the prefix layout into the "outer" fields (upvars and
1588         // discriminant) and the "promoted" fields. Promoted fields will
1589         // get included in each variant that requested them in
1590         // GeneratorLayout.
1591         debug!("prefix = {:#?}", prefix);
1592         let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
1593             FieldsShape::Arbitrary { mut offsets, memory_index } => {
1594                 let mut inverse_memory_index = invert_mapping(&memory_index);
1595
1596                 // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
1597                 // "outer" and "promoted" fields respectively.
1598                 let b_start = (tag_index + 1) as u32;
1599                 let offsets_b = offsets.split_off(b_start as usize);
1600                 let offsets_a = offsets;
1601
1602                 // Disentangle the "a" and "b" components of `inverse_memory_index`
1603                 // by preserving the order but keeping only one disjoint "half" each.
1604                 // FIXME(eddyb) build a better abstraction for permutations, if possible.
1605                 let inverse_memory_index_b: Vec<_> =
1606                     inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
1607                 inverse_memory_index.retain(|&i| i < b_start);
1608                 let inverse_memory_index_a = inverse_memory_index;
1609
1610                 // Since `inverse_memory_index_{a,b}` each only refer to their
1611                 // respective fields, they can be safely inverted.
1612                 let memory_index_a = invert_mapping(&inverse_memory_index_a);
1613                 let memory_index_b = invert_mapping(&inverse_memory_index_b);
1614
1615                 let outer_fields =
1616                     FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
1617                 (outer_fields, offsets_b, memory_index_b)
1618             }
1619             _ => bug!(),
1620         };
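        // Hedged walk-through (indices invented): with one upvar, the tag and
        // two promoted locals, `b_start == 2`, so `offsets == [o0, o1, o2, o3]`
        // splits into outer `[o0, o1]` and promoted `[o2, o3]`. An
        // `inverse_memory_index` of `[2, 0, 3, 1]` keeps `[0, 1]` for the
        // outer half and maps the promoted half `[2, 3]` down by `b_start` to
        // `[0, 1]`; each half is then inverted back into a `memory_index`.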
1621
1622         let mut size = prefix.size;
1623         let mut align = prefix.align;
1624         let variants = info
1625             .variant_fields
1626             .iter_enumerated()
1627             .map(|(index, variant_fields)| {
1628                 // Only include overlap-eligible fields when we compute our variant layout.
1629                 let variant_only_tys = variant_fields
1630                     .iter()
1631                     .filter(|local| match assignments[**local] {
1632                         Unassigned => bug!(),
1633                         Assigned(v) if v == index => true,
1634                         Assigned(_) => bug!("assignment does not match variant"),
1635                         Ineligible(_) => false,
1636                     })
1637                     .map(|local| subst_field(info.field_tys[*local]));
1638
1639                 let mut variant = self.univariant_uninterned(
1640                     ty,
1641                     &variant_only_tys
1642                         .map(|ty| self.layout_of(ty))
1643                         .collect::<Result<Vec<_>, _>>()?,
1644                     &ReprOptions::default(),
1645                     StructKind::Prefixed(prefix_size, prefix_align.abi),
1646                 )?;
1647                 variant.variants = Variants::Single { index };
1648
1649                 let (offsets, memory_index) = match variant.fields {
1650                     FieldsShape::Arbitrary { offsets, memory_index } => (offsets, memory_index),
1651                     _ => bug!(),
1652                 };
1653
1654                 // Now, stitch the promoted and variant-only fields back together in
1655                 // the order they are mentioned by our GeneratorLayout.
1656                 // Because we only use some subset (that can differ between variants)
1657                 // of the promoted fields, we can't just pick those elements of the
1658                 // `promoted_memory_index` (as we'd end up with gaps).
1659                 // So instead, we build an "inverse memory_index", as if all of the
1660                 // promoted fields were being used, but leave the elements not in the
1661                 // subset as `INVALID_FIELD_IDX`, which we can filter out later to
1662                 // obtain a valid (bijective) mapping.
1663                 const INVALID_FIELD_IDX: u32 = !0;
1664                 let mut combined_inverse_memory_index =
1665                     vec![INVALID_FIELD_IDX; promoted_memory_index.len() + memory_index.len()];
1666                 let mut offsets_and_memory_index = iter::zip(offsets, memory_index);
1667                 let combined_offsets = variant_fields
1668                     .iter()
1669                     .enumerate()
1670                     .map(|(i, local)| {
1671                         let (offset, memory_index) = match assignments[*local] {
1672                             Unassigned => bug!(),
1673                             Assigned(_) => {
1674                                 let (offset, memory_index) =
1675                                     offsets_and_memory_index.next().unwrap();
1676                                 (offset, promoted_memory_index.len() as u32 + memory_index)
1677                             }
1678                             Ineligible(field_idx) => {
1679                                 let field_idx = field_idx.unwrap() as usize;
1680                                 (promoted_offsets[field_idx], promoted_memory_index[field_idx])
1681                             }
1682                         };
1683                         combined_inverse_memory_index[memory_index as usize] = i as u32;
1684                         offset
1685                     })
1686                     .collect();
1687
1688                 // Remove the unused slots and invert the mapping to obtain the
1689                 // combined `memory_index` (also see previous comment).
1690                 combined_inverse_memory_index.retain(|&i| i != INVALID_FIELD_IDX);
1691                 let combined_memory_index = invert_mapping(&combined_inverse_memory_index);
1692
1693                 variant.fields = FieldsShape::Arbitrary {
1694                     offsets: combined_offsets,
1695                     memory_index: combined_memory_index,
1696                 };
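                // Worked example (numbers invented): with two promoted slots
                // and a variant that uses promoted slot 1 plus one field of
                // its own, the combined inverse index is built as
                // `[INVALID, 0, 1]`; `retain` drops the entry for the unused
                // promoted slot, leaving `[0, 1]`, which inverts to the dense,
                // bijective `memory_index` `[0, 1]`.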
1697
1698                 size = size.max(variant.size);
1699                 align = align.max(variant.align);
1700                 Ok(variant)
1701             })
1702             .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1703
1704         size = size.align_to(align.abi);
1705
1706         let abi = if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi.is_uninhabited())
1707         {
1708             Abi::Uninhabited
1709         } else {
1710             Abi::Aggregate { sized: true }
1711         };
1712
1713         let layout = tcx.intern_layout(Layout {
1714             variants: Variants::Multiple {
1715                 tag,
1716                 tag_encoding: TagEncoding::Direct,
1717                 tag_field: tag_index,
1718                 variants,
1719             },
1720             fields: outer_fields,
1721             abi,
1722             largest_niche: prefix.largest_niche,
1723             size,
1724             align,
1725         });
1726         debug!("generator layout ({:?}): {:#?}", ty, layout);
1727         Ok(layout)
1728     }
1729
1730     /// This is invoked by the `layout_of` query to record the final
1731     /// layout of each type.
1732     #[inline(always)]
1733     fn record_layout_for_printing(&self, layout: TyAndLayout<'tcx>) {
1734         // If we are running with `-Zprint-type-sizes`, maybe record layouts
1735         // for dumping later.
1736         if self.tcx.sess.opts.debugging_opts.print_type_sizes {
1737             self.record_layout_for_printing_outlined(layout)
1738         }
1739     }
1740
1741     fn record_layout_for_printing_outlined(&self, layout: TyAndLayout<'tcx>) {
1742         // Ignore layouts computed with non-empty parameter environments, and
1743         // non-monomorphic layouts, as the user only wants to see what results
1744         // from the final codegen session.
1745         if layout.ty.definitely_has_param_types_or_consts(self.tcx)
1746             || !self.param_env.caller_bounds().is_empty()
1747         {
1748             return;
1749         }
1750
1751         // (delay format until we actually need it)
1752         let record = |kind, packed, opt_discr_size, variants| {
1753             let type_desc = format!("{:?}", layout.ty);
1754             self.tcx.sess.code_stats.record_type_size(
1755                 kind,
1756                 type_desc,
1757                 layout.align.abi,
1758                 layout.size,
1759                 packed,
1760                 opt_discr_size,
1761                 variants,
1762             );
1763         };
1764
1765         let adt_def = match *layout.ty.kind() {
1766             ty::Adt(ref adt_def, _) => {
1767                 debug!("print-type-size t: `{:?}` process adt", layout.ty);
1768                 adt_def
1769             }
1770
1771             ty::Closure(..) => {
1772                 debug!("print-type-size t: `{:?}` record closure", layout.ty);
1773                 record(DataTypeKind::Closure, false, None, vec![]);
1774                 return;
1775             }
1776
1777             _ => {
1778                 debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
1779                 return;
1780             }
1781         };
1782
1783         let adt_kind = adt_def.adt_kind();
1784         let adt_packed = adt_def.repr.pack.is_some();
1785
1786         let build_variant_info = |n: Option<Ident>, flds: &[Symbol], layout: TyAndLayout<'tcx>| {
1787             let mut min_size = Size::ZERO;
1788             let field_info: Vec<_> = flds
1789                 .iter()
1790                 .enumerate()
1791                 .map(|(i, &name)| {
1792                     let field_layout = layout.field(self, i);
1793                     let offset = layout.fields.offset(i);
1794                     let field_end = offset + field_layout.size;
1795                     if min_size < field_end {
1796                         min_size = field_end;
1797                     }
1798                     FieldInfo {
1799                         name: name.to_string(),
1800                         offset: offset.bytes(),
1801                         size: field_layout.size.bytes(),
1802                         align: field_layout.align.abi.bytes(),
1803                     }
1804                 })
1805                 .collect();
1806
1807             VariantInfo {
1808                 name: n.map(|n| n.to_string()),
1809                 kind: if layout.is_unsized() { SizeKind::Min } else { SizeKind::Exact },
1810                 align: layout.align.abi.bytes(),
1811                 size: if min_size.bytes() == 0 { layout.size.bytes() } else { min_size.bytes() },
1812                 fields: field_info,
1813             }
1814         };
1815
1816         match layout.variants {
1817             Variants::Single { index } => {
1818                 debug!("print-type-size `{:#?}` variant {}", layout, adt_def.variants[index].ident);
1819                 if !adt_def.variants.is_empty() {
1820                     let variant_def = &adt_def.variants[index];
1821                     let fields: Vec<_> = variant_def.fields.iter().map(|f| f.ident.name).collect();
1822                     record(
1823                         adt_kind.into(),
1824                         adt_packed,
1825                         None,
1826                         vec![build_variant_info(Some(variant_def.ident), &fields, layout)],
1827                     );
1828                 } else {
1829                     // (This case arises for *empty* enums; so give it
1830                     // zero variants.)
1831                     record(adt_kind.into(), adt_packed, None, vec![]);
1832                 }
1833             }
1834
1835             Variants::Multiple { ref tag, ref tag_encoding, .. } => {
1836                 debug!(
1837                     "print-type-size `{:#?}` adt general variants def {}",
1838                     layout.ty,
1839                     adt_def.variants.len()
1840                 );
1841                 let variant_infos: Vec<_> = adt_def
1842                     .variants
1843                     .iter_enumerated()
1844                     .map(|(i, variant_def)| {
1845                         let fields: Vec<_> =
1846                             variant_def.fields.iter().map(|f| f.ident.name).collect();
1847                         build_variant_info(
1848                             Some(variant_def.ident),
1849                             &fields,
1850                             layout.for_variant(self, i),
1851                         )
1852                     })
1853                     .collect();
1854                 record(
1855                     adt_kind.into(),
1856                     adt_packed,
1857                     match tag_encoding {
1858                         TagEncoding::Direct => Some(tag.value.size(self)),
1859                         _ => None,
1860                     },
1861                     variant_infos,
1862                 );
1863             }
1864         }
1865     }
1866 }
1867
1868 /// Type size "skeleton", i.e., the only information determining a type's size.
1869 /// While this is conservative (aside from constant sizes, only pointers,
1870 /// newtypes thereof, and null-pointer-optimized enums are allowed), it is
1871 /// enough to statically check common use cases of transmute.
1872 #[derive(Copy, Clone, Debug)]
1873 pub enum SizeSkeleton<'tcx> {
1874     /// Any statically computable Layout.
1875     Known(Size),
1876
1877     /// A potentially-fat pointer.
1878     Pointer {
1879         /// If true, this pointer is never null.
1880         non_zero: bool,
1881         /// The type which determines the unsized metadata, if any,
1882         /// of this pointer. Either a type parameter or a projection
1883         /// depending on one, with regions erased.
1884         tail: Ty<'tcx>,
1885     },
1886 }
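// A hedged illustration of what a `SizeSkeleton` can prove: for a type
// parameter `T` (so neither layout is statically known), `&T` and
// `Option<&T>` both reduce to `SizeSkeleton::Pointer { tail: T }` -- the
// `Option` side via the nullable-pointer case below -- so `same_size`
// can conclude that a `transmute` between them is size-correct.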
1887
1888 impl<'tcx> SizeSkeleton<'tcx> {
1889     pub fn compute(
1890         ty: Ty<'tcx>,
1891         tcx: TyCtxt<'tcx>,
1892         param_env: ty::ParamEnv<'tcx>,
1893     ) -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
1894         debug_assert!(!ty.has_infer_types_or_consts());
1895
1896         // First try computing a static layout.
1897         let err = match tcx.layout_of(param_env.and(ty)) {
1898             Ok(layout) => {
1899                 return Ok(SizeSkeleton::Known(layout.size));
1900             }
1901             Err(err) => err,
1902         };
1903
1904         match *ty.kind() {
1905             ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
1906                 let non_zero = !ty.is_unsafe_ptr();
1907                 let tail = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
1908                 match tail.kind() {
1909                     ty::Param(_) | ty::Projection(_) => {
1910                         debug_assert!(tail.definitely_has_param_types_or_consts(tcx));
1911                         Ok(SizeSkeleton::Pointer { non_zero, tail: tcx.erase_regions(tail) })
1912                     }
1913                     _ => bug!(
1914                         "SizeSkeleton::compute({}): layout errored ({}), yet \
1915                               tail `{}` is not a type parameter or a projection",
1916                         ty,
1917                         err,
1918                         tail
1919                     ),
1920                 }
1921             }
1922
1923             ty::Adt(def, substs) => {
1924                 // Only newtypes and enums w/ nullable pointer optimization.
1925                 if def.is_union() || def.variants.is_empty() || def.variants.len() > 2 {
1926                     return Err(err);
1927                 }
1928
1929                 // Get a zero-sized variant or a pointer newtype.
1930                 let zero_or_ptr_variant = |i| {
1931                     let i = VariantIdx::new(i);
1932                     let fields = def.variants[i]
1933                         .fields
1934                         .iter()
1935                         .map(|field| SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env));
1936                     let mut ptr = None;
1937                     for field in fields {
1938                         let field = field?;
1939                         match field {
1940                             SizeSkeleton::Known(size) => {
1941                                 if size.bytes() > 0 {
1942                                     return Err(err);
1943                                 }
1944                             }
1945                             SizeSkeleton::Pointer { .. } => {
1946                                 if ptr.is_some() {
1947                                     return Err(err);
1948                                 }
1949                                 ptr = Some(field);
1950                             }
1951                         }
1952                     }
1953                     Ok(ptr)
1954                 };
1955
1956                 let v0 = zero_or_ptr_variant(0)?;
1957                 // Newtype.
1958                 if def.variants.len() == 1 {
1959                     if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
1960                         return Ok(SizeSkeleton::Pointer {
1961                             non_zero: non_zero
1962                                 || match tcx.layout_scalar_valid_range(def.did) {
1963                                     (Bound::Included(start), Bound::Unbounded) => start > 0,
1964                                     (Bound::Included(start), Bound::Included(end)) => {
1965                                         0 < start && start < end
1966                                     }
1967                                     _ => false,
1968                                 },
1969                             tail,
1970                         });
1971                     } else {
1972                         return Err(err);
1973                     }
1974                 }
1975
1976                 let v1 = zero_or_ptr_variant(1)?;
1977                 // Nullable pointer enum optimization.
1978                 match (v0, v1) {
1979                     (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None)
1980                     | (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
1981                         Ok(SizeSkeleton::Pointer { non_zero: false, tail })
1982                     }
1983                     _ => Err(err),
1984                 }
1985             }
1986
1987             ty::Projection(_) | ty::Opaque(..) => {
1988                 let normalized = tcx.normalize_erasing_regions(param_env, ty);
1989                 if ty == normalized {
1990                     Err(err)
1991                 } else {
1992                     SizeSkeleton::compute(normalized, tcx, param_env)
1993                 }
1994             }
1995
1996             _ => Err(err),
1997         }
1998     }
1999
2000     pub fn same_size(self, other: SizeSkeleton<'_>) -> bool {
2001         match (self, other) {
2002             (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
2003             (SizeSkeleton::Pointer { tail: a, .. }, SizeSkeleton::Pointer { tail: b, .. }) => {
2004                 a == b
2005             }
2006             _ => false,
2007         }
2008     }
2009 }
2010
2011 pub trait HasTyCtxt<'tcx>: HasDataLayout {
2012     fn tcx(&self) -> TyCtxt<'tcx>;
2013 }
2014
2015 pub trait HasParamEnv<'tcx> {
2016     fn param_env(&self) -> ty::ParamEnv<'tcx>;
2017 }
2018
2019 impl<'tcx> HasDataLayout for TyCtxt<'tcx> {
2020     #[inline]
2021     fn data_layout(&self) -> &TargetDataLayout {
2022         &self.data_layout
2023     }
2024 }
2025
2026 impl<'tcx> HasTyCtxt<'tcx> for TyCtxt<'tcx> {
2027     #[inline]
2028     fn tcx(&self) -> TyCtxt<'tcx> {
2029         *self
2030     }
2031 }
2032
2033 impl<'tcx> HasDataLayout for ty::query::TyCtxtAt<'tcx> {
2034     #[inline]
2035     fn data_layout(&self) -> &TargetDataLayout {
2036         &self.data_layout
2037     }
2038 }
2039
2040 impl<'tcx> HasTyCtxt<'tcx> for ty::query::TyCtxtAt<'tcx> {
2041     #[inline]
2042     fn tcx(&self) -> TyCtxt<'tcx> {
2043         **self
2044     }
2045 }
2046
2047 impl<'tcx, C> HasParamEnv<'tcx> for LayoutCx<'tcx, C> {
2048     fn param_env(&self) -> ty::ParamEnv<'tcx> {
2049         self.param_env
2050     }
2051 }
2052
2053 impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
2054     fn data_layout(&self) -> &TargetDataLayout {
2055         self.tcx.data_layout()
2056     }
2057 }
2058
2059 impl<'tcx, T: HasTyCtxt<'tcx>> HasTyCtxt<'tcx> for LayoutCx<'tcx, T> {
2060     fn tcx(&self) -> TyCtxt<'tcx> {
2061         self.tcx.tcx()
2062     }
2063 }
2064
2065 pub type TyAndLayout<'tcx> = rustc_target::abi::TyAndLayout<'tcx, Ty<'tcx>>;
2066
2067 impl LayoutOf<'tcx> for LayoutCx<'tcx, TyCtxt<'tcx>> {
2068     type Ty = Ty<'tcx>;
2069     type TyAndLayout = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
2070
2071     /// Computes the layout of a type. Note that this implicitly
2072     /// executes in "reveal all" mode, and will normalize the input type.
2073     #[inline]
2074     fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout {
2075         self.tcx.layout_of(self.param_env.and(ty))
2076     }
2077 }
2078
2079 impl LayoutOf<'tcx> for LayoutCx<'tcx, ty::query::TyCtxtAt<'tcx>> {
2080     type Ty = Ty<'tcx>;
2081     type TyAndLayout = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
2082
2083     /// Computes the layout of a type. Note that this implicitly
2084     /// executes in "reveal all" mode, and will normalize the input type.
2085     #[inline]
2086     fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout {
2087         self.tcx.layout_of(self.param_env.and(ty))
2088     }
2089 }
2090
2091 impl<'tcx, C> TyAbiInterface<'tcx, C> for Ty<'tcx>
2092 where
2093     C: HasTyCtxt<'tcx> + HasParamEnv<'tcx>,
2094 {
2095     fn ty_and_layout_for_variant(
2096         this: TyAndLayout<'tcx>,
2097         cx: &C,
2098         variant_index: VariantIdx,
2099     ) -> TyAndLayout<'tcx> {
2100         let layout = match this.variants {
2101             Variants::Single { index }
2102                 // If all variants but one are uninhabited, the variant layout is the enum layout.
2103                 if index == variant_index &&
2104                 // Don't confuse variants of uninhabited enums with the enum itself.
2105                 // For more details see https://github.com/rust-lang/rust/issues/69763.
2106                 this.fields != FieldsShape::Primitive =>
2107             {
2108                 this.layout
2109             }
2110
2111             Variants::Single { index } => {
2112                 let tcx = cx.tcx();
2113                 let param_env = cx.param_env();
2114
2115                 // Deny calling for_variant more than once for non-Single enums.
2116                 if let Ok(original_layout) = tcx.layout_of(param_env.and(this.ty)) {
2117                     assert_eq!(original_layout.variants, Variants::Single { index });
2118                 }
2119
2120                 let fields = match this.ty.kind() {
2121                     ty::Adt(def, _) if def.variants.is_empty() =>
2122                         bug!("for_variant called on zero-variant enum"),
2123                     ty::Adt(def, _) => def.variants[variant_index].fields.len(),
2124                     _ => bug!(),
2125                 };
2126                 tcx.intern_layout(Layout {
2127                     variants: Variants::Single { index: variant_index },
2128                     fields: match NonZeroUsize::new(fields) {
2129                         Some(fields) => FieldsShape::Union(fields),
2130                         None => FieldsShape::Arbitrary { offsets: vec![], memory_index: vec![] },
2131                     },
2132                     abi: Abi::Uninhabited,
2133                     largest_niche: None,
2134                     align: tcx.data_layout.i8_align,
2135                     size: Size::ZERO,
2136                 })
2137             }
2138
2139             Variants::Multiple { ref variants, .. } => &variants[variant_index],
2140         };
2141
2142         assert_eq!(layout.variants, Variants::Single { index: variant_index });
2143
2144         TyAndLayout { ty: this.ty, layout }
2145     }
2146
2147     fn ty_and_layout_field(this: TyAndLayout<'tcx>, cx: &C, i: usize) -> TyAndLayout<'tcx> {
2148         enum TyMaybeWithLayout<'tcx> {
2149             Ty(Ty<'tcx>),
2150             TyAndLayout(TyAndLayout<'tcx>),
2151         }
2152
2153         fn field_ty_or_layout(
2154             this: TyAndLayout<'tcx>,
2155             cx: &(impl HasTyCtxt<'tcx> + HasParamEnv<'tcx>),
2156             i: usize,
2157         ) -> TyMaybeWithLayout<'tcx> {
2158             let tcx = cx.tcx();
2159             let tag_layout = |tag: &Scalar| -> TyAndLayout<'tcx> {
2160                 let layout = Layout::scalar(cx, tag.clone());
2161                 TyAndLayout { layout: tcx.intern_layout(layout), ty: tag.value.to_ty(tcx) }
2162             };
2163
2164             match *this.ty.kind() {
2165                 ty::Bool
2166                 | ty::Char
2167                 | ty::Int(_)
2168                 | ty::Uint(_)
2169                 | ty::Float(_)
2170                 | ty::FnPtr(_)
2171                 | ty::Never
2172                 | ty::FnDef(..)
2173                 | ty::GeneratorWitness(..)
2174                 | ty::Foreign(..)
2175                 | ty::Dynamic(..) => bug!("TyAndLayout::field({:?}): not applicable", this),
2176
2177                 // Potentially-fat pointers.
2178                 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
2179                     assert!(i < this.fields.count());
2180
2181                     // Reuse the fat `*T` type as its own thin pointer data field.
2182                     // This provides information about, e.g., DST struct pointees
2183                     // (which may have no non-DST form), and will work as long
2184                     // as the `Abi` or `FieldsShape` is checked by users.
2185                     if i == 0 {
2186                         let nil = tcx.mk_unit();
2187                         let unit_ptr_ty = if this.ty.is_unsafe_ptr() {
2188                             tcx.mk_mut_ptr(nil)
2189                         } else {
2190                             tcx.mk_mut_ref(tcx.lifetimes.re_static, nil)
2191                         };
2192
2193                         // NOTE(eddyb) using an empty `ParamEnv`, and `unwrap`-ing
2194                         // the `Result` should always work because the type is
2195                         // always either `*mut ()` or `&'static mut ()`.
2196                         return TyMaybeWithLayout::TyAndLayout(TyAndLayout {
2197                             ty: this.ty,
2198                             ..tcx.layout_of(ty::ParamEnv::reveal_all().and(unit_ptr_ty)).unwrap()
2199                         });
2200                     }
2201
2202                     match tcx.struct_tail_erasing_lifetimes(pointee, cx.param_env()).kind() {
2203                         ty::Slice(_) | ty::Str => TyMaybeWithLayout::Ty(tcx.types.usize),
2204                         ty::Dynamic(_, _) => {
2205                             TyMaybeWithLayout::Ty(tcx.mk_imm_ref(
2206                                 tcx.lifetimes.re_static,
2207                                 tcx.mk_array(tcx.types.usize, 3),
2208                             ))
2209                             /* FIXME: use actual fn pointers
2210                             Warning: naively computing the number of entries in the
2211                             vtable by counting the methods on the trait + methods on
2212                             all parent traits does not work, because some methods can
2213                             be non-object-safe and thus excluded from the vtable.
2214                             Increase this counter if you tried to implement this but
2215                             failed to do it without duplicating a lot of code from
2216                             other places in the compiler: 2
2217                             tcx.mk_tup(&[
2218                                 tcx.mk_array(tcx.types.usize, 3),
2219                                 tcx.mk_array(Option<fn()>),
2220                             ])
2221                             */
2222                         }
2223                         _ => bug!("TyAndLayout::field({:?}): not applicable", this),
2224                     }
2225                 }
2226
2227                 // Arrays and slices.
2228                 ty::Array(element, _) | ty::Slice(element) => TyMaybeWithLayout::Ty(element),
2229                 ty::Str => TyMaybeWithLayout::Ty(tcx.types.u8),
2230
2231                 // Tuples, generators and closures.
2232                 ty::Closure(_, ref substs) => field_ty_or_layout(
2233                     TyAndLayout { ty: substs.as_closure().tupled_upvars_ty(), ..this },
2234                     cx,
2235                     i,
2236                 ),
2237
2238                 ty::Generator(def_id, ref substs, _) => match this.variants {
2239                     Variants::Single { index } => TyMaybeWithLayout::Ty(
2240                         substs
2241                             .as_generator()
2242                             .state_tys(def_id, tcx)
2243                             .nth(index.as_usize())
2244                             .unwrap()
2245                             .nth(i)
2246                             .unwrap(),
2247                     ),
2248                     Variants::Multiple { ref tag, tag_field, .. } => {
2249                         if i == tag_field {
2250                             return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
2251                         }
2252                         TyMaybeWithLayout::Ty(substs.as_generator().prefix_tys().nth(i).unwrap())
2253                     }
2254                 },
2255
2256                 ty::Tuple(tys) => TyMaybeWithLayout::Ty(tys[i].expect_ty()),
2257
2258                 // ADTs.
2259                 ty::Adt(def, substs) => {
2260                     match this.variants {
2261                         Variants::Single { index } => {
2262                             TyMaybeWithLayout::Ty(def.variants[index].fields[i].ty(tcx, substs))
2263                         }
2264
2265                         // Discriminant field for enums (where applicable).
2266                         Variants::Multiple { ref tag, .. } => {
2267                             assert_eq!(i, 0);
2268                             return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
2269                         }
2270                     }
2271                 }
2272
2273                 ty::Projection(_)
2274                 | ty::Bound(..)
2275                 | ty::Placeholder(..)
2276                 | ty::Opaque(..)
2277                 | ty::Param(_)
2278                 | ty::Infer(_)
2279                 | ty::Error(_) => bug!("TyAndLayout::field: unexpected type `{}`", this.ty),
2280             }
2281         }
2282
2283         match field_ty_or_layout(this, cx, i) {
2284             TyMaybeWithLayout::Ty(field_ty) => {
2285                 cx.tcx().layout_of(cx.param_env().and(field_ty)).unwrap_or_else(|e| {
2286                     bug!(
2287                         "failed to get layout for `{}`: {},\n\
2288                          despite it being a field (#{}) of an existing layout: {:#?}",
2289                         field_ty,
2290                         e,
2291                         i,
2292                         this
2293                     )
2294                 })
2295             }
2296             TyMaybeWithLayout::TyAndLayout(field_layout) => field_layout,
2297         }
2298     }
2299
2300     fn ty_and_layout_pointee_info_at(
2301         this: TyAndLayout<'tcx>,
2302         cx: &C,
2303         offset: Size,
2304     ) -> Option<PointeeInfo> {
2305         let tcx = cx.tcx();
2306         let param_env = cx.param_env();
2307
2308         let addr_space_of_ty = |ty: Ty<'tcx>| {
2309             if ty.is_fn() { cx.data_layout().instruction_address_space } else { AddressSpace::DATA }
2310         };
2311
2312         let pointee_info = match *this.ty.kind() {
2313             ty::RawPtr(mt) if offset.bytes() == 0 => {
2314                 tcx.layout_of(param_env.and(mt.ty)).ok().map(|layout| PointeeInfo {
2315                     size: layout.size,
2316                     align: layout.align.abi,
2317                     safe: None,
2318                     address_space: addr_space_of_ty(mt.ty),
2319                 })
2320             }
2321             ty::FnPtr(fn_sig) if offset.bytes() == 0 => {
2322                 tcx.layout_of(param_env.and(tcx.mk_fn_ptr(fn_sig))).ok().map(|layout| PointeeInfo {
2323                     size: layout.size,
2324                     align: layout.align.abi,
2325                     safe: None,
2326                     address_space: cx.data_layout().instruction_address_space,
2327                 })
2328             }
2329             ty::Ref(_, ty, mt) if offset.bytes() == 0 => {
2330                 let address_space = addr_space_of_ty(ty);
2331                 let kind = if tcx.sess.opts.optimize == OptLevel::No {
2332                     // Use conservative pointer kind if not optimizing. This saves us the
2333                     // Freeze/Unpin queries, and can save time in the codegen backend (noalias
2334                     // attributes in LLVM have compile-time cost even in unoptimized builds).
2335                     PointerKind::Shared
2336                 } else {
2337                     match mt {
2338                         hir::Mutability::Not => {
2339                             if ty.is_freeze(tcx.at(DUMMY_SP), cx.param_env()) {
2340                                 PointerKind::Frozen
2341                             } else {
2342                                 PointerKind::Shared
2343                             }
2344                         }
2345                         hir::Mutability::Mut => {
2346                             // References to self-referential structures should not be considered
2347                             // noalias, as another pointer to the structure can be obtained that
2348                             // is not based on the original reference. We consider all !Unpin
2349                             // types to be potentially self-referential here.
2350                             if ty.is_unpin(tcx.at(DUMMY_SP), cx.param_env()) {
2351                                 PointerKind::UniqueBorrowed
2352                             } else {
2353                                 PointerKind::Shared
2354                             }
2355                         }
2356                     }
2357                 };
2358
2359                 tcx.layout_of(param_env.and(ty)).ok().map(|layout| PointeeInfo {
2360                     size: layout.size,
2361                     align: layout.align.abi,
2362                     safe: Some(kind),
2363                     address_space,
2364                 })
2365             }
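            // Hedged note: these `PointerKind`s feed the attributes the
            // codegen backend later emits (e.g. `Frozen` is eligible for
            // LLVM `noalias readonly`, `UniqueBorrowed` for `noalias`), so
            // the `OptLevel::No` shortcut above only trades attribute
            // precision for compile time; it does not change semantics.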
2366
2367             _ => {
2368                 let mut data_variant = match this.variants {
2369                     // Within the discriminant field, only the niche itself is
2370                     // always initialized, so we only check for a pointer at its
2371                     // offset.
2372                     //
2373                     // If the niche is a pointer, it's either valid (according
2374                     // to its type), or null (which the niche field's scalar
2375                     // validity range encodes).  This allows using
2376                     // `dereferenceable_or_null` for e.g., `Option<&T>`, and
2377                     // this will continue to work as long as we don't start
2378                     // using more niches than just null (e.g., the first page of
2379                     // the address space, or unaligned pointers).
2380                     Variants::Multiple {
2381                         tag_encoding: TagEncoding::Niche { dataful_variant, .. },
2382                         tag_field,
2383                         ..
2384                     } if this.fields.offset(tag_field) == offset => {
2385                         Some(this.for_variant(cx, dataful_variant))
2386                     }
2387                     _ => Some(this),
2388                 };
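
                // Illustrative sketch of the niche case above (not compiler
                // logic): `Option<&u8>` is pointer-sized, with `None` encoded
                // as the null pointer,
                //
                //     assert_eq!(
                //         std::mem::size_of::<Option<&u8>>(),
                //         std::mem::size_of::<&u8>(),
                //     );
                //
                // so at the pointer's offset the value is either a valid `&u8`
                // or null, which is what `dereferenceable_or_null` expresses.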
2389
2390                 if let Some(variant) = data_variant {
2391                     // We're not interested in any unions.
2392                     if let FieldsShape::Union(_) = variant.fields {
2393                         data_variant = None;
2394                     }
2395                 }
2396
2397                 let mut result = None;
2398
2399                 if let Some(variant) = data_variant {
2400                     let ptr_end = offset + Pointer.size(cx);
2401                     for i in 0..variant.fields.count() {
2402                         let field_start = variant.fields.offset(i);
2403                         if field_start <= offset {
2404                             let field = variant.field(cx, i);
2405                             result = field.to_result().ok().and_then(|field| {
2406                                 if ptr_end <= field_start + field.size {
2407                                     // We found the right field, look inside it.
2408                                     field.pointee_info_at(cx, offset - field_start)
2411                                 } else {
2412                                     None
2413                                 }
2414                             });
2415                             if result.is_some() {
2416                                 break;
2417                             }
2418                         }
2419                     }
2420                 }
2421
2422                 // FIXME(eddyb) This should be for `ptr::Unique<T>`, not `Box<T>`.
2423                 if let Some(ref mut pointee) = result {
2424                     if let ty::Adt(def, _) = this.ty.kind() {
2425                         if def.is_box() && offset.bytes() == 0 {
2426                             pointee.safe = Some(PointerKind::UniqueOwned);
2427                         }
2428                     }
2429                 }
2430
2431                 result
2432             }
2433         };
2434
2435         debug!(
2436             "pointee_info_at (offset={:?}, type kind: {:?}) => {:?}",
2437             offset,
2438             this.ty.kind(),
2439             pointee_info
2440         );
2441
2442         pointee_info
2443     }
2444 }
2445
2446 impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for LayoutError<'tcx> {
2447     #[inline]
2448     fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
2449         use crate::ty::layout::LayoutError::*;
2450         mem::discriminant(self).hash_stable(hcx, hasher);
2451
2452         match *self {
2453             Unknown(t) | SizeOverflow(t) => t.hash_stable(hcx, hasher),
2454         }
2455     }
2456 }
2457
2458 impl<'tcx> ty::Instance<'tcx> {
2459     // NOTE(eddyb) this is private to avoid using it from outside of
2460     // `FnAbi::of_instance` - any other uses are either too high-level
2461     // for `Instance` (e.g. typeck would use `Ty::fn_sig` instead),
2462     // or should go through `FnAbi` instead, to avoid losing any
2463     // adjustments `FnAbi::of_instance` might be performing.
2464     fn fn_sig_for_fn_abi(&self, tcx: TyCtxt<'tcx>) -> ty::PolyFnSig<'tcx> {
2465         // FIXME(davidtwco,eddyb): A `ParamEnv` should be passed through to this function.
2466         let ty = self.ty(tcx, ty::ParamEnv::reveal_all());
2467         match *ty.kind() {
2468             ty::FnDef(..) => {
2469                 // HACK(davidtwco,eddyb): This is a workaround for polymorphization considering
2470                 // parameters unused if they show up in the signature, but not in the `mir::Body`
2471                 // (i.e. due to being inside a projection that got normalized, see
2472                 // `src/test/ui/polymorphization/normalized_sig_types.rs`), and codegen not keeping
2473                 // track of a polymorphization `ParamEnv` to allow normalizing later.
2474                 let mut sig = match *ty.kind() {
2475                     ty::FnDef(def_id, substs) => tcx
2476                         .normalize_erasing_regions(tcx.param_env(def_id), tcx.fn_sig(def_id))
2477                         .subst(tcx, substs),
2478                     _ => unreachable!(),
2479                 };
2480
2481                 if let ty::InstanceDef::VtableShim(..) = self.def {
2482                     // Modify `fn(self, ...)` to `fn(self: *mut Self, ...)`.
2483                     sig = sig.map_bound(|mut sig| {
2484                         let mut inputs_and_output = sig.inputs_and_output.to_vec();
2485                         inputs_and_output[0] = tcx.mk_mut_ptr(inputs_and_output[0]);
2486                         sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);
2487                         sig
2488                     });
2489                 }
2490                 sig
2491             }
2492             ty::Closure(def_id, substs) => {
2493                 let sig = substs.as_closure().sig();
2494
2495                 let bound_vars = tcx.mk_bound_variable_kinds(
2496                     sig.bound_vars()
2497                         .iter()
2498                         .chain(iter::once(ty::BoundVariableKind::Region(ty::BrEnv))),
2499                 );
2500                 let br = ty::BoundRegion {
2501                     var: ty::BoundVar::from_usize(bound_vars.len() - 1),
2502                     kind: ty::BoundRegionKind::BrEnv,
2503                 };
2504                 let env_region = ty::ReLateBound(ty::INNERMOST, br);
2505                 let env_ty = tcx.closure_env_ty(def_id, substs, env_region).unwrap();
2506
2507                 let sig = sig.skip_binder();
2508                 ty::Binder::bind_with_vars(
2509                     tcx.mk_fn_sig(
2510                         iter::once(env_ty).chain(sig.inputs().iter().cloned()),
2511                         sig.output(),
2512                         sig.c_variadic,
2513                         sig.unsafety,
2514                         sig.abi,
2515                     ),
2516                     bound_vars,
2517                 )
2518             }
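            // Sketch of the resulting shape (hypothetical closure, for
            // illustration): a closure `|x: u32| -> u32` capturing environment
            // `E` ends up with the signature `fn(E', u32) -> u32`, where
            // `closure_env_ty` picks `E'` as `E`, `&E` or `&mut E` depending
            // on the closure kind (`FnOnce`, `Fn`, `FnMut`).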
2519             ty::Generator(_, substs, _) => {
2520                 let sig = substs.as_generator().poly_sig();
2521
2522                 let bound_vars = tcx.mk_bound_variable_kinds(
2523                     sig.bound_vars()
2524                         .iter()
2525                         .chain(iter::once(ty::BoundVariableKind::Region(ty::BrEnv))),
2526                 );
2527                 let br = ty::BoundRegion {
2528                     var: ty::BoundVar::from_usize(bound_vars.len() - 1),
2529                     kind: ty::BoundRegionKind::BrEnv,
2530                 };
2531                 let env_region = ty::ReLateBound(ty::INNERMOST, br);
2532                 let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty);
2533
2534                 let pin_did = tcx.require_lang_item(LangItem::Pin, None);
2535                 let pin_adt_ref = tcx.adt_def(pin_did);
2536                 let pin_substs = tcx.intern_substs(&[env_ty.into()]);
2537                 let env_ty = tcx.mk_adt(pin_adt_ref, pin_substs);
2538
2539                 let sig = sig.skip_binder();
2540                 let state_did = tcx.require_lang_item(LangItem::GeneratorState, None);
2541                 let state_adt_ref = tcx.adt_def(state_did);
2542                 let state_substs = tcx.intern_substs(&[sig.yield_ty.into(), sig.return_ty.into()]);
2543                 let ret_ty = tcx.mk_adt(state_adt_ref, state_substs);
2544                 ty::Binder::bind_with_vars(
2545                     tcx.mk_fn_sig(
2546                         [env_ty, sig.resume_ty].iter(),
2547                         &ret_ty,
2548                         false,
2549                         hir::Unsafety::Normal,
2550                         rustc_target::spec::abi::Abi::Rust,
2551                     ),
2552                     bound_vars,
2553                 )
2554             }
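            // Net effect of the above (illustrative): a generator `G` with
            // resume type `R`, yield type `Y` and return type `T` is exposed
            // to codegen with the signature
            //
            //     fn(Pin<&mut G>, R) -> GeneratorState<Y, T>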
2555             _ => bug!("unexpected type {:?} in Instance::fn_sig_for_fn_abi", ty),
2556         }
2557     }
2558 }
2559
2560 pub trait FnAbiExt<'tcx, C>
2561 where
2562     C: LayoutOf<'tcx, Ty = Ty<'tcx>, TyAndLayout = TyAndLayout<'tcx>>
2563         + HasDataLayout
2564         + HasTargetSpec
2565         + HasTyCtxt<'tcx>
2566         + HasParamEnv<'tcx>,
2567 {
2568     /// Compute a `FnAbi` suitable for indirect calls, i.e. to `fn` pointers.
2569     ///
2570     /// NB: this doesn't handle virtual calls - those should use `FnAbi::of_instance`
2571     /// instead, where the instance is an `InstanceDef::Virtual`.
2572     fn of_fn_ptr(cx: &C, sig: ty::PolyFnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self;
2573
2574     /// Compute a `FnAbi` suitable for declaring/defining an `fn` instance, and for
2575     /// direct calls to an `fn`.
2576     ///
2577     /// NB: that includes virtual calls, which are represented by "direct calls"
2578     /// to an `InstanceDef::Virtual` instance (of `<dyn Trait as Trait>::fn`).
2579     fn of_instance(cx: &C, instance: ty::Instance<'tcx>, extra_args: &[Ty<'tcx>]) -> Self;
2580
2581     fn new_internal(
2582         cx: &C,
2583         sig: ty::PolyFnSig<'tcx>,
2584         extra_args: &[Ty<'tcx>],
2585         caller_location: Option<Ty<'tcx>>,
2586         codegen_fn_attr_flags: CodegenFnAttrFlags,
2587         make_self_ptr_thin: bool,
2588     ) -> Self;
2589     fn adjust_for_abi(&mut self, cx: &C, abi: SpecAbi);
2590 }
2591
2592 /// Calculates whether a function's ABI can unwind or not.
2593 ///
2594 /// This takes two primary parameters:
2595 ///
2596 /// * `codegen_fn_attr_flags` - these are flags calculated as part of the
2597 ///   codegen attrs for a defined function. For function pointers this set of
2598 ///   flags is the empty set. This is only applicable for Rust-defined
2599 ///   functions, and generally isn't needed except for small optimizations where
2600 ///   we try to say a function which otherwise might look like it could unwind
2601 ///   doesn't actually unwind (e.g. for intrinsics).
2602 ///
2603 /// * `abi` - this is the ABI that the function is defined with. This is the
2604 ///   primary factor for determining whether a function can unwind or not.
2605 ///
2606 /// Note that in this case unwinding is not necessarily panicking in Rust. Rust
2607 /// panics are implemented with unwinds on most platforms (when
2608 /// `-Cpanic=unwind`), but this also accounts for `-Cpanic=abort` build modes.
2609 /// Notably unwinding is disallowed for most non-Rust ABIs unless it's
2610 /// specifically in the name (e.g. `"C-unwind"`). What unwinding looks like is
2611 /// defined for each ABI individually, but it always corresponds to some form of
2612 /// stack-based unwinding (the exact mechanism of which varies
2613 /// platform-by-platform).
2614 ///
2615 /// Rust functions are classified as able to unwind or not based on the
2616 /// active "panic strategy". In other words Rust functions are considered to
2617 /// unwind in `-Cpanic=unwind` mode and cannot unwind in `-Cpanic=abort` mode.
2618 /// Note that Rust supports intermingling panic=abort and panic=unwind code, but
2619 /// only if the final panic mode is panic=abort. In this scenario any code
2620 /// previously compiled assuming that a function can unwind is still correct, it
2621 /// just never happens to actually unwind at runtime.
2622 ///
2623 /// This function's answer to whether or not a function can unwind is quite
2624 /// impactful throughout the compiler. This affects things like:
2625 ///
2626 /// * Calling a function which can't unwind means codegen simply ignores any
2627 ///   associated unwinding cleanup.
2628 /// * Calling a function which can unwind from a function which can't unwind
2629 ///   causes the `abort_unwinding_calls` MIR pass to insert a landing pad that
2630 ///   aborts the process.
2631 /// * This affects whether functions have the LLVM `nounwind` attribute, which
2632 ///   affects various optimizations and codegen.
2633 ///
2634 /// FIXME: this is actually buggy with respect to Rust functions. Rust functions
2635 /// compiled with `-Cpanic=unwind` and referenced from another crate compiled
2636 /// with `-Cpanic=abort` will look like they can't unwind when in fact they
2637 /// might (from a foreign exception or similar).
2638 #[inline]
2639 pub fn fn_can_unwind(
2640     tcx: TyCtxt<'tcx>,
2641     codegen_fn_attr_flags: CodegenFnAttrFlags,
2642     abi: SpecAbi,
2643 ) -> bool {
2644     // Special attribute for functions which can't unwind.
2645     if codegen_fn_attr_flags.contains(CodegenFnAttrFlags::NEVER_UNWIND) {
2646         return false;
2647     }
2648
2649     // Otherwise, if this isn't special, unwinding is generally determined by
2650     // the ABI of the function itself. ABIs like `C` have variants which also
2651     // specifically allow unwinding (`C-unwind`), but not all platform-specific
2652     // ABIs have such an option. Otherwise the only other thing here is Rust
2653     // itself, and those ABIs are determined by the panic strategy configured
2654     // for this compilation.
2655     //
2656     // Unfortunately at this time there's also another caveat. Rust [RFC
2657     // 2945][rfc] has been accepted and is in the process of being implemented
2658     // and stabilized. In this interim state we need to deal with historical
2659     // rustc behavior as well as plan for future rustc behavior.
2660     //
2661     // Historically functions declared with `extern "C"` were marked at the
2662     // codegen layer as `nounwind`. This happened regardless of `panic=unwind`
2663     // or not. This is UB for functions in `panic=unwind` mode that then
2664     // actually panic and unwind. Note that this behavior is true for both
2665     // externally declared functions as well as Rust-defined functions.
2666     //
2667     // To fix this UB rustc would like to change in the future to catch unwinds
2668     // from function calls that may unwind within a Rust-defined `extern "C"`
2669     // function and forcibly abort the process, thereby respecting the
2670     // `nounwind` attribute emitted for `extern "C"`. This behavior change isn't
2671     // ready to roll out, so determining whether or not the `C` family of ABIs
2672     // unwinds is conditional not only on their definition but also whether the
2673     // `#![feature(c_unwind)]` feature gate is active.
2674     //
2675     // Note that this means that, unlike historical compilers, rustc now, by
2676     // default, unconditionally thinks that the `C` ABI may unwind. This will
2677     // prevent some optimization opportunities, however, so we try to scope this
2678     // change and only assume that `C` unwinds with `panic=unwind` (as opposed
2679     // to `panic=abort`).
2680     //
2681     // Eventually the check against `c_unwind` here will ideally get removed and
2682     // this'll be a little cleaner as it'll be a straightforward check of the
2683     // ABI.
2684     //
2685     // [rfc]: https://github.com/rust-lang/rfcs/blob/master/text/2945-c-unwind-abi.md
2686     use SpecAbi::*;
2687     match abi {
2688         C { unwind } | Stdcall { unwind } | System { unwind } | Thiscall { unwind } => {
2689             unwind
2690                 || (!tcx.features().c_unwind && tcx.sess.panic_strategy() == PanicStrategy::Unwind)
2691         }
2692         Cdecl
2693         | Fastcall
2694         | Vectorcall
2695         | Aapcs
2696         | Win64
2697         | SysV64
2698         | PtxKernel
2699         | Msp430Interrupt
2700         | X86Interrupt
2701         | AmdGpuKernel
2702         | EfiApi
2703         | AvrInterrupt
2704         | AvrNonBlockingInterrupt
2705         | CCmseNonSecureCall
2706         | Wasm
2707         | RustIntrinsic
2708         | PlatformIntrinsic
2709         | Unadjusted => false,
2710         Rust | RustCall => tcx.sess.panic_strategy() == PanicStrategy::Unwind,
2711     }
2712 }
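
// A minimal sketch of the classification above (hypothetical calls; the
// answers assume `-Cpanic=unwind` and no `#![feature(c_unwind)]`):
//
//     fn_can_unwind(tcx, CodegenFnAttrFlags::NEVER_UNWIND, SpecAbi::Rust);           // false
//     fn_can_unwind(tcx, CodegenFnAttrFlags::empty(), SpecAbi::Rust);                // true
//     fn_can_unwind(tcx, CodegenFnAttrFlags::empty(), SpecAbi::C { unwind: false }); // true, conservatively
//     fn_can_unwind(tcx, CodegenFnAttrFlags::empty(), SpecAbi::Cdecl);               // false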
2713
2714 #[inline]
2715 pub fn conv_from_spec_abi(tcx: TyCtxt<'_>, abi: SpecAbi) -> Conv {
2716     use rustc_target::spec::abi::Abi::*;
2717     match tcx.sess.target.adjust_abi(abi) {
2718         RustIntrinsic | PlatformIntrinsic | Rust | RustCall => Conv::Rust,
2719
2720         // It's the ABI's job to select this, not ours.
2721         System { .. } => bug!("system abi should be selected elsewhere"),
2722         EfiApi => bug!("eficall abi should be selected elsewhere"),
2723
2724         Stdcall { .. } => Conv::X86Stdcall,
2725         Fastcall => Conv::X86Fastcall,
2726         Vectorcall => Conv::X86VectorCall,
2727         Thiscall { .. } => Conv::X86ThisCall,
2728         C { .. } => Conv::C,
2729         Unadjusted => Conv::C,
2730         Win64 => Conv::X86_64Win64,
2731         SysV64 => Conv::X86_64SysV,
2732         Aapcs => Conv::ArmAapcs,
2733         CCmseNonSecureCall => Conv::CCmseNonSecureCall,
2734         PtxKernel => Conv::PtxKernel,
2735         Msp430Interrupt => Conv::Msp430Intr,
2736         X86Interrupt => Conv::X86Intr,
2737         AmdGpuKernel => Conv::AmdGpuKernel,
2738         AvrInterrupt => Conv::AvrInterrupt,
2739         AvrNonBlockingInterrupt => Conv::AvrNonBlockingInterrupt,
2740         Wasm => Conv::C,
2741
2742         // These ABI constants ought to be more specific...
2743         Cdecl => Conv::C,
2744     }
2745 }
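
// For example (illustrative): `extern "stdcall"` lowers to `Conv::X86Stdcall`,
// while `extern "system"` never reaches this `match` because
// `Target::adjust_abi` has already resolved it to a concrete ABI (`C`, or
// `Stdcall` on 32-bit Windows x86), hence the `bug!` arm above.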
2746
2747 impl<'tcx, C> FnAbiExt<'tcx, C> for call::FnAbi<'tcx, Ty<'tcx>>
2748 where
2749     C: LayoutOf<'tcx, Ty = Ty<'tcx>, TyAndLayout = TyAndLayout<'tcx>>
2750         + HasDataLayout
2751         + HasTargetSpec
2752         + HasTyCtxt<'tcx>
2753         + HasParamEnv<'tcx>,
2754 {
2755     fn of_fn_ptr(cx: &C, sig: ty::PolyFnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
2756         call::FnAbi::new_internal(cx, sig, extra_args, None, CodegenFnAttrFlags::empty(), false)
2757     }
2758
2759     fn of_instance(cx: &C, instance: ty::Instance<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
2760         let sig = instance.fn_sig_for_fn_abi(cx.tcx());
2761
2762         let caller_location = if instance.def.requires_caller_location(cx.tcx()) {
2763             Some(cx.tcx().caller_location_ty())
2764         } else {
2765             None
2766         };
2767
2768         let attrs = cx.tcx().codegen_fn_attrs(instance.def_id()).flags;
2769
2770         call::FnAbi::new_internal(
2771             cx,
2772             sig,
2773             extra_args,
2774             caller_location,
2775             attrs,
2776             matches!(instance.def, ty::InstanceDef::Virtual(..)),
2777         )
2778     }
2779
2780     fn new_internal(
2781         cx: &C,
2782         sig: ty::PolyFnSig<'tcx>,
2783         extra_args: &[Ty<'tcx>],
2784         caller_location: Option<Ty<'tcx>>,
2785         codegen_fn_attr_flags: CodegenFnAttrFlags,
2786         force_thin_self_ptr: bool,
2787     ) -> Self {
2788         debug!("FnAbi::new_internal({:?}, {:?})", sig, extra_args);
2789
2790         let sig = cx.tcx().normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), sig);
2791
2792         let conv = conv_from_spec_abi(cx.tcx(), sig.abi);
2793
2794         let mut inputs = sig.inputs();
2795         let extra_args = if sig.abi == RustCall {
2796             assert!(!sig.c_variadic && extra_args.is_empty());
2797
2798             if let Some(input) = sig.inputs().last() {
2799                 if let ty::Tuple(tupled_arguments) = input.kind() {
2800                     inputs = &sig.inputs()[0..sig.inputs().len() - 1];
2801                     tupled_arguments.iter().map(|k| k.expect_ty()).collect()
2802                 } else {
2803                     bug!(
2804                         "argument to function with \"rust-call\" ABI \
2805                             is not a tuple"
2806                     );
2807                 }
2808             } else {
2809                 bug!(
2810                     "argument to function with \"rust-call\" ABI \
2811                         is not a tuple"
2812                 );
2813             }
2814         } else {
2815             assert!(sig.c_variadic || extra_args.is_empty());
2816             extra_args.to_vec()
2817         };
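
        // Sketch of the untupling above (hypothetical signature): an
        // `extern "rust-call" fn(self, args: (A, B))` is lowered as if it
        // were `fn(self, A, B)`; `inputs` keeps `self` and `extra_args`
        // becomes `[A, B]`.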
2818
2819         let target = &cx.tcx().sess.target;
2820         let target_env_gnu_like = matches!(&target.env[..], "gnu" | "musl");
2821         let win_x64_gnu = target.os == "windows" && target.arch == "x86_64" && target.env == "gnu";
2822         let linux_s390x_gnu_like =
2823             target.os == "linux" && target.arch == "s390x" && target_env_gnu_like;
2824         let linux_sparc64_gnu_like =
2825             target.os == "linux" && target.arch == "sparc64" && target_env_gnu_like;
2826         let linux_powerpc_gnu_like =
2827             target.os == "linux" && target.arch == "powerpc" && target_env_gnu_like;
2828         use SpecAbi::*;
2829         let rust_abi = matches!(sig.abi, RustIntrinsic | PlatformIntrinsic | Rust | RustCall);
2830
2831         // Handle safe Rust thin and fat pointers.
2832         let adjust_for_rust_scalar = |attrs: &mut ArgAttributes,
2833                                       scalar: &Scalar,
2834                                       layout: TyAndLayout<'tcx>,
2835                                       offset: Size,
2836                                       is_return: bool| {
2837             // Booleans are always an i1 that needs to be zero-extended.
2838             if scalar.is_bool() {
2839                 attrs.ext(ArgExtension::Zext);
2840                 return;
2841             }
2842
2843             // Only pointer types handled below.
2844             if scalar.value != Pointer {
2845                 return;
2846             }
2847
2848             if !scalar.valid_range.contains_zero() {
2849                 attrs.set(ArgAttribute::NonNull);
2850             }
2851
2852             if let Some(pointee) = layout.pointee_info_at(cx, offset) {
2853                 if let Some(kind) = pointee.safe {
2854                     attrs.pointee_align = Some(pointee.align);
2855
2856                     // `Box` (`UniqueOwned`) pointers are not necessarily dereferenceable
2857                     // for the entire duration of the function as they can be deallocated
2858                     // at any time. Set their valid size to 0.
2859                     attrs.pointee_size = match kind {
2860                         PointerKind::UniqueOwned => Size::ZERO,
2861                         _ => pointee.size,
2862                     };
2863
2864                     // `Box` pointer parameters never alias because ownership is transferred.
2865                     // `&mut` pointer parameters never alias other parameters
2866                     // or mutable global data.
2867                     //
2868                     // `&T` where `T` contains no `UnsafeCell<U>` is immutable,
2869                     // and can be marked as both `readonly` and `noalias`, as
2870                     // LLVM's definition of `noalias` is based solely on memory
2871                     // dependencies rather than pointer equality
2872                     //
2873                     // Due to miscompiles in LLVM < 12, we apply a separate NoAliasMutRef attribute
2874                     // for UniqueBorrowed arguments, so that the codegen backend can decide
2875                     // whether or not to actually emit the attribute.
2876                     let no_alias = match kind {
2877                         PointerKind::Shared | PointerKind::UniqueBorrowed => false,
2878                         PointerKind::UniqueOwned => true,
2879                         PointerKind::Frozen => !is_return,
2880                     };
2881                     if no_alias {
2882                         attrs.set(ArgAttribute::NoAlias);
2883                     }
2884
2885                     if kind == PointerKind::Frozen && !is_return {
2886                         attrs.set(ArgAttribute::ReadOnly);
2887                     }
2888
2889                     if kind == PointerKind::UniqueBorrowed && !is_return {
2890                         attrs.set(ArgAttribute::NoAliasMutRef);
2891                     }
2892                 }
2893             }
2894         };
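
        // Putting the rules above together (illustrative, for a non-return
        // argument at offset 0; names are the `ArgAttribute` flags, not final
        // LLVM spellings):
        //
        //     &T     (T: Freeze) => NonNull + NoAlias + ReadOnly, pointee size recorded
        //     &mut T (T: Unpin)  => NonNull + NoAliasMutRef,      pointee size recorded
        //     Box<T>             => NonNull + NoAlias,            pointee size forced to 0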
2895
2896         let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| {
2897             let is_return = arg_idx.is_none();
2898
2899             let layout = cx.layout_of(ty);
2900             let layout = if force_thin_self_ptr && arg_idx == Some(0) {
2901                 // Don't pass the vtable, it's not an argument of the virtual fn.
2902                 // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
2903                 // or `&/&mut dyn Trait` because this is special-cased elsewhere in codegen
2904                 make_thin_self_ptr(cx, layout)
2905             } else {
2906                 layout
2907             };
2908
2909             let mut arg = ArgAbi::new(cx, layout, |layout, scalar, offset| {
2910                 let mut attrs = ArgAttributes::new();
2911                 adjust_for_rust_scalar(&mut attrs, scalar, *layout, offset, is_return);
2912                 attrs
2913             });
2914
2915             if arg.layout.is_zst() {
2916                 // For some forsaken reason, x86_64-pc-windows-gnu
2917                 // doesn't ignore zero-sized struct arguments.
2918                 // The same is true for {s390x,sparc64,powerpc}-unknown-linux-{gnu,musl}.
2919                 if is_return
2920                     || rust_abi
2921                     || (!win_x64_gnu
2922                         && !linux_s390x_gnu_like
2923                         && !linux_sparc64_gnu_like
2924                         && !linux_powerpc_gnu_like)
2925                 {
2926                     arg.mode = PassMode::Ignore;
2927                 }
2928             }
2929
2930             arg
2931         };
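
        // Example of the ZST rule above (illustrative): a `()` argument gets
        // `PassMode::Ignore` for any Rust-ABI function on every target, but
        // is still passed for `extern "C"` on x86_64-pc-windows-gnu and the
        // gnu-like Linux targets listed above.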
2932
2933         let mut fn_abi = FnAbi {
2934             ret: arg_of(sig.output(), None),
2935             args: inputs
2936                 .iter()
2937                 .cloned()
2938                 .chain(extra_args)
2939                 .chain(caller_location)
2940                 .enumerate()
2941                 .map(|(i, ty)| arg_of(ty, Some(i)))
2942                 .collect(),
2943             c_variadic: sig.c_variadic,
2944             fixed_count: inputs.len(),
2945             conv,
2946             can_unwind: fn_can_unwind(cx.tcx(), codegen_fn_attr_flags, sig.abi),
2947         };
2948         fn_abi.adjust_for_abi(cx, sig.abi);
2949         debug!("FnAbi::new_internal = {:?}", fn_abi);
2950         fn_abi
2951     }
2952
2953     fn adjust_for_abi(&mut self, cx: &C, abi: SpecAbi) {
2954         if abi == SpecAbi::Unadjusted {
2955             return;
2956         }
2957
2958         if abi == SpecAbi::Rust
2959             || abi == SpecAbi::RustCall
2960             || abi == SpecAbi::RustIntrinsic
2961             || abi == SpecAbi::PlatformIntrinsic
2962         {
2963             let fixup = |arg: &mut ArgAbi<'tcx, Ty<'tcx>>| {
2964                 if arg.is_ignore() {
2965                     return;
2966                 }
2967
2968                 match arg.layout.abi {
2969                     Abi::Aggregate { .. } => {}
2970
2971                     // This is a fun case! The gist of what this is doing is
2972                     // that we want callers and callees to always agree on the
2973                     // ABI of how they pass SIMD arguments. If we were to *not*
2974                     // make these arguments indirect then they'd be immediates
2975                     // in LLVM, which means that they'd use whatever the
2976                     // appropriate ABI is for the callee and the caller. That
2977                     // means, for example, if the caller doesn't have AVX
2978                     // enabled but the callee does, then passing an AVX argument
2979                     // across this boundary would cause corrupt data to show up.
2980                     //
2981                     // This problem is fixed by unconditionally passing SIMD
2982                     // arguments through memory between callers and callees
2983                     // which should get them all to agree on ABI regardless of
2984                     // target feature sets. Some more information about this
2985                     // issue can be found in #44367.
2986                     //
2987                     // Note that the platform intrinsic ABI is exempt here as
2988                     // that's how we connect up to LLVM and it's unstable
2989                     // anyway, we control all calls to it in libstd.
2990                     Abi::Vector { .. }
2991                         if abi != SpecAbi::PlatformIntrinsic
2992                             && cx.tcx().sess.target.simd_types_indirect =>
2993                     {
2994                         arg.make_indirect();
2995                         return;
2996                     }
2997
2998                     _ => return,
2999                 }
3000
3001                 // Pass and return structures up to 2 pointers in size by value, matching `ScalarPair`.
3002                 // LLVM will usually pass these in 2 registers, which is more efficient than by-ref.
3003                 let max_by_val_size = Pointer.size(cx) * 2;
3004                 let size = arg.layout.size;
3005
3006                 if arg.layout.is_unsized() || size > max_by_val_size {
3007                     arg.make_indirect();
3008                 } else {
3009                     // We want to pass small aggregates as immediates, but using
3010                     // a LLVM aggregate type for this leads to bad optimizations,
3011                     // so we pick an appropriately sized integer type instead.
3012                     arg.cast_to(Reg { kind: RegKind::Integer, size });
3013                 }
3014             };
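
            // Net effect of `fixup` (illustrative, assuming 64-bit pointers):
            // an `Abi::Aggregate` argument such as `[u64; 2]` (16 bytes, i.e.
            // exactly two pointers) is cast to an integer register type,
            // `[u64; 3]` (24 bytes) is made indirect, and `Abi::Vector` SIMD
            // values are made indirect whenever the target sets
            // `simd_types_indirect`.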
3015             fixup(&mut self.ret);
3016             for arg in &mut self.args {
3017                 fixup(arg);
3018             }
3019             return;
3020         }
3021
3022         if let Err(msg) = self.adjust_for_cabi(cx, abi) {
3023             cx.tcx().sess.fatal(&msg);
3024         }
3025     }
3026 }
3027
3028 fn make_thin_self_ptr<'tcx>(
3029     cx: &(impl HasTyCtxt<'tcx> + HasParamEnv<'tcx>),
3030     layout: TyAndLayout<'tcx>,
3031 ) -> TyAndLayout<'tcx> {
3032     let tcx = cx.tcx();
3033     let fat_pointer_ty = if layout.is_unsized() {
3034         // unsized `self` is passed as a pointer to `self`
3035         // FIXME (mikeyhew) change this to use &own if it is ever added to the language
3036         tcx.mk_mut_ptr(layout.ty)
3037     } else {
3038         match layout.abi {
3039             Abi::ScalarPair(..) => (),
3040             _ => bug!("receiver type has unsupported layout: {:?}", layout),
3041         }
3042
3043         // In the case of Rc<Self>, we need to explicitly pass a *mut RcBox<Self>
3044         // with a Scalar (not ScalarPair) ABI. This hack is understood elsewhere
3045         // in the compiler to mean a method on a `dyn Trait` receiver.
3046         // To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes until we
3047         // get a built-in pointer type
3048         let mut fat_pointer_layout = layout;
3049         'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr()
3050             && !fat_pointer_layout.ty.is_region_ptr()
3051         {
3052             for i in 0..fat_pointer_layout.fields.count() {
3053                 let field_layout = fat_pointer_layout.field(cx, i);
3054
3055                 if !field_layout.is_zst() {
3056                     fat_pointer_layout = field_layout;
3057                     continue 'descend_newtypes;
3058                 }
3059             }
3060
3061             bug!("receiver has no non-zero-sized fields {:?}", fat_pointer_layout);
3062         }
3063
3064         fat_pointer_layout.ty
3065     };
3066
3067     // we now have a type like `*mut RcBox<dyn Trait>`
3068     // change its layout to that of `*mut ()`, a thin pointer, but keep the same type
3069     // this is understood as a special case elsewhere in the compiler
3070     let unit_ptr_ty = tcx.mk_mut_ptr(tcx.mk_unit());
3071
3072     TyAndLayout {
3073         ty: fat_pointer_ty,
3074
3075         // NOTE(eddyb) using an empty `ParamEnv`, and `unwrap`-ing the `Result`
3076         // should always work because the type is always `*mut ()`.
3077         ..tcx.layout_of(ty::ParamEnv::reveal_all().and(unit_ptr_ty)).unwrap()
3078     }
3079 }
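
// Illustrative effect of `make_thin_self_ptr` (hypothetical receiver): for a
// virtual call with `self: Rc<Self>` where `Self` is `dyn Trait`, the receiver
// is given the layout of `*mut ()` (a thin data pointer; the vtable travels
// separately) while keeping a pointer-like type such as
// `*const RcBox<dyn Trait>`, which later codegen stages special-case.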