// compiler/rustc_middle/src/ty/layout.rs
use crate::ich::StableHashingContext;
use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use crate::mir::{GeneratorLayout, GeneratorSavedLocal};
use crate::ty::subst::Subst;
use crate::ty::{self, subst::SubstsRef, ReprOptions, Ty, TyCtxt, TypeFoldable};

use rustc_ast::{self as ast, IntTy, UintTy};
use rustc_attr as attr;
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_hir as hir;
use rustc_hir::lang_items::LangItem;
use rustc_index::bit_set::BitSet;
use rustc_index::vec::{Idx, IndexVec};
use rustc_session::{DataTypeKind, FieldInfo, SizeKind, VariantInfo};
use rustc_span::symbol::{Ident, Symbol};
use rustc_span::DUMMY_SP;
use rustc_target::abi::call::{
    ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, Conv, FnAbi, PassMode, Reg, RegKind,
};
use rustc_target::abi::*;
use rustc_target::spec::{abi::Abi as SpecAbi, HasTargetSpec, PanicStrategy};

use std::cmp;
use std::fmt;
use std::iter;
use std::mem;
use std::num::NonZeroUsize;
use std::ops::Bound;

pub trait IntegerExt {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx>;
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
    fn repr_discr<'tcx>(
        tcx: TyCtxt<'tcx>,
        ty: Ty<'tcx>,
        repr: &ReprOptions,
        min: i128,
        max: i128,
    ) -> (Integer, bool);
}

impl IntegerExt for Integer {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx> {
        match (*self, signed) {
            (I8, false) => tcx.types.u8,
            (I16, false) => tcx.types.u16,
            (I32, false) => tcx.types.u32,
            (I64, false) => tcx.types.u64,
            (I128, false) => tcx.types.u128,
            (I8, true) => tcx.types.i8,
            (I16, true) => tcx.types.i16,
            (I32, true) => tcx.types.i32,
            (I64, true) => tcx.types.i64,
            (I128, true) => tcx.types.i128,
        }
    }

    /// Gets the `Integer` type from an `attr::IntType`.
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
        let dl = cx.data_layout();

        match ity {
            attr::SignedInt(IntTy::I8) | attr::UnsignedInt(UintTy::U8) => I8,
            attr::SignedInt(IntTy::I16) | attr::UnsignedInt(UintTy::U16) => I16,
            attr::SignedInt(IntTy::I32) | attr::UnsignedInt(UintTy::U32) => I32,
            attr::SignedInt(IntTy::I64) | attr::UnsignedInt(UintTy::U64) => I64,
            attr::SignedInt(IntTy::I128) | attr::UnsignedInt(UintTy::U128) => I128,
            attr::SignedInt(IntTy::Isize) | attr::UnsignedInt(UintTy::Usize) => {
                dl.ptr_sized_integer()
            }
        }
    }

    /// Finds the appropriate `Integer` type and signedness for the given
    /// signed discriminant range and `#[repr]` attribute.
    /// N.B.: `u128` values above `i128::MAX` will be treated as signed, but
    /// that shouldn't affect anything, other than maybe debuginfo.
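    ///
    /// Illustrative sketch (not a doctest; a `TyCtxt` cannot be built in
    /// isolation): with no `#[repr]` hints, a discriminant range of
    /// `-1..=100` needs a signed representation, and `I8` fits both bounds:
    ///
    /// ```ignore (illustrative)
    /// let (ity, signed) = Integer::repr_discr(tcx, ty, &ReprOptions::default(), -1, 100);
    /// assert_eq!((ity, signed), (I8, true));
    /// ```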
    fn repr_discr<'tcx>(
        tcx: TyCtxt<'tcx>,
        ty: Ty<'tcx>,
        repr: &ReprOptions,
        min: i128,
        max: i128,
    ) -> (Integer, bool) {
        // Theoretically, negative values could be larger in unsigned representation
        // than the unsigned representation of the signed minimum. However, if there
        // are any negative values, the only valid unsigned representation is u128
        // which can fit all i128 values, so the result remains unaffected.
        let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
        let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));

        let mut min_from_extern = None;
        let min_default = I8;

        if let Some(ity) = repr.int {
            let discr = Integer::from_attr(&tcx, ity);
            let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
            if discr < fit {
                bug!(
                    "Integer::repr_discr: `#[repr]` hint too small for \
                      discriminant range of enum `{}`",
                    ty
                )
            }
            return (discr, ity.is_signed());
        }

        if repr.c() {
            match &tcx.sess.target.arch[..] {
                // WARNING: the ARM EABI has two variants; the one corresponding
                // to `at_least == I32` appears to be used on Linux and NetBSD,
                // but some systems may use the variant corresponding to no
                // lower bound. However, we don't run on those yet...?
                "arm" => min_from_extern = Some(I32),
                _ => min_from_extern = Some(I32),
            }
        }

        let at_least = min_from_extern.unwrap_or(min_default);

        // If there are no negative values, we can use the unsigned fit.
        if min >= 0 {
            (cmp::max(unsigned_fit, at_least), false)
        } else {
            (cmp::max(signed_fit, at_least), true)
        }
    }
}

pub trait PrimitiveExt {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
    fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
}

impl PrimitiveExt for Primitive {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
        match *self {
            Int(i, signed) => i.to_ty(tcx, signed),
            F32 => tcx.types.f32,
            F64 => tcx.types.f64,
            Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
        }
    }

    /// Returns an *integer* type matching this primitive.
    /// Useful in particular when dealing with enum discriminants.
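    ///
    /// For example (illustrative, not a doctest): `Pointer.to_int_ty(tcx)`
    /// yields `usize`, and `Int(I8, true).to_int_ty(tcx)` yields `i8`.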
    fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
        match *self {
            Int(i, signed) => i.to_ty(tcx, signed),
            Pointer => tcx.types.usize,
            F32 | F64 => bug!("floats do not have an int type"),
        }
    }
}

/// The first half of a fat pointer.
///
/// - For a trait object, this is the address of the box.
/// - For a slice, this is the base address.
pub const FAT_PTR_ADDR: usize = 0;

/// The second half of a fat pointer.
///
/// - For a trait object, this is the address of the vtable.
/// - For a slice, this is the length.
pub const FAT_PTR_EXTRA: usize = 1;
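
// Illustrative sketch of the two halves (assumed shapes, not definitions from
// this file): a `&[u8]` is effectively the pair `(*const u8, usize)` and a
// `&dyn Trait` is effectively `(*const Data, *const Vtable)`; `FAT_PTR_ADDR`
// and `FAT_PTR_EXTRA` index those two halves in field order.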

#[derive(Copy, Clone, Debug, TyEncodable, TyDecodable)]
pub enum LayoutError<'tcx> {
    Unknown(Ty<'tcx>),
    SizeOverflow(Ty<'tcx>),
}

impl<'tcx> fmt::Display for LayoutError<'tcx> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match *self {
            LayoutError::Unknown(ty) => write!(f, "the type `{}` has an unknown layout", ty),
            LayoutError::SizeOverflow(ty) => {
                write!(f, "values of the type `{}` are too big for the current architecture", ty)
            }
        }
    }
}

fn layout_raw<'tcx>(
    tcx: TyCtxt<'tcx>,
    query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
) -> Result<&'tcx Layout, LayoutError<'tcx>> {
    ty::tls::with_related_context(tcx, move |icx| {
        let (param_env, ty) = query.into_parts();

        if !tcx.sess.recursion_limit().value_within_limit(icx.layout_depth) {
            tcx.sess.fatal(&format!("overflow representing the type `{}`", ty));
        }

        // Update the ImplicitCtxt to increase the layout_depth
        let icx = ty::tls::ImplicitCtxt { layout_depth: icx.layout_depth + 1, ..icx.clone() };

        ty::tls::enter_context(&icx, |_| {
            let cx = LayoutCx { tcx, param_env };
            let layout = cx.layout_raw_uncached(ty);
            // Type-level uninhabitedness should always imply ABI uninhabitedness.
            if let Ok(layout) = layout {
                if ty.conservative_is_privately_uninhabited(tcx) {
                    assert!(layout.abi.is_uninhabited());
                }
            }
            layout
        })
    })
}

pub fn provide(providers: &mut ty::query::Providers) {
    *providers = ty::query::Providers { layout_raw, ..*providers };
}

pub struct LayoutCx<'tcx, C> {
    pub tcx: C,
    pub param_env: ty::ParamEnv<'tcx>,
}

#[derive(Copy, Clone, Debug)]
enum StructKind {
    /// A tuple, closure, or univariant which cannot be coerced to unsized.
    AlwaysSized,
    /// A univariant, the last field of which may be coerced to unsized.
    MaybeUnsized,
    /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
    Prefixed(Size, Align),
}

// Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
// This is used to go between `memory_index` (source field order to memory order)
// and `inverse_memory_index` (memory order to source field order).
// See also `FieldsShape::Arbitrary::memory_index` for more details.
// FIXME(eddyb) build a better abstraction for permutations, if possible.
fn invert_mapping(map: &[u32]) -> Vec<u32> {
    let mut inverse = vec![0; map.len()];
    for i in 0..map.len() {
        inverse[map[i] as usize] = i as u32;
    }
    inverse
}
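
// A minimal sanity check of the inversion property (hypothetical test, not
// part of the upstream file): `invert_mapping(map)[map[x]] == x`.
#[cfg(test)]
mod invert_mapping_tests {
    use super::invert_mapping;

    #[test]
    fn inverts_a_permutation() {
        // Source-order field 0 lands in memory slot 2, field 1 in slot 0,
        // field 2 in slot 1.
        let map: Vec<u32> = vec![2, 0, 1];
        let inverse = invert_mapping(&map);
        assert_eq!(inverse, vec![1, 2, 0]);
        for (x, &y) in map.iter().enumerate() {
            assert_eq!(inverse[y as usize], x as u32);
        }
    }
}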

impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
    fn scalar_pair(&self, a: Scalar, b: Scalar) -> Layout {
        let dl = self.data_layout();
        let b_align = b.value.align(dl);
        let align = a.value.align(dl).max(b_align).max(dl.aggregate_align);
        let b_offset = a.value.size(dl).align_to(b_align.abi);
        let size = (b_offset + b.value.size(dl)).align_to(align.abi);

        // HACK(nox): We iter on `b` and then `a` because `max_by_key`
        // returns the last maximum.
        let largest_niche = Niche::from_scalar(dl, b_offset, b.clone())
            .into_iter()
            .chain(Niche::from_scalar(dl, Size::ZERO, a.clone()))
            .max_by_key(|niche| niche.available(dl));

        Layout {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Arbitrary {
                offsets: vec![Size::ZERO, b_offset],
                memory_index: vec![0, 1],
            },
            abi: Abi::ScalarPair(a, b),
            largest_niche,
            align,
            size,
        }
    }
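
    // Worked example for `scalar_pair` (illustrative, assuming a typical
    // 64-bit data layout): for `a = u8` (size 1, align 1) and `b = u32`
    // (size 4, align 4), `b_offset` is 1 rounded up to 4, and the pair's
    // size is (4 + 4) rounded up to the 4-byte alignment, i.e. 8 bytes.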

    fn univariant_uninterned(
        &self,
        ty: Ty<'tcx>,
        fields: &[TyAndLayout<'_>],
        repr: &ReprOptions,
        kind: StructKind,
    ) -> Result<Layout, LayoutError<'tcx>> {
        let dl = self.data_layout();
        let pack = repr.pack;
        if pack.is_some() && repr.align.is_some() {
            bug!("struct cannot be packed and aligned");
        }

        let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };

        let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();

        let optimize = !repr.inhibit_struct_field_reordering_opt();
        if optimize {
            let end =
                if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
            let optimizing = &mut inverse_memory_index[..end];
            let field_align = |f: &TyAndLayout<'_>| {
                if let Some(pack) = pack { f.align.abi.min(pack) } else { f.align.abi }
            };
            match kind {
                StructKind::AlwaysSized | StructKind::MaybeUnsized => {
                    optimizing.sort_by_key(|&x| {
                        // Place ZSTs first to avoid "interesting offsets",
                        // especially with only one or two non-ZST fields.
                        let f = &fields[x as usize];
                        (!f.is_zst(), cmp::Reverse(field_align(f)))
                    });
                }
                StructKind::Prefixed(..) => {
                    // Sort in ascending alignment so that the layout stays
                    // optimal regardless of the prefix.
                    optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
                }
            }
        }
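
        // Worked example (illustrative, 64-bit target): for
        // `struct S { a: u8, b: u64, c: u16 }` without `#[repr(C)]`, sorting
        // by decreasing alignment gives memory order (b, c, a), so `S` packs
        // into 16 bytes rather than the 24 that declaration order would need.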

        // inverse_memory_index holds field indices by increasing memory offset.
        // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
        // We now write field offsets to the corresponding offset slot;
        // field 5 with offset 0 puts 0 in offsets[5].
        // At the bottom of this function, we invert `inverse_memory_index` to
        // produce `memory_index` (see `invert_mapping`).

        let mut sized = true;
        let mut offsets = vec![Size::ZERO; fields.len()];
        let mut offset = Size::ZERO;
        let mut largest_niche = None;
        let mut largest_niche_available = 0;

        if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
            let prefix_align =
                if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
            align = align.max(AbiAndPrefAlign::new(prefix_align));
            offset = prefix_size.align_to(prefix_align);
        }

        for &i in &inverse_memory_index {
            let field = fields[i as usize];
            if !sized {
                bug!("univariant: field #{} of `{}` comes after unsized field", offsets.len(), ty);
            }

            if field.is_unsized() {
                sized = false;
            }

            // Invariant: offset < dl.obj_size_bound() <= 1<<61
            let field_align = if let Some(pack) = pack {
                field.align.min(AbiAndPrefAlign::new(pack))
            } else {
                field.align
            };
            offset = offset.align_to(field_align.abi);
            align = align.max(field_align);

            debug!("univariant offset: {:?} field: {:#?}", offset, field);
            offsets[i as usize] = offset;

            if !repr.hide_niche() {
                if let Some(mut niche) = field.largest_niche.clone() {
                    let available = niche.available(dl);
                    if available > largest_niche_available {
                        largest_niche_available = available;
                        niche.offset += offset;
                        largest_niche = Some(niche);
                    }
                }
            }

            offset = offset.checked_add(field.size, dl).ok_or(LayoutError::SizeOverflow(ty))?;
        }

        if let Some(repr_align) = repr.align {
            align = align.max(AbiAndPrefAlign::new(repr_align));
        }

        debug!("univariant min_size: {:?}", offset);
        let min_size = offset;

        // As stated above, inverse_memory_index holds field indices by increasing offset.
        // This makes it an already-sorted view of the offsets vec.
        // To invert it, consider: if field 5 has offset 0, then
        // inverse_memory_index[0] is 5, and memory_index[5] should be 0.
        // Note: if we didn't optimize, inverse_memory_index is already the
        // identity permutation, so no inversion is needed.

        let memory_index =
            if optimize { invert_mapping(&inverse_memory_index) } else { inverse_memory_index };

        let size = min_size.align_to(align.abi);
        let mut abi = Abi::Aggregate { sized };

        // Unpack newtype ABIs and find scalar pairs.
        if sized && size.bytes() > 0 {
            // All other fields must be ZSTs.
            let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());

            match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
                // We have exactly one non-ZST field.
                (Some((i, field)), None, None) => {
                    // Field fills the struct and it has a scalar or scalar pair ABI.
                    if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size
                    {
                        match field.abi {
                            // For plain scalars, or vectors of them, we can't unpack
                            // newtypes for `#[repr(C)]`, as that affects C ABIs.
                            Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
                                abi = field.abi.clone();
                            }
                            // But scalar pairs are Rust-specific and get
                            // treated as aggregates by C ABIs anyway.
                            Abi::ScalarPair(..) => {
                                abi = field.abi.clone();
                            }
                            _ => {}
                        }
                    }
                }

                // Two non-ZST fields, and they're both scalars.
                (
                    Some((i, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(ref a), .. }, .. })),
                    Some((j, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(ref b), .. }, .. })),
                    None,
                ) => {
                    // Order by the memory placement, not source order.
                    let ((i, a), (j, b)) =
                        if offsets[i] < offsets[j] { ((i, a), (j, b)) } else { ((j, b), (i, a)) };
                    let pair = self.scalar_pair(a.clone(), b.clone());
                    let pair_offsets = match pair.fields {
                        FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
                            assert_eq!(memory_index, &[0, 1]);
                            offsets
                        }
                        _ => bug!(),
                    };
                    if offsets[i] == pair_offsets[0]
                        && offsets[j] == pair_offsets[1]
                        && align == pair.align
                        && size == pair.size
                    {
                        // We can use `ScalarPair` only when it matches our
                        // already computed layout (including `#[repr(C)]`).
                        abi = pair.abi;
                    }
                }

                _ => {}
            }
        }

        if sized && fields.iter().any(|f| f.abi.is_uninhabited()) {
            abi = Abi::Uninhabited;
        }

        Ok(Layout {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Arbitrary { offsets, memory_index },
            abi,
            largest_niche,
            align,
            size,
        })
    }

    fn layout_raw_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx Layout, LayoutError<'tcx>> {
        let tcx = self.tcx;
        let param_env = self.param_env;
        let dl = self.data_layout();
        let scalar_unit = |value: Primitive| {
            let bits = value.size(dl).bits();
            assert!(bits <= 128);
            Scalar { value, valid_range: 0..=(!0 >> (128 - bits)) }
        };
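        // E.g. `scalar_unit(Int(I8, false))` covers the full `0..=255` range;
        // callers then narrow `valid_range` where the type permits fewer
        // values (`bool` below keeps only `0..=1`).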
        let scalar = |value: Primitive| tcx.intern_layout(Layout::scalar(self, scalar_unit(value)));

        let univariant = |fields: &[TyAndLayout<'_>], repr: &ReprOptions, kind| {
            Ok(tcx.intern_layout(self.univariant_uninterned(ty, fields, repr, kind)?))
        };
        debug_assert!(!ty.has_infer_types_or_consts());

        Ok(match *ty.kind() {
            // Basic scalars.
            ty::Bool => tcx.intern_layout(Layout::scalar(
                self,
                Scalar { value: Int(I8, false), valid_range: 0..=1 },
            )),
            ty::Char => tcx.intern_layout(Layout::scalar(
                self,
                Scalar { value: Int(I32, false), valid_range: 0..=0x10FFFF },
            )),
            ty::Int(ity) => scalar(Int(Integer::from_attr(dl, attr::SignedInt(ity)), true)),
            ty::Uint(ity) => scalar(Int(Integer::from_attr(dl, attr::UnsignedInt(ity)), false)),
            ty::Float(fty) => scalar(match fty {
                ast::FloatTy::F32 => F32,
                ast::FloatTy::F64 => F64,
            }),
            ty::FnPtr(_) => {
                let mut ptr = scalar_unit(Pointer);
                ptr.valid_range = 1..=*ptr.valid_range.end();
                tcx.intern_layout(Layout::scalar(self, ptr))
            }

            // The never type.
            ty::Never => tcx.intern_layout(Layout {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldsShape::Primitive,
                abi: Abi::Uninhabited,
                largest_niche: None,
                align: dl.i8_align,
                size: Size::ZERO,
            }),

            // Potentially-wide pointers.
            ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                let mut data_ptr = scalar_unit(Pointer);
                if !ty.is_unsafe_ptr() {
                    data_ptr.valid_range = 1..=*data_ptr.valid_range.end();
                }

                let pointee = tcx.normalize_erasing_regions(param_env, pointee);
                if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
                    return Ok(tcx.intern_layout(Layout::scalar(self, data_ptr)));
                }

                let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
                let metadata = match unsized_part.kind() {
                    ty::Foreign(..) => {
                        return Ok(tcx.intern_layout(Layout::scalar(self, data_ptr)));
                    }
                    ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)),
                    ty::Dynamic(..) => {
                        let mut vtable = scalar_unit(Pointer);
                        vtable.valid_range = 1..=*vtable.valid_range.end();
                        vtable
                    }
                    _ => return Err(LayoutError::Unknown(unsized_part)),
                };

                // Effectively a (ptr, meta) tuple.
                tcx.intern_layout(self.scalar_pair(data_ptr, metadata))
            }

            // Arrays and slices.
            ty::Array(element, mut count) => {
                if count.has_projections() {
                    count = tcx.normalize_erasing_regions(param_env, count);
                    if count.has_projections() {
                        return Err(LayoutError::Unknown(ty));
                    }
                }

                let count = count.try_eval_usize(tcx, param_env).ok_or(LayoutError::Unknown(ty))?;
                let element = self.layout_of(element)?;
                let size =
                    element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;

                let abi = if count != 0 && ty.conservative_is_privately_uninhabited(tcx) {
                    Abi::Uninhabited
                } else {
                    Abi::Aggregate { sized: true }
                };

                let largest_niche = if count != 0 { element.largest_niche.clone() } else { None };

                tcx.intern_layout(Layout {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldsShape::Array { stride: element.size, count },
                    abi,
                    largest_niche,
                    align: element.align,
                    size,
                })
            }
            ty::Slice(element) => {
                let element = self.layout_of(element)?;
                tcx.intern_layout(Layout {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldsShape::Array { stride: element.size, count: 0 },
                    abi: Abi::Aggregate { sized: false },
                    largest_niche: None,
                    align: element.align,
                    size: Size::ZERO,
                })
            }
            ty::Str => tcx.intern_layout(Layout {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
                abi: Abi::Aggregate { sized: false },
                largest_niche: None,
                align: dl.i8_align,
                size: Size::ZERO,
            }),

            // Odd unit types.
            ty::FnDef(..) => univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?,
            ty::Dynamic(..) | ty::Foreign(..) => {
                let mut unit = self.univariant_uninterned(
                    ty,
                    &[],
                    &ReprOptions::default(),
                    StructKind::AlwaysSized,
                )?;
                match unit.abi {
                    Abi::Aggregate { ref mut sized } => *sized = false,
                    _ => bug!(),
                }
                tcx.intern_layout(unit)
            }

            ty::Generator(def_id, substs, _) => self.generator_layout(ty, def_id, substs)?,

            ty::Closure(_, ref substs) => {
                let tys = substs.as_closure().upvar_tys();
                univariant(
                    &tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    StructKind::AlwaysSized,
                )?
            }

            ty::Tuple(tys) => {
                let kind =
                    if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };

                univariant(
                    &tys.iter()
                        .map(|k| self.layout_of(k.expect_ty()))
                        .collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    kind,
                )?
            }

            // SIMD vector types.
            ty::Adt(def, substs) if def.repr.simd() => {
                // Supported SIMD vectors are homogeneous ADTs with at least one field:
                //
                // * #[repr(simd)] struct S(T, T, T, T);
                // * #[repr(simd)] struct S { x: T, y: T, z: T, w: T }
                // * #[repr(simd)] struct S([T; 4]);
                //
                // where T is a primitive scalar (integer/float/pointer).

                // SIMD vectors with zero fields are not supported.
                // (should be caught by typeck)
                if def.non_enum_variant().fields.is_empty() {
                    tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
                }

                // Type of the first ADT field:
                let f0_ty = def.non_enum_variant().fields[0].ty(tcx, substs);

                // Heterogeneous SIMD vectors are not supported:
                // (should be caught by typeck)
                for fi in &def.non_enum_variant().fields {
                    if fi.ty(tcx, substs) != f0_ty {
                        tcx.sess.fatal(&format!("monomorphising heterogeneous SIMD type `{}`", ty));
                    }
                }

                // The element type and number of elements of the SIMD vector
                // are obtained from:
                //
                // * the element type and length of the single array field, if
                // the first field is of array type, or
                //
                // * the homogeneous field type and the number of fields.
                let (e_ty, e_len, is_array) = if let ty::Array(e_ty, _) = f0_ty.kind() {
                    // First ADT field is an array:

                    // SIMD vectors with multiple array fields are not supported:
                    // (should be caught by typeck)
                    if def.non_enum_variant().fields.len() != 1 {
                        tcx.sess.fatal(&format!(
                            "monomorphising SIMD type `{}` with more than one array field",
                            ty
                        ));
                    }

                    // Extract the number of elements from the layout of the array field:
                    let len = if let Ok(TyAndLayout {
                        layout: Layout { fields: FieldsShape::Array { count, .. }, .. },
                        ..
                    }) = self.layout_of(f0_ty)
                    {
                        count
                    } else {
                        return Err(LayoutError::Unknown(ty));
                    };

                    (*e_ty, *len, true)
                } else {
                    // First ADT field is not an array:
                    (f0_ty, def.non_enum_variant().fields.len() as _, false)
                };

                // SIMD vectors of zero length are not supported.
                //
                // Can't be caught in typeck if the array length is generic.
                if e_len == 0 {
                    tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
                }

                // Compute the ABI of the element type:
                let e_ly = self.layout_of(e_ty)?;
                let e_abi = if let Abi::Scalar(ref scalar) = e_ly.abi {
                    scalar.clone()
                } else {
                    // This error isn't caught in typeck, e.g., if
                    // the element type of the vector is generic.
                    tcx.sess.fatal(&format!(
                        "monomorphising SIMD type `{}` with a non-primitive-scalar \
                        (integer/float/pointer) element type `{}`",
                        ty, e_ty
                    ))
                };

                // Compute the size and alignment of the vector:
                let size = e_ly.size.checked_mul(e_len, dl).ok_or(LayoutError::SizeOverflow(ty))?;
                let align = dl.vector_align(size);
                let size = size.align_to(align.abi);

                // Compute the placement of the vector fields:
                let fields = if is_array {
                    FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] }
                } else {
                    FieldsShape::Array { stride: e_ly.size, count: e_len }
                };

                tcx.intern_layout(Layout {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields,
                    abi: Abi::Vector { element: e_abi, count: e_len },
                    largest_niche: e_ly.largest_niche.clone(),
                    size,
                    align,
                })
            }

            // ADTs.
            ty::Adt(def, substs) => {
                // Cache the field layouts.
                let variants = def
                    .variants
                    .iter()
                    .map(|v| {
                        v.fields
                            .iter()
                            .map(|field| self.layout_of(field.ty(tcx, substs)))
                            .collect::<Result<Vec<_>, _>>()
                    })
                    .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                if def.is_union() {
                    if def.repr.pack.is_some() && def.repr.align.is_some() {
                        bug!("union cannot be packed and aligned");
                    }

                    let mut align =
                        if def.repr.pack.is_some() { dl.i8_align } else { dl.aggregate_align };

                    if let Some(repr_align) = def.repr.align {
                        align = align.max(AbiAndPrefAlign::new(repr_align));
                    }

                    let optimize = !def.repr.inhibit_union_abi_opt();
                    let mut size = Size::ZERO;
                    let mut abi = Abi::Aggregate { sized: true };
                    let index = VariantIdx::new(0);
                    for field in &variants[index] {
                        assert!(!field.is_unsized());
                        align = align.max(field.align);

                        // If all non-ZST fields have the same ABI, forward this ABI
                        if optimize && !field.is_zst() {
                            // Normalize scalar_unit to the maximal valid range
                            let field_abi = match &field.abi {
                                Abi::Scalar(x) => Abi::Scalar(scalar_unit(x.value)),
                                Abi::ScalarPair(x, y) => {
                                    Abi::ScalarPair(scalar_unit(x.value), scalar_unit(y.value))
                                }
                                Abi::Vector { element: x, count } => {
                                    Abi::Vector { element: scalar_unit(x.value), count: *count }
                                }
                                Abi::Uninhabited | Abi::Aggregate { .. } => {
                                    Abi::Aggregate { sized: true }
                                }
                            };

                            if size == Size::ZERO {
                                // first non-ZST field: initialize `abi`
                                abi = field_abi;
                            } else if abi != field_abi {
                                // different fields have different ABIs: reset to Aggregate
                                abi = Abi::Aggregate { sized: true };
                            }
                        }

                        size = cmp::max(size, field.size);
                    }

                    if let Some(pack) = def.repr.pack {
                        align = align.min(AbiAndPrefAlign::new(pack));
                    }

                    return Ok(tcx.intern_layout(Layout {
                        variants: Variants::Single { index },
                        fields: FieldsShape::Union(
                            NonZeroUsize::new(variants[index].len())
                                .ok_or(LayoutError::Unknown(ty))?,
                        ),
                        abi,
                        largest_niche: None,
                        align,
                        size: size.align_to(align.abi),
                    }));
                }

                // A variant is absent if it's uninhabited and only has ZST fields.
                // Present uninhabited variants only require space for their fields,
                // but *not* an encoding of the discriminant (e.g., a tag value).
                // See issue #49298 for more details on the need to leave space
                // for non-ZST uninhabited data (mostly partial initialization).
                let absent = |fields: &[TyAndLayout<'_>]| {
                    let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
                    let is_zst = fields.iter().all(|f| f.is_zst());
                    uninhabited && is_zst
                };
                let (present_first, present_second) = {
                    let mut present_variants = variants
                        .iter_enumerated()
                        .filter_map(|(i, v)| if absent(v) { None } else { Some(i) });
                    (present_variants.next(), present_variants.next())
                };
                let present_first = match present_first {
                    Some(present_first) => present_first,
                    // Uninhabited because it has no variants, or only absent ones.
                    None if def.is_enum() => return tcx.layout_raw(param_env.and(tcx.types.never)),
                    // If it's a struct, still compute a layout so that we can still compute the
                    // field offsets.
                    None => VariantIdx::new(0),
                };

                let is_struct = !def.is_enum() ||
                    // Only one variant is present.
                    (present_second.is_none() &&
                    // Representation optimizations are allowed.
                    !def.repr.inhibit_enum_layout_opt());
                if is_struct {
                    // Struct, or univariant enum equivalent to a struct.
                    // (Typechecking will reject discriminant-sizing attrs.)

                    let v = present_first;
                    let kind = if def.is_enum() || variants[v].is_empty() {
                        StructKind::AlwaysSized
                    } else {
                        let param_env = tcx.param_env(def.did);
                        let last_field = def.variants[v].fields.last().unwrap();
                        let always_sized =
                            tcx.type_of(last_field.did).is_sized(tcx.at(DUMMY_SP), param_env);
                        if !always_sized {
                            StructKind::MaybeUnsized
                        } else {
                            StructKind::AlwaysSized
                        }
                    };

                    let mut st = self.univariant_uninterned(ty, &variants[v], &def.repr, kind)?;
                    st.variants = Variants::Single { index: v };
                    let (start, end) = self.tcx.layout_scalar_valid_range(def.did);
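                    // E.g. `core::num::NonZeroU8` carries
                    // `#[rustc_layout_scalar_valid_range_start(1)]`, so for it
                    // `start` is `Bound::Included(1)` and the scalar's valid
                    // range shrinks from `0..=255` to `1..=255`.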
                    match st.abi {
                        Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
                            // The asserts ensure that we are not using the
                            // `#[rustc_layout_scalar_valid_range(n)]`
                            // attribute to widen the range of anything, as that
                            // would probably result in UB somewhere.
                            // FIXME(eddyb) the asserts are probably not needed,
                            // as larger validity ranges would result in missed
                            // optimizations, *not* wrongly assuming the inner
                            // value is valid. e.g. unions enlarge validity ranges,
                            // because the values may be uninitialized.
                            if let Bound::Included(start) = start {
                                // FIXME(eddyb) this might be incorrect - it doesn't
                                // account for wrap-around (end < start) ranges.
                                assert!(*scalar.valid_range.start() <= start);
                                scalar.valid_range = start..=*scalar.valid_range.end();
                            }
                            if let Bound::Included(end) = end {
                                // FIXME(eddyb) this might be incorrect - it doesn't
                                // account for wrap-around (end < start) ranges.
                                assert!(*scalar.valid_range.end() >= end);
                                scalar.valid_range = *scalar.valid_range.start()..=end;
                            }

                            // Update `largest_niche` if we have introduced a larger niche.
                            let niche = if def.repr.hide_niche() {
                                None
                            } else {
                                Niche::from_scalar(dl, Size::ZERO, scalar.clone())
                            };
                            if let Some(niche) = niche {
                                match &st.largest_niche {
                                    Some(largest_niche) => {
                                        // Replace the existing niche even if they're equal,
                                        // because this one is at a lower offset.
                                        if largest_niche.available(dl) <= niche.available(dl) {
                                            st.largest_niche = Some(niche);
                                        }
                                    }
                                    None => st.largest_niche = Some(niche),
                                }
                            }
                        }
                        _ => assert!(
                            start == Bound::Unbounded && end == Bound::Unbounded,
                            "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
                            def,
                            st,
                        ),
                    }

                    return Ok(tcx.intern_layout(st));
                }

                // At this point, we have handled all unions and
                // structs. (We have also handled univariant enums
                // that allow representation optimization.)
                assert!(def.is_enum());

                // The current code for niche-filling relies on variant indices
                // instead of actual discriminants, so dataful enums with
                // explicit discriminants (RFC #2363) would misbehave.
                let no_explicit_discriminants = def
                    .variants
                    .iter_enumerated()
                    .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i.as_u32()));
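                // Classic instance of the optimization below: `Option<&T>` has
                // one dataful variant (`Some`) and an all-ZST `None`; the niche
                // is the reference's forbidden value 0, so `None` is encoded as
                // a null pointer and the enum stays pointer-sized with no tag.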

                let mut niche_filling_layout = None;

                // Niche-filling enum optimization.
                if !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants {
                    let mut dataful_variant = None;
                    let mut niche_variants = VariantIdx::MAX..=VariantIdx::new(0);

                    // Find one non-ZST variant.
                    'variants: for (v, fields) in variants.iter_enumerated() {
                        if absent(fields) {
                            continue 'variants;
                        }
                        for f in fields {
                            if !f.is_zst() {
                                if dataful_variant.is_none() {
                                    dataful_variant = Some(v);
                                    continue 'variants;
                                } else {
                                    dataful_variant = None;
                                    break 'variants;
                                }
                            }
                        }
                        niche_variants = *niche_variants.start().min(&v)..=v;
                    }

                    if niche_variants.start() > niche_variants.end() {
                        dataful_variant = None;
                    }

                    if let Some(i) = dataful_variant {
                        let count = (niche_variants.end().as_u32()
                            - niche_variants.start().as_u32()
                            + 1) as u128;

                        // Find the field with the largest niche
                        let niche_candidate = variants[i]
                            .iter()
                            .enumerate()
                            .filter_map(|(j, &field)| Some((j, field.largest_niche.as_ref()?)))
                            .max_by_key(|(_, niche)| niche.available(dl));

                        if let Some((field_index, niche, (niche_start, niche_scalar))) =
                            niche_candidate.and_then(|(field_index, niche)| {
                                Some((field_index, niche, niche.reserve(self, count)?))
                            })
                        {
                            let mut align = dl.aggregate_align;
                            let st = variants
                                .iter_enumerated()
                                .map(|(j, v)| {
                                    let mut st = self.univariant_uninterned(
                                        ty,
                                        v,
                                        &def.repr,
                                        StructKind::AlwaysSized,
                                    )?;
                                    st.variants = Variants::Single { index: j };

                                    align = align.max(st.align);

                                    Ok(st)
                                })
                                .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                            let offset = st[i].fields.offset(field_index) + niche.offset;
                            let size = st[i].size;

                            let abi = if st.iter().all(|v| v.abi.is_uninhabited()) {
                                Abi::Uninhabited
                            } else {
                                match st[i].abi {
                                    Abi::Scalar(_) => Abi::Scalar(niche_scalar.clone()),
                                    Abi::ScalarPair(ref first, ref second) => {
                                        // We need to use scalar_unit to reset the
                                        // valid range to the maximal one for that
                                        // primitive, because only the niche is
                                        // guaranteed to be initialised, not the
                                        // other primitive.
                                        if offset.bytes() == 0 {
                                            Abi::ScalarPair(
                                                niche_scalar.clone(),
                                                scalar_unit(second.value),
                                            )
                                        } else {
                                            Abi::ScalarPair(
                                                scalar_unit(first.value),
                                                niche_scalar.clone(),
                                            )
                                        }
                                    }
                                    _ => Abi::Aggregate { sized: true },
                                }
                            };

                            let largest_niche =
                                Niche::from_scalar(dl, offset, niche_scalar.clone());

                            niche_filling_layout = Some(Layout {
                                variants: Variants::Multiple {
                                    tag: niche_scalar,
                                    tag_encoding: TagEncoding::Niche {
                                        dataful_variant: i,
                                        niche_variants,
                                        niche_start,
                                    },
                                    tag_field: 0,
                                    variants: st,
                                },
                                fields: FieldsShape::Arbitrary {
                                    offsets: vec![offset],
                                    memory_index: vec![0],
                                },
                                abi,
                                largest_niche,
                                size,
                                align,
                            });
                        }
                    }
                }

                let (mut min, mut max) = (i128::MAX, i128::MIN);
                let discr_type = def.repr.discr_type();
                let bits = Integer::from_attr(self, discr_type).size().bits();
                for (i, discr) in def.discriminants(tcx) {
                    if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
                        continue;
                    }
                    let mut x = discr.val as i128;
                    if discr_type.is_signed() {
                        // sign-extend the raw representation to be an i128
                        x = (x << (128 - bits)) >> (128 - bits);
                    }
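                    // Worked example: with `bits == 8`, a raw discriminant of
                    // 0xFF becomes `(0xFF << 120) >> 120 == -1` (the right
                    // shift of a signed i128 sign-extends).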
1071                     if x < min {
1072                         min = x;
1073                     }
1074                     if x > max {
1075                         max = x;
1076                     }
1077                 }
1078                 // We might have no inhabited variants, so pretend there's at least one.
1079                 if (min, max) == (i128::MAX, i128::MIN) {
1080                     min = 0;
1081                     max = 0;
1082                 }
1083                 assert!(min <= max, "discriminant range is {}...{}", min, max);
1084                 let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max);
1085
1086                 let mut align = dl.aggregate_align;
1087                 let mut size = Size::ZERO;
1088
1089                 // We're interested in the smallest alignment, so start large.
1090                 let mut start_align = Align::from_bytes(256).unwrap();
1091                 assert_eq!(Integer::for_align(dl, start_align), None);
1092
1093                 // repr(C) on an enum tells us to make a (tag, union) layout,
1094                 // so we need to grow the prefix alignment to be at least
1095                 // the alignment of the union. (This value is used both for
1096                 // determining the alignment of the overall enum, and the
1097                 // determining the alignment of the payload after the tag.)
1098                 let mut prefix_align = min_ity.align(dl).abi;
1099                 if def.repr.c() {
1100                     for fields in &variants {
1101                         for field in fields {
1102                             prefix_align = prefix_align.max(field.align.abi);
1103                         }
1104                     }
1105                 }
1106
1107                 // Create the set of structs that represent each variant.
1108                 let mut layout_variants = variants
1109                     .iter_enumerated()
1110                     .map(|(i, field_layouts)| {
1111                         let mut st = self.univariant_uninterned(
1112                             ty,
1113                             &field_layouts,
1114                             &def.repr,
1115                             StructKind::Prefixed(min_ity.size(), prefix_align),
1116                         )?;
1117                         st.variants = Variants::Single { index: i };
1118                         // Find the first field we can't move later
1119                         // to make room for a larger discriminant.
1120                         for field in
1121                             st.fields.index_by_increasing_offset().map(|j| field_layouts[j])
1122                         {
1123                             if !field.is_zst() || field.align.abi.bytes() != 1 {
1124                                 start_align = start_align.min(field.align.abi);
1125                                 break;
1126                             }
1127                         }
1128                         size = cmp::max(size, st.size);
1129                         align = align.max(st.align);
1130                         Ok(st)
1131                     })
1132                     .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1133
1134                 // Align the maximum variant size to the largest alignment.
1135                 size = size.align_to(align.abi);
1136
1137                 if size.bytes() >= dl.obj_size_bound() {
1138                     return Err(LayoutError::SizeOverflow(ty));
1139                 }
1140
1141                 let typeck_ity = Integer::from_attr(dl, def.repr.discr_type());
1142                 if typeck_ity < min_ity {
1143                     // It is a bug if Layout decided on a greater discriminant size than typeck for
1144                     // some reason at this point (based on values discriminant can take on). Mostly
1145                     // because this discriminant will be loaded, and then stored into variable of
1146                     // type calculated by typeck. Consider such case (a bug): typeck decided on
1147                     // byte-sized discriminant, but layout thinks we need a 16-bit to store all
1148                     // discriminant values. That would be a bug, because then, in codegen, in order
1149                     // to store this 16-bit discriminant into 8-bit sized temporary some of the
1150                     // space necessary to represent would have to be discarded (or layout is wrong
1151                     // on thinking it needs 16 bits)
1152                     bug!(
1153                         "layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
1154                         min_ity,
1155                         typeck_ity
1156                     );
1157                     // However, it is fine to make discr type however large (as an optimisation)
1158                     // after this point â€“ we’ll just truncate the value we load in codegen.
1159                 }
1160
1161                 // Check to see if we should use a different type for the
1162                 // discriminant. We can safely use a type with the same size
1163                 // as the alignment of the first field of each variant.
1164                 // We increase the size of the discriminant to avoid LLVM copying
1165                 // padding when it doesn't need to; copying that padding normally
1166                 // causes unaligned loads/stores and excessive memcpy/memset
1167                 // operations. By using a bigger integer size, LLVM can be sure
1168                 // about the contents and won't be so conservative.
1169
1170                 // Use the initial field alignment
1171                 let mut ity = if def.repr.c() || def.repr.int.is_some() {
1172                     min_ity
1173                 } else {
1174                     Integer::for_align(dl, start_align).unwrap_or(min_ity)
1175                 };
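                     // E.g. (sketch): for `enum E { A(u64), B }` the discriminant fits
                     // in `min_ity = I8`, but the first field of `A` is 8-byte aligned,
                     // so `Integer::for_align` can widen the tag to `I64`; the payload
                     // of `A` starts at offset 8 either way, so no space is lost.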
1176
1177                 // If the alignment-derived integer is not larger than the chosen
1178                 // discriminant size, don't use it: keep the minimal discriminant size.
1179                 if ity <= min_ity {
1180                     ity = min_ity;
1181                 } else {
1182                     // Patch up the variants' first few fields.
1183                     let old_ity_size = min_ity.size();
1184                     let new_ity_size = ity.size();
1185                     for variant in &mut layout_variants {
1186                         match variant.fields {
1187                             FieldsShape::Arbitrary { ref mut offsets, .. } => {
1188                                 for i in offsets {
1189                                     if *i <= old_ity_size {
1190                                         assert_eq!(*i, old_ity_size);
1191                                         *i = new_ity_size;
1192                                     }
1193                                 }
1194                                 // We might be making the struct larger.
1195                                 if variant.size <= old_ity_size {
1196                                     variant.size = new_ity_size;
1197                                 }
1198                             }
1199                             _ => bug!(),
1200                         }
1201                     }
1202                 }
1203
1204                 let tag_mask = !0u128 >> (128 - ity.size().bits());
1205                 let tag = Scalar {
1206                     value: Int(ity, signed),
1207                     valid_range: (min as u128 & tag_mask)..=(max as u128 & tag_mask),
1208                 };
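                     // E.g. (sketch): with `ity = I8` and discriminants `-1..=1`,
                     // `tag_mask` is `0xff` and the masked range becomes the wrapping
                     // `0xff..=0x01`, matching the two's-complement encoding of the
                     // signed tag.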
1209                 let mut abi = Abi::Aggregate { sized: true };
1210                 if tag.value.size(dl) == size {
1211                     abi = Abi::Scalar(tag.clone());
1212                 } else {
1213                     // Try to use a ScalarPair for all tagged enums.
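                         // E.g. (sketch): `enum E { A(u32), B(u32) }` has a single
                         // non-ZST `u32` field at the same offset in both variants,
                         // so the whole enum can become `ScalarPair(tag, u32)` rather
                         // than an opaque aggregate.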
1214                     let mut common_prim = None;
1215                     for (field_layouts, layout_variant) in variants.iter().zip(&layout_variants) {
1216                         let offsets = match layout_variant.fields {
1217                             FieldsShape::Arbitrary { ref offsets, .. } => offsets,
1218                             _ => bug!(),
1219                         };
1220                         let mut fields =
1221                             field_layouts.iter().zip(offsets).filter(|p| !p.0.is_zst());
1222                         let (field, offset) = match (fields.next(), fields.next()) {
1223                             (None, None) => continue,
1224                             (Some(pair), None) => pair,
1225                             _ => {
1226                                 common_prim = None;
1227                                 break;
1228                             }
1229                         };
1230                         let prim = match field.abi {
1231                             Abi::Scalar(ref scalar) => scalar.value,
1232                             _ => {
1233                                 common_prim = None;
1234                                 break;
1235                             }
1236                         };
1237                         if let Some(pair) = common_prim {
1238                             // This is pretty conservative. We could go fancier
1239                             // by conflating things like i32 and u32, or even
1240                             // realising that (u8, u8) could just cohabit with
1241                             // u16 or even u32.
1242                             if pair != (prim, offset) {
1243                                 common_prim = None;
1244                                 break;
1245                             }
1246                         } else {
1247                             common_prim = Some((prim, offset));
1248                         }
1249                     }
1250                     if let Some((prim, offset)) = common_prim {
1251                         let pair = self.scalar_pair(tag.clone(), scalar_unit(prim));
1252                         let pair_offsets = match pair.fields {
1253                             FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
1254                                 assert_eq!(memory_index, &[0, 1]);
1255                                 offsets
1256                             }
1257                             _ => bug!(),
1258                         };
1259                         if pair_offsets[0] == Size::ZERO
1260                             && pair_offsets[1] == *offset
1261                             && align == pair.align
1262                             && size == pair.size
1263                         {
1264                             // We can use `ScalarPair` only when it matches our
1265                             // already computed layout (including `#[repr(C)]`).
1266                             abi = pair.abi;
1267                         }
1268                     }
1269                 }
1270
1271                 if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
1272                     abi = Abi::Uninhabited;
1273                 }
1274
1275                 let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag.clone());
1276
1277                 let tagged_layout = Layout {
1278                     variants: Variants::Multiple {
1279                         tag,
1280                         tag_encoding: TagEncoding::Direct,
1281                         tag_field: 0,
1282                         variants: layout_variants,
1283                     },
1284                     fields: FieldsShape::Arbitrary {
1285                         offsets: vec![Size::ZERO],
1286                         memory_index: vec![0],
1287                     },
1288                     largest_niche,
1289                     abi,
1290                     align,
1291                     size,
1292                 };
1293
1294                 let best_layout = match (tagged_layout, niche_filling_layout) {
1295                     (tagged_layout, Some(niche_filling_layout)) => {
1296                         // Pick the smaller layout; otherwise,
1297                         // pick the layout with the larger niche; otherwise,
1298                         // pick tagged as it has simpler codegen.
1299                         cmp::min_by_key(tagged_layout, niche_filling_layout, |layout| {
1300                             let niche_size =
1301                                 layout.largest_niche.as_ref().map_or(0, |n| n.available(dl));
1302                             (layout.size, cmp::Reverse(niche_size))
1303                         })
1304                     }
1305                     (tagged_layout, None) => tagged_layout,
1306                 };
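                     // E.g. (sketch): for `Option<&T>` the niche-filling candidate is
                     // pointer-sized while the tagged candidate needs an extra tag
                     // field, so `min_by_key` picks the niche layout; when both size
                     // and niche size tie, `cmp::min_by_key` returns its first
                     // argument, i.e. the tagged layout with its simpler codegen.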
1307
1308                 tcx.intern_layout(best_layout)
1309             }
1310
1311             // Types with no meaningful known layout.
1312             ty::Projection(_) | ty::Opaque(..) => {
1313                 let normalized = tcx.normalize_erasing_regions(param_env, ty);
1314                 if ty == normalized {
1315                     return Err(LayoutError::Unknown(ty));
1316                 }
1317                 tcx.layout_raw(param_env.and(normalized))?
1318             }
1319
1320             ty::Placeholder(..) | ty::GeneratorWitness(..) | ty::Infer(_) => {
1321                 bug!("Layout::compute: unexpected type `{}`", ty)
1322             }
1323
1324             ty::Bound(..) | ty::Param(_) | ty::Error(_) => {
1325                 return Err(LayoutError::Unknown(ty));
1326             }
1327         })
1328     }
1329 }
1330
1331 /// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
1332 #[derive(Clone, Debug, PartialEq)]
1333 enum SavedLocalEligibility {
1334     Unassigned,
1335     Assigned(VariantIdx),
1336     // FIXME: Use newtype_index so we aren't wasting bytes
1337     Ineligible(Option<u32>),
1338 }
1339
1340 // When laying out generators, we divide our saved local fields into two
1341 // categories: overlap-eligible and overlap-ineligible.
1342 //
1343 // Those fields which are ineligible for overlap go in a "prefix" at the
1344 // beginning of the layout, and always have space reserved for them.
1345 //
1346 // Overlap-eligible fields are only assigned to one variant, so we lay
1347 // those fields out for each variant and put them right after the
1348 // prefix.
1349 //
1350 // Finally, in the layout details, we point to the fields from the
1351 // variants they are assigned to. It is possible for some fields to be
1352 // included in multiple variants. No field ever "moves around" in the
1353 // layout; its offset is always the same.
1354 //
1355 // Also included in the layout are the upvars and the discriminant.
1356 // These are included as fields on the "outer" layout; they are not part
1357 // of any variant.
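     // For example (sketch, with hypothetical `x`/`y`): in
     // `async { let a = [0u8; 64]; x().await; drop(a);
     //          let b = [0u8; 64]; y().await; drop(b); }`,
     // `a` and `b` are each live across only one suspension point and never at
     // the same time, so both stay overlap-eligible and can share one slot via
     // two different variants.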
1358 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
1359     /// Compute the eligibility and assignment of each local.
1360     fn generator_saved_local_eligibility(
1361         &self,
1362         info: &GeneratorLayout<'tcx>,
1363     ) -> (BitSet<GeneratorSavedLocal>, IndexVec<GeneratorSavedLocal, SavedLocalEligibility>) {
1364         use SavedLocalEligibility::*;
1365
1366         let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> =
1367             IndexVec::from_elem_n(Unassigned, info.field_tys.len());
1368
1369         // The saved locals not eligible for overlap. These will get
1370         // "promoted" to the prefix of our generator.
1371         let mut ineligible_locals = BitSet::new_empty(info.field_tys.len());
1372
1373         // Figure out which of our saved locals are fields in only
1374         // one variant. The rest are deemed ineligible for overlap.
1375         for (variant_index, fields) in info.variant_fields.iter_enumerated() {
1376             for local in fields {
1377                 match assignments[*local] {
1378                     Unassigned => {
1379                         assignments[*local] = Assigned(variant_index);
1380                     }
1381                     Assigned(idx) => {
1382                         // We've already seen this local at another suspension
1383                         // point, so it is no longer a candidate.
1384                         trace!(
1385                             "removing local {:?} in >1 variant ({:?}, {:?})",
1386                             local,
1387                             variant_index,
1388                             idx
1389                         );
1390                         ineligible_locals.insert(*local);
1391                         assignments[*local] = Ineligible(None);
1392                     }
1393                     Ineligible(_) => {}
1394                 }
1395             }
1396         }
1397
1398         // Next, check every pair of eligible locals to see if they
1399         // conflict.
1400         for local_a in info.storage_conflicts.rows() {
1401             let conflicts_a = info.storage_conflicts.count(local_a);
1402             if ineligible_locals.contains(local_a) {
1403                 continue;
1404             }
1405
1406             for local_b in info.storage_conflicts.iter(local_a) {
1407                 // local_a and local_b are storage live at the same time, therefore they
1408                 // cannot overlap in the generator layout. The only way to guarantee
1409                 // this is if they are in the same variant, or one is ineligible
1410                 // (which means it is stored in every variant).
1411                 if ineligible_locals.contains(local_b)
1412                     || assignments[local_a] == assignments[local_b]
1413                 {
1414                     continue;
1415                 }
1416
1417                 // If they conflict, we will choose one to make ineligible.
1418                 // This is not always optimal; it's just a greedy heuristic that
1419                 // seems to produce good results most of the time.
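                     // E.g. (sketch): if `a` conflicts with both `b` and `c` while
                     // `b` and `c` do not conflict with each other, `a` has the
                     // higher conflict count and gets evicted to the prefix, leaving
                     // `b` and `c` free to overlap in their respective variants.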
1420                 let conflicts_b = info.storage_conflicts.count(local_b);
1421                 let (remove, other) =
1422                     if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
1423                 ineligible_locals.insert(remove);
1424                 assignments[remove] = Ineligible(None);
1425                 trace!("removing local {:?} due to conflict with {:?}", remove, other);
1426             }
1427         }
1428
1429         // Count the number of variants in use. If only one variant is used,
1430         // it is impossible to overlap any locals in our layout. In that case
1431         // it's always better to make the remaining locals ineligible, so we
1432         // can lay them out with the other locals in the prefix and eliminate
1433         // unnecessary padding bytes.
1434         {
1435             let mut used_variants = BitSet::new_empty(info.variant_fields.len());
1436             for assignment in &assignments {
1437                 if let Assigned(idx) = assignment {
1438                     used_variants.insert(*idx);
1439                 }
1440             }
1441             if used_variants.count() < 2 {
1442                 for assignment in assignments.iter_mut() {
1443                     *assignment = Ineligible(None);
1444                 }
1445                 ineligible_locals.insert_all();
1446             }
1447         }
1448
1449         // Write down the order of our locals that will be promoted to the prefix.
1450         {
1451             for (idx, local) in ineligible_locals.iter().enumerate() {
1452                 assignments[local] = Ineligible(Some(idx as u32));
1453             }
1454         }
1455         debug!("generator saved local assignments: {:?}", assignments);
1456
1457         (ineligible_locals, assignments)
1458     }
1459
1460     /// Compute the full generator layout.
1461     fn generator_layout(
1462         &self,
1463         ty: Ty<'tcx>,
1464         def_id: hir::def_id::DefId,
1465         substs: SubstsRef<'tcx>,
1466     ) -> Result<&'tcx Layout, LayoutError<'tcx>> {
1467         use SavedLocalEligibility::*;
1468         let tcx = self.tcx;
1469         let subst_field = |ty: Ty<'tcx>| ty.subst(tcx, substs);
1470
1471         let info = match tcx.generator_layout(def_id) {
1472             None => return Err(LayoutError::Unknown(ty)),
1473             Some(info) => info,
1474         };
1475         let (ineligible_locals, assignments) = self.generator_saved_local_eligibility(&info);
1476
1477         // Build a prefix layout, including "promoting" all ineligible
1478         // locals as part of the prefix. We compute the layout of all of
1479         // these fields at once to get optimal packing.
1480         let tag_index = substs.as_generator().prefix_tys().count();
1481
1482         // `info.variant_fields` already accounts for the reserved variants, so no need to add them.
1483         let max_discr = (info.variant_fields.len() - 1) as u128;
1484         let discr_int = Integer::fit_unsigned(max_discr);
1485         let discr_int_ty = discr_int.to_ty(tcx, false);
1486         let tag = Scalar { value: Primitive::Int(discr_int, false), valid_range: 0..=max_discr };
1487         let tag_layout = self.tcx.intern_layout(Layout::scalar(self, tag.clone()));
1488         let tag_layout = TyAndLayout { ty: discr_int_ty, layout: tag_layout };
1489
1490         let promoted_layouts = ineligible_locals
1491             .iter()
1492             .map(|local| subst_field(info.field_tys[local]))
1493             .map(|ty| tcx.mk_maybe_uninit(ty))
1494             .map(|ty| self.layout_of(ty));
1495         let prefix_layouts = substs
1496             .as_generator()
1497             .prefix_tys()
1498             .map(|ty| self.layout_of(ty))
1499             .chain(iter::once(Ok(tag_layout)))
1500             .chain(promoted_layouts)
1501             .collect::<Result<Vec<_>, _>>()?;
1502         let prefix = self.univariant_uninterned(
1503             ty,
1504             &prefix_layouts,
1505             &ReprOptions::default(),
1506             StructKind::AlwaysSized,
1507         )?;
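             // Sketch of the prefix for a generator with one upvar `u` and one
             // ineligible saved local `s` (hypothetical): the fields are laid out
             // as the univariant struct `{ u, tag, MaybeUninit<s> }`, with
             // `tag_index` pointing at `tag`.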
1508
1509         let (prefix_size, prefix_align) = (prefix.size, prefix.align);
1510
1511         // Split the prefix layout into the "outer" fields (upvars and
1512         // discriminant) and the "promoted" fields. Promoted fields will
1513         // get included in each variant that requested them in
1514         // GeneratorLayout.
1515         debug!("prefix = {:#?}", prefix);
1516         let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
1517             FieldsShape::Arbitrary { mut offsets, memory_index } => {
1518                 let mut inverse_memory_index = invert_mapping(&memory_index);
1519
1520                 // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
1521                 // "outer" and "promoted" fields respectively.
1522                 let b_start = (tag_index + 1) as u32;
1523                 let offsets_b = offsets.split_off(b_start as usize);
1524                 let offsets_a = offsets;
1525
1526                 // Disentangle the "a" and "b" components of `inverse_memory_index`
1527                 // by preserving the order but keeping only one disjoint "half" each.
1528                 // FIXME(eddyb) build a better abstraction for permutations, if possible.
1529                 let inverse_memory_index_b: Vec<_> =
1530                     inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
1531                 inverse_memory_index.retain(|&i| i < b_start);
1532                 let inverse_memory_index_a = inverse_memory_index;
1533
1534                 // Since `inverse_memory_index_{a,b}` each only refer to their
1535                 // respective fields, they can be safely inverted.
1536                 let memory_index_a = invert_mapping(&inverse_memory_index_a);
1537                 let memory_index_b = invert_mapping(&inverse_memory_index_b);
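                     // E.g. (sketch): with 3 outer fields and 2 promoted fields
                     // (`b_start = 3`), `memory_index = [4, 0, 2, 1, 3]` gives
                     // `inverse_memory_index = [1, 3, 2, 4, 0]`; splitting yields the
                     // inverse halves `[1, 2, 0]` (outer) and `[0, 1]` (promoted),
                     // which invert back to `memory_index_a = [2, 0, 1]` and
                     // `memory_index_b = [0, 1]`.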
1538
1539                 let outer_fields =
1540                     FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
1541                 (outer_fields, offsets_b, memory_index_b)
1542             }
1543             _ => bug!(),
1544         };
1545
1546         let mut size = prefix.size;
1547         let mut align = prefix.align;
1548         let variants = info
1549             .variant_fields
1550             .iter_enumerated()
1551             .map(|(index, variant_fields)| {
1552                 // Only include overlap-eligible fields when we compute our variant layout.
1553                 let variant_only_tys = variant_fields
1554                     .iter()
1555                     .filter(|local| match assignments[**local] {
1556                         Unassigned => bug!(),
1557                         Assigned(v) if v == index => true,
1558                         Assigned(_) => bug!("assignment does not match variant"),
1559                         Ineligible(_) => false,
1560                     })
1561                     .map(|local| subst_field(info.field_tys[*local]));
1562
1563                 let mut variant = self.univariant_uninterned(
1564                     ty,
1565                     &variant_only_tys
1566                         .map(|ty| self.layout_of(ty))
1567                         .collect::<Result<Vec<_>, _>>()?,
1568                     &ReprOptions::default(),
1569                     StructKind::Prefixed(prefix_size, prefix_align.abi),
1570                 )?;
1571                 variant.variants = Variants::Single { index };
1572
1573                 let (offsets, memory_index) = match variant.fields {
1574                     FieldsShape::Arbitrary { offsets, memory_index } => (offsets, memory_index),
1575                     _ => bug!(),
1576                 };
1577
1578                 // Now, stitch the promoted and variant-only fields back together in
1579                 // the order they are mentioned by our GeneratorLayout.
1580                 // Because we only use some subset (that can differ between variants)
1581                 // of the promoted fields, we can't just pick those elements of the
1582                 // `promoted_memory_index` (as we'd end up with gaps).
1583                 // So instead, we build an "inverse memory_index", as if all of the
1584                 // promoted fields were being used, but leave the elements not in the
1585                 // subset as `INVALID_FIELD_IDX`, which we can filter out later to
1586                 // obtain a valid (bijective) mapping.
1587                 const INVALID_FIELD_IDX: u32 = !0;
1588                 let mut combined_inverse_memory_index =
1589                     vec![INVALID_FIELD_IDX; promoted_memory_index.len() + memory_index.len()];
1590                 let mut offsets_and_memory_index = offsets.into_iter().zip(memory_index);
1591                 let combined_offsets = variant_fields
1592                     .iter()
1593                     .enumerate()
1594                     .map(|(i, local)| {
1595                         let (offset, memory_index) = match assignments[*local] {
1596                             Unassigned => bug!(),
1597                             Assigned(_) => {
1598                                 let (offset, memory_index) =
1599                                     offsets_and_memory_index.next().unwrap();
1600                                 (offset, promoted_memory_index.len() as u32 + memory_index)
1601                             }
1602                             Ineligible(field_idx) => {
1603                                 let field_idx = field_idx.unwrap() as usize;
1604                                 (promoted_offsets[field_idx], promoted_memory_index[field_idx])
1605                             }
1606                         };
1607                         combined_inverse_memory_index[memory_index as usize] = i as u32;
1608                         offset
1609                     })
1610                     .collect();
1611
1612                 // Remove the unused slots and invert the mapping to obtain the
1613                 // combined `memory_index` (also see previous comment).
1614                 combined_inverse_memory_index.retain(|&i| i != INVALID_FIELD_IDX);
1615                 let combined_memory_index = invert_mapping(&combined_inverse_memory_index);
1616
1617                 variant.fields = FieldsShape::Arbitrary {
1618                     offsets: combined_offsets,
1619                     memory_index: combined_memory_index,
1620                 };
1621
1622                 size = size.max(variant.size);
1623                 align = align.max(variant.align);
1624                 Ok(variant)
1625             })
1626             .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1627
1628         size = size.align_to(align.abi);
1629
1630         let abi = if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi.is_uninhabited())
1631         {
1632             Abi::Uninhabited
1633         } else {
1634             Abi::Aggregate { sized: true }
1635         };
1636
1637         let layout = tcx.intern_layout(Layout {
1638             variants: Variants::Multiple {
1639                 tag,
1640                 tag_encoding: TagEncoding::Direct,
1641                 tag_field: tag_index,
1642                 variants,
1643             },
1644             fields: outer_fields,
1645             abi,
1646             largest_niche: prefix.largest_niche,
1647             size,
1648             align,
1649         });
1650         debug!("generator layout ({:?}): {:#?}", ty, layout);
1651         Ok(layout)
1652     }
1653
1654     /// This is invoked from `layout_of` (once the `layout_raw` query has
1655     /// completed) to record the final layout of each type.
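         /// E.g. (sketch): under `-Zprint-type-sizes`, the data recorded here is
         /// later dumped by the session's `CodeStats` as one `print-type-size`
         /// line per type, variant, and field.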
1656     #[inline(always)]
1657     fn record_layout_for_printing(&self, layout: TyAndLayout<'tcx>) {
1658         // If we are running with `-Zprint-type-sizes`, maybe record layouts
1659         // for dumping later.
1660         if self.tcx.sess.opts.debugging_opts.print_type_sizes {
1661             self.record_layout_for_printing_outlined(layout)
1662         }
1663     }
1664
1665     fn record_layout_for_printing_outlined(&self, layout: TyAndLayout<'tcx>) {
1666         // Ignore layouts computed with non-empty param environments or for
1667         // non-monomorphic types, as the user only wants to see the layouts
1668         // resulting from the final codegen session.
1669         if layout.ty.has_param_types_or_consts() || !self.param_env.caller_bounds().is_empty() {
1670             return;
1671         }
1672
1673         // (delay format until we actually need it)
1674         let record = |kind, packed, opt_discr_size, variants| {
1675             let type_desc = format!("{:?}", layout.ty);
1676             self.tcx.sess.code_stats.record_type_size(
1677                 kind,
1678                 type_desc,
1679                 layout.align.abi,
1680                 layout.size,
1681                 packed,
1682                 opt_discr_size,
1683                 variants,
1684             );
1685         };
1686
1687         let adt_def = match *layout.ty.kind() {
1688             ty::Adt(ref adt_def, _) => {
1689                 debug!("print-type-size t: `{:?}` process adt", layout.ty);
1690                 adt_def
1691             }
1692
1693             ty::Closure(..) => {
1694                 debug!("print-type-size t: `{:?}` record closure", layout.ty);
1695                 record(DataTypeKind::Closure, false, None, vec![]);
1696                 return;
1697             }
1698
1699             _ => {
1700                 debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
1701                 return;
1702             }
1703         };
1704
1705         let adt_kind = adt_def.adt_kind();
1706         let adt_packed = adt_def.repr.pack.is_some();
1707
1708         let build_variant_info = |n: Option<Ident>, flds: &[Symbol], layout: TyAndLayout<'tcx>| {
1709             let mut min_size = Size::ZERO;
1710             let field_info: Vec<_> = flds
1711                 .iter()
1712                 .enumerate()
1713                 .map(|(i, &name)| match layout.field(self, i) {
1714                     Err(err) => {
1715                         bug!("no layout found for field {}: `{:?}`", name, err);
1716                     }
1717                     Ok(field_layout) => {
1718                         let offset = layout.fields.offset(i);
1719                         let field_end = offset + field_layout.size;
1720                         if min_size < field_end {
1721                             min_size = field_end;
1722                         }
1723                         FieldInfo {
1724                             name: name.to_string(),
1725                             offset: offset.bytes(),
1726                             size: field_layout.size.bytes(),
1727                             align: field_layout.align.abi.bytes(),
1728                         }
1729                     }
1730                 })
1731                 .collect();
1732
1733             VariantInfo {
1734                 name: n.map(|n| n.to_string()),
1735                 kind: if layout.is_unsized() { SizeKind::Min } else { SizeKind::Exact },
1736                 align: layout.align.abi.bytes(),
1737                 size: if min_size.bytes() == 0 { layout.size.bytes() } else { min_size.bytes() },
1738                 fields: field_info,
1739             }
1740         };
1741
1742         match layout.variants {
1743             Variants::Single { index } => {
1744                 if !adt_def.variants.is_empty() {
1745                     debug!("print-type-size `{:#?}` variant {}", layout, adt_def.variants[index].ident);
1746                     let variant_def = &adt_def.variants[index];
1747                     let fields: Vec<_> = variant_def.fields.iter().map(|f| f.ident.name).collect();
1748                     record(
1749                         adt_kind.into(),
1750                         adt_packed,
1751                         None,
1752                         vec![build_variant_info(Some(variant_def.ident), &fields, layout)],
1753                     );
1754                 } else {
1755                     // (This case arises for *empty* enums; so give it
1756                     // zero variants.)
1757                     record(adt_kind.into(), adt_packed, None, vec![]);
1758                 }
1759             }
1760
1761             Variants::Multiple { ref tag, ref tag_encoding, .. } => {
1762                 debug!(
1763                     "print-type-size `{:#?}` adt general variants def {}",
1764                     layout.ty,
1765                     adt_def.variants.len()
1766                 );
1767                 let variant_infos: Vec<_> = adt_def
1768                     .variants
1769                     .iter_enumerated()
1770                     .map(|(i, variant_def)| {
1771                         let fields: Vec<_> =
1772                             variant_def.fields.iter().map(|f| f.ident.name).collect();
1773                         build_variant_info(
1774                             Some(variant_def.ident),
1775                             &fields,
1776                             layout.for_variant(self, i),
1777                         )
1778                     })
1779                     .collect();
1780                 record(
1781                     adt_kind.into(),
1782                     adt_packed,
1783                     match tag_encoding {
1784                         TagEncoding::Direct => Some(tag.value.size(self)),
1785                         _ => None,
1786                     },
1787                     variant_infos,
1788                 );
1789             }
1790         }
1791     }
1792 }
1793
1794 /// Type size "skeleton", i.e., the only information determining a type's size.
1795 /// While this is conservative (aside from constant sizes, only pointers,
1796 /// newtypes thereof, and null-pointer-optimized enums are allowed), it is
1797 /// enough to statically check common use cases of transmute.
1798 #[derive(Copy, Clone, Debug)]
1799 pub enum SizeSkeleton<'tcx> {
1800     /// Any statically computable Layout.
1801     Known(Size),
1802
1803     /// A potentially-fat pointer.
1804     Pointer {
1805         /// If true, this pointer is never null.
1806         non_zero: bool,
1807         /// The type which determines the unsized metadata, if any,
1808         /// of this pointer. Either a type parameter or a projection
1809         /// depending on one, with regions erased.
1810         tail: Ty<'tcx>,
1811     },
1812 }
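     // E.g. (sketch): this is what lets the transmute size check accept
     // `fn cast<'a, T>(x: &'a T) -> Option<&'a T> { unsafe { mem::transmute(x) } }`
     // even though `layout_of` fails for the type parameter `T`: both sides
     // reduce to `SizeSkeleton::Pointer { tail: T, .. }` and compare equal via
     // `same_size`.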
1813
1814 impl<'tcx> SizeSkeleton<'tcx> {
1815     pub fn compute(
1816         ty: Ty<'tcx>,
1817         tcx: TyCtxt<'tcx>,
1818         param_env: ty::ParamEnv<'tcx>,
1819     ) -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
1820         debug_assert!(!ty.has_infer_types_or_consts());
1821
1822         // First try computing a static layout.
1823         let err = match tcx.layout_of(param_env.and(ty)) {
1824             Ok(layout) => {
1825                 return Ok(SizeSkeleton::Known(layout.size));
1826             }
1827             Err(err) => err,
1828         };
1829
1830         match *ty.kind() {
1831             ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
1832                 let non_zero = !ty.is_unsafe_ptr();
1833                 let tail = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
1834                 match tail.kind() {
1835                     ty::Param(_) | ty::Projection(_) => {
1836                         debug_assert!(tail.has_param_types_or_consts());
1837                         Ok(SizeSkeleton::Pointer { non_zero, tail: tcx.erase_regions(tail) })
1838                     }
1839                     _ => bug!(
1840                         "SizeSkeleton::compute({}): layout errored ({}), yet \
1841                               tail `{}` is not a type parameter or a projection",
1842                         ty,
1843                         err,
1844                         tail
1845                     ),
1846                 }
1847             }
1848
1849             ty::Adt(def, substs) => {
1850                 // Only newtypes and enums w/ nullable pointer optimization.
1851                 if def.is_union() || def.variants.is_empty() || def.variants.len() > 2 {
1852                     return Err(err);
1853                 }
1854
1855                 // Get a zero-sized variant or a pointer newtype.
1856                 let zero_or_ptr_variant = |i| {
1857                     let i = VariantIdx::new(i);
1858                     let fields = def.variants[i]
1859                         .fields
1860                         .iter()
1861                         .map(|field| SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env));
1862                     let mut ptr = None;
1863                     for field in fields {
1864                         let field = field?;
1865                         match field {
1866                             SizeSkeleton::Known(size) => {
1867                                 if size.bytes() > 0 {
1868                                     return Err(err);
1869                                 }
1870                             }
1871                             SizeSkeleton::Pointer { .. } => {
1872                                 if ptr.is_some() {
1873                                     return Err(err);
1874                                 }
1875                                 ptr = Some(field);
1876                             }
1877                         }
1878                     }
1879                     Ok(ptr)
1880                 };
1881
1882                 let v0 = zero_or_ptr_variant(0)?;
1883                 // Newtype.
1884                 if def.variants.len() == 1 {
1885                     if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
1886                         return Ok(SizeSkeleton::Pointer {
1887                             non_zero: non_zero
1888                                 || match tcx.layout_scalar_valid_range(def.did) {
1889                                     (Bound::Included(start), Bound::Unbounded) => start > 0,
1890                                     (Bound::Included(start), Bound::Included(end)) => {
1891                                         0 < start && start < end
1892                                     }
1893                                     _ => false,
1894                                 },
1895                             tail,
1896                         });
1897                     } else {
1898                         return Err(err);
1899                     }
1900                 }
1901
1902                 let v1 = zero_or_ptr_variant(1)?;
1903                 // Nullable pointer enum optimization.
1904                 match (v0, v1) {
1905                     (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None)
1906                     | (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
1907                         Ok(SizeSkeleton::Pointer { non_zero: false, tail })
1908                     }
1909                     _ => Err(err),
1910                 }
1911             }
1912
1913             ty::Projection(_) | ty::Opaque(..) => {
1914                 let normalized = tcx.normalize_erasing_regions(param_env, ty);
1915                 if ty == normalized {
1916                     Err(err)
1917                 } else {
1918                     SizeSkeleton::compute(normalized, tcx, param_env)
1919                 }
1920             }
1921
1922             _ => Err(err),
1923         }
1924     }
1925
1926     pub fn same_size(self, other: SizeSkeleton<'_>) -> bool {
1927         match (self, other) {
1928             (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
1929             (SizeSkeleton::Pointer { tail: a, .. }, SizeSkeleton::Pointer { tail: b, .. }) => {
1930                 a == b
1931             }
1932             _ => false,
1933         }
1934     }
1935 }
1936
1937 pub trait HasTyCtxt<'tcx>: HasDataLayout {
1938     fn tcx(&self) -> TyCtxt<'tcx>;
1939 }
1940
1941 pub trait HasParamEnv<'tcx> {
1942     fn param_env(&self) -> ty::ParamEnv<'tcx>;
1943 }
1944
1945 impl<'tcx> HasDataLayout for TyCtxt<'tcx> {
1946     fn data_layout(&self) -> &TargetDataLayout {
1947         &self.data_layout
1948     }
1949 }
1950
1951 impl<'tcx> HasTyCtxt<'tcx> for TyCtxt<'tcx> {
1952     fn tcx(&self) -> TyCtxt<'tcx> {
1953         *self
1954     }
1955 }
1956
1957 impl<'tcx, C> HasParamEnv<'tcx> for LayoutCx<'tcx, C> {
1958     fn param_env(&self) -> ty::ParamEnv<'tcx> {
1959         self.param_env
1960     }
1961 }
1962
1963 impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
1964     fn data_layout(&self) -> &TargetDataLayout {
1965         self.tcx.data_layout()
1966     }
1967 }
1968
1969 impl<'tcx, T: HasTyCtxt<'tcx>> HasTyCtxt<'tcx> for LayoutCx<'tcx, T> {
1970     fn tcx(&self) -> TyCtxt<'tcx> {
1971         self.tcx.tcx()
1972     }
1973 }
1974
1975 pub type TyAndLayout<'tcx> = rustc_target::abi::TyAndLayout<'tcx, Ty<'tcx>>;
1976
1977 impl<'tcx> LayoutOf for LayoutCx<'tcx, TyCtxt<'tcx>> {
1978     type Ty = Ty<'tcx>;
1979     type TyAndLayout = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
1980
1981     /// Computes the layout of a type. Note that this implicitly
1982     /// executes in "reveal all" mode.
1983     fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout {
1984         let param_env = self.param_env.with_reveal_all_normalized(self.tcx);
1985         let ty = self.tcx.normalize_erasing_regions(param_env, ty);
1986         let layout = self.tcx.layout_raw(param_env.and(ty))?;
1987         let layout = TyAndLayout { ty, layout };
1988
1989         // N.B., this recording is normally disabled; when enabled, it
1990         // can however trigger recursive invocations of `layout_of`.
1991         // Therefore, we execute it *after* the main query has
1992         // completed, to avoid problems around recursive structures
1993         // and the like. (Admittedly, I wasn't able to reproduce a problem
1994         // here, but it seems like the right thing to do. -nmatsakis)
1995         self.record_layout_for_printing(layout);
1996
1997         Ok(layout)
1998     }
1999 }
2000
2001 impl LayoutOf for LayoutCx<'tcx, ty::query::TyCtxtAt<'tcx>> {
2002     type Ty = Ty<'tcx>;
2003     type TyAndLayout = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
2004
2005     /// Computes the layout of a type. Note that this implicitly
2006     /// executes in "reveal all" mode.
2007     fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout {
2008         let param_env = self.param_env.with_reveal_all_normalized(*self.tcx);
2009         let ty = self.tcx.normalize_erasing_regions(param_env, ty);
2010         let layout = self.tcx.layout_raw(param_env.and(ty))?;
2011         let layout = TyAndLayout { ty, layout };
2012
2013         // N.B., this recording is normally disabled; when enabled, it
2014         // can however trigger recursive invocations of `layout_of`.
2015         // Therefore, we execute it *after* the main query has
2016         // completed, to avoid problems around recursive structures
2017         // and the like. (Admittedly, I wasn't able to reproduce a problem
2018         // here, but it seems like the right thing to do. -nmatsakis)
2019         let cx = LayoutCx { tcx: *self.tcx, param_env: self.param_env };
2020         cx.record_layout_for_printing(layout);
2021
2022         Ok(layout)
2023     }
2024 }
2025
2026 // Helper (inherent) `layout_of` methods to avoid pushing `LayoutCx` to users.
2027 impl TyCtxt<'tcx> {
2028     /// Computes the layout of a type. Note that this implicitly
2029     /// executes in "reveal all" mode.
2030     #[inline]
2031     pub fn layout_of(
2032         self,
2033         param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
2034     ) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
2035         let cx = LayoutCx { tcx: self, param_env: param_env_and_ty.param_env };
2036         cx.layout_of(param_env_and_ty.value)
2037     }
2038 }
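 // Usage sketch (hypothetical caller, assuming `tcx: TyCtxt<'tcx>` and
 // `ty: Ty<'tcx>` are in scope):
 //
 //     let layout = tcx.layout_of(ty::ParamEnv::reveal_all().and(ty))?;
 //     debug!("size = {:?}, align = {:?}", layout.size, layout.align.abi);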
2039
2040 impl ty::query::TyCtxtAt<'tcx> {
2041     /// Computes the layout of a type. Note that this implicitly
2042     /// executes in "reveal all" mode.
2043     #[inline]
2044     pub fn layout_of(
2045         self,
2046         param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
2047     ) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
2048         let cx = LayoutCx { tcx: self.at(self.span), param_env: param_env_and_ty.param_env };
2049         cx.layout_of(param_env_and_ty.value)
2050     }
2051 }
2052
2053 impl<'tcx, C> TyAndLayoutMethods<'tcx, C> for Ty<'tcx>
2054 where
2055     C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout: MaybeResult<TyAndLayout<'tcx>>>
2056         + HasTyCtxt<'tcx>
2057         + HasParamEnv<'tcx>,
2058 {
2059     fn for_variant(
2060         this: TyAndLayout<'tcx>,
2061         cx: &C,
2062         variant_index: VariantIdx,
2063     ) -> TyAndLayout<'tcx> {
2064         let layout = match this.variants {
2065             Variants::Single { index }
2066                 // If all variants but one are uninhabited, the variant layout is the enum layout.
2067                 if index == variant_index &&
2068                 // Don't confuse variants of uninhabited enums with the enum itself.
2069                 // For more details see https://github.com/rust-lang/rust/issues/69763.
2070                 this.fields != FieldsShape::Primitive =>
2071             {
2072                 this.layout
2073             }
2074
2075             Variants::Single { index } => {
2076                 // Deny calling for_variant more than once for non-Single enums.
2077                 if let Ok(original_layout) = cx.layout_of(this.ty).to_result() {
2078                     assert_eq!(original_layout.variants, Variants::Single { index });
2079                 }
2080
2081                 let fields = match this.ty.kind() {
2082                     ty::Adt(def, _) if def.variants.is_empty() =>
2083                         bug!("for_variant called on zero-variant enum"),
2084                     ty::Adt(def, _) => def.variants[variant_index].fields.len(),
2085                     _ => bug!(),
2086                 };
2087                 let tcx = cx.tcx();
2088                 tcx.intern_layout(Layout {
2089                     variants: Variants::Single { index: variant_index },
2090                     fields: match NonZeroUsize::new(fields) {
2091                         Some(fields) => FieldsShape::Union(fields),
2092                         None => FieldsShape::Arbitrary { offsets: vec![], memory_index: vec![] },
2093                     },
2094                     abi: Abi::Uninhabited,
2095                     largest_niche: None,
2096                     align: tcx.data_layout.i8_align,
2097                     size: Size::ZERO,
2098                 })
2099             }
2100
2101             Variants::Multiple { ref variants, .. } => &variants[variant_index],
2102         };
2103
2104         assert_eq!(layout.variants, Variants::Single { index: variant_index });
2105
2106         TyAndLayout { ty: this.ty, layout }
2107     }
2108
2109     fn field(this: TyAndLayout<'tcx>, cx: &C, i: usize) -> C::TyAndLayout {
2110         enum TyMaybeWithLayout<C: LayoutOf> {
2111             Ty(C::Ty),
2112             TyAndLayout(C::TyAndLayout),
2113         }
2114
2115         fn ty_and_layout_kind<
2116             C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout: MaybeResult<TyAndLayout<'tcx>>>
2117                 + HasTyCtxt<'tcx>
2118                 + HasParamEnv<'tcx>,
2119         >(
2120             this: TyAndLayout<'tcx>,
2121             cx: &C,
2122             i: usize,
2123             ty: C::Ty,
2124         ) -> TyMaybeWithLayout<C> {
2125             let tcx = cx.tcx();
2126             let tag_layout = |tag: &Scalar| -> C::TyAndLayout {
2127                 let layout = Layout::scalar(cx, tag.clone());
2128                 MaybeResult::from(Ok(TyAndLayout {
2129                     layout: tcx.intern_layout(layout),
2130                     ty: tag.value.to_ty(tcx),
2131                 }))
2132             };
2133
2134             match *ty.kind() {
2135                 ty::Bool
2136                 | ty::Char
2137                 | ty::Int(_)
2138                 | ty::Uint(_)
2139                 | ty::Float(_)
2140                 | ty::FnPtr(_)
2141                 | ty::Never
2142                 | ty::FnDef(..)
2143                 | ty::GeneratorWitness(..)
2144                 | ty::Foreign(..)
2145                 | ty::Dynamic(..) => bug!("TyAndLayout::field_type({:?}): not applicable", this),
2146
2147                 // Potentially-fat pointers.
2148                 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
2149                     assert!(i < this.fields.count());
2150
2151                     // Reuse the fat `*T` type as its own thin pointer data field.
2152                     // This provides information about, e.g., DST struct pointees
2153                     // (which may have no non-DST form), and will work as long
2154                     // as the `Abi` or `FieldsShape` is checked by users.
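                         // E.g. (sketch): for `&str`, field 0 is the data pointer
                         // (modeled below as a thin `&'static mut ()` whose `ty` is
                         // patched back to `&str`), and field 1 is the `usize`
                         // length produced from the `ty::Str` tail below.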
2155                     if i == 0 {
2156                         let nil = tcx.mk_unit();
2157                         let ptr_ty = if ty.is_unsafe_ptr() {
2158                             tcx.mk_mut_ptr(nil)
2159                         } else {
2160                             tcx.mk_mut_ref(tcx.lifetimes.re_static, nil)
2161                         };
2162                         return TyMaybeWithLayout::TyAndLayout(MaybeResult::from(
2163                             cx.layout_of(ptr_ty).to_result().map(|mut ptr_layout| {
2164                                 ptr_layout.ty = ty;
2165                                 ptr_layout
2166                             }),
2167                         ));
2168                     }
2169
2170                     match tcx.struct_tail_erasing_lifetimes(pointee, cx.param_env()).kind() {
2171                         ty::Slice(_) | ty::Str => TyMaybeWithLayout::Ty(tcx.types.usize),
2172                         ty::Dynamic(_, _) => {
2173                             TyMaybeWithLayout::Ty(tcx.mk_imm_ref(
2174                                 tcx.lifetimes.re_static,
2175                                 tcx.mk_array(tcx.types.usize, 3),
2176                             ))
2177                             /* FIXME: use actual fn pointers
2178                             Warning: naively computing the number of entries in the
2179                             vtable by counting the methods on the trait + methods on
2180                             all parent traits does not work, because some methods can
2181                             not be object safe and thus get excluded from the vtable.
2182                             Increase this counter if you tried to implement this but
2183                             failed to do it without duplicating a lot of code from
2184                             other places in the compiler: 2
2185                             tcx.mk_tup(&[
2186                                 tcx.mk_array(tcx.types.usize, 3),
2187                                 tcx.mk_array(Option<fn()>),
2188                             ])
2189                             */
2190                         }
2191                         _ => bug!("TyAndLayout::field_type({:?}): not applicable", this),
2192                     }
2193                 }
2194
2195                 // Arrays and slices.
2196                 ty::Array(element, _) | ty::Slice(element) => TyMaybeWithLayout::Ty(element),
2197                 ty::Str => TyMaybeWithLayout::Ty(tcx.types.u8),
2198
2199                 // Tuples, generators and closures.
2200                 ty::Closure(_, ref substs) => {
2201                     ty_and_layout_kind(this, cx, i, substs.as_closure().tupled_upvars_ty())
2202                 }
2203
2204                 ty::Generator(def_id, ref substs, _) => match this.variants {
2205                     Variants::Single { index } => TyMaybeWithLayout::Ty(
2206                         substs
2207                             .as_generator()
2208                             .state_tys(def_id, tcx)
2209                             .nth(index.as_usize())
2210                             .unwrap()
2211                             .nth(i)
2212                             .unwrap(),
2213                     ),
2214                     Variants::Multiple { ref tag, tag_field, .. } => {
2215                         if i == tag_field {
2216                             return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
2217                         }
2218                         TyMaybeWithLayout::Ty(substs.as_generator().prefix_tys().nth(i).unwrap())
2219                     }
2220                 },
2221
2222                 ty::Tuple(tys) => TyMaybeWithLayout::Ty(tys[i].expect_ty()),
2223
2224                 // ADTs.
2225                 ty::Adt(def, substs) => {
2226                     match this.variants {
2227                         Variants::Single { index } => {
2228                             TyMaybeWithLayout::Ty(def.variants[index].fields[i].ty(tcx, substs))
2229                         }
2230
2231                         // Discriminant field for enums (where applicable).
2232                         Variants::Multiple { ref tag, .. } => {
2233                             assert_eq!(i, 0);
2234                             return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
2235                         }
2236                     }
2237                 }
2238
2239                 ty::Projection(_)
2240                 | ty::Bound(..)
2241                 | ty::Placeholder(..)
2242                 | ty::Opaque(..)
2243                 | ty::Param(_)
2244                 | ty::Infer(_)
2245                 | ty::Error(_) => bug!("TyAndLayout::field_type: unexpected type `{}`", this.ty),
2246             }
2247         }
2248
2249         cx.layout_of(match ty_and_layout_kind(this, cx, i, this.ty) {
2250             TyMaybeWithLayout::Ty(result) => result,
2251             TyMaybeWithLayout::TyAndLayout(result) => return result,
2252         })
2253     }
2254
2255     fn pointee_info_at(this: TyAndLayout<'tcx>, cx: &C, offset: Size) -> Option<PointeeInfo> {
2256         let addr_space_of_ty = |ty: Ty<'tcx>| {
2257             if ty.is_fn() { cx.data_layout().instruction_address_space } else { AddressSpace::DATA }
2258         };
2259
2260         let pointee_info = match *this.ty.kind() {
2261             ty::RawPtr(mt) if offset.bytes() == 0 => {
2262                 cx.layout_of(mt.ty).to_result().ok().map(|layout| PointeeInfo {
2263                     size: layout.size,
2264                     align: layout.align.abi,
2265                     safe: None,
2266                     address_space: addr_space_of_ty(mt.ty),
2267                 })
2268             }
2269             ty::FnPtr(fn_sig) if offset.bytes() == 0 => {
2270                 cx.layout_of(cx.tcx().mk_fn_ptr(fn_sig)).to_result().ok().map(|layout| {
2271                     PointeeInfo {
2272                         size: layout.size,
2273                         align: layout.align.abi,
2274                         safe: None,
2275                         address_space: cx.data_layout().instruction_address_space,
2276                     }
2277                 })
2278             }
2279             ty::Ref(_, ty, mt) if offset.bytes() == 0 => {
2280                 let address_space = addr_space_of_ty(ty);
2281                 let tcx = cx.tcx();
2282                 let is_freeze = ty.is_freeze(tcx.at(DUMMY_SP), cx.param_env());
2283                 let kind = match mt {
2284                     hir::Mutability::Not => {
2285                         if is_freeze {
2286                             PointerKind::Frozen
2287                         } else {
2288                             PointerKind::Shared
2289                         }
2290                     }
2291                     hir::Mutability::Mut => {
2292                         // Previously we would only emit noalias annotations for LLVM >= 6 or in
2293                         // panic=abort mode. That was deemed right, as prior versions had many bugs
2294                         // in conjunction with unwinding, but later versions didn’t seem to have
2295                         // said issues. See issue #31681.
2296                         //
2297                         // Alas, later on we encountered a case where noalias would generate wrong
2298                         // code altogether even with recent versions of LLVM in *safe* code with no
2299                         // unwinding involved. See #54462.
2300                         //
2301                         // For now, do not enable mutable_noalias by default at all, while the
2302                         // issue is being figured out.
2303                         if tcx.sess.opts.debugging_opts.mutable_noalias {
2304                             PointerKind::UniqueBorrowed
2305                         } else {
2306                             PointerKind::Shared
2307                         }
2308                     }
2309                 };
2310
2311                 cx.layout_of(ty).to_result().ok().map(|layout| PointeeInfo {
2312                     size: layout.size,
2313                     align: layout.align.abi,
2314                     safe: Some(kind),
2315                     address_space,
2316                 })
2317             }
2318
2319             _ => {
2320                 let mut data_variant = match this.variants {
2321                     // Within the discriminant field, only the niche itself is
2322                     // always initialized, so we only check for a pointer at its
2323                     // offset.
2324                     //
2325                     // If the niche is a pointer, it's either valid (according
2326                     // to its type), or null (which the niche field's scalar
2327                     // validity range encodes).  This allows using
2328                     // `dereferenceable_or_null` for e.g., `Option<&T>`, and
2329                     // this will continue to work as long as we don't start
2330                     // using more niches than just null (e.g., the first page of
2331                     // the address space, or unaligned pointers).
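                         // E.g. (sketch): in `Option<&T>`, the niche tag at offset 0
                         // *is* the pointer of the `Some` variant, so we descend
                         // into the dataful variant to look for pointee info there.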
2332                     Variants::Multiple {
2333                         tag_encoding: TagEncoding::Niche { dataful_variant, .. },
2334                         tag_field,
2335                         ..
2336                     } if this.fields.offset(tag_field) == offset => {
2337                         Some(this.for_variant(cx, dataful_variant))
2338                     }
2339                     _ => Some(this),
2340                 };
2341
2342                 if let Some(variant) = data_variant {
2343                     // We're not interested in any unions.
2344                     if let FieldsShape::Union(_) = variant.fields {
2345                         data_variant = None;
2346                     }
2347                 }
2348
2349                 let mut result = None;
2350
2351                 if let Some(variant) = data_variant {
2352                     let ptr_end = offset + Pointer.size(cx);
2353                     for i in 0..variant.fields.count() {
2354                         let field_start = variant.fields.offset(i);
2355                         if field_start <= offset {
2356                             let field = variant.field(cx, i);
2357                             result = field.to_result().ok().and_then(|field| {
2358                                 if ptr_end <= field_start + field.size {
2359                                     // We found the right field, look inside it.
2360                                     field.pointee_info_at(cx, offset - field_start)
2363                                 } else {
2364                                     None
2365                                 }
2366                             });
2367                             if result.is_some() {
2368                                 break;
2369                             }
2370                         }
2371                     }
2372                 }
2373
2374                 // FIXME(eddyb) This should be for `ptr::Unique<T>`, not `Box<T>`.
2375                 if let Some(ref mut pointee) = result {
2376                     if let ty::Adt(def, _) = this.ty.kind() {
2377                         if def.is_box() && offset.bytes() == 0 {
2378                             pointee.safe = Some(PointerKind::UniqueOwned);
2379                         }
2380                     }
2381                 }
2382
2383                 result
2384             }
2385         };
2386
2387         debug!(
2388             "pointee_info_at (offset={:?}, type kind: {:?}) => {:?}",
2389             offset,
2390             this.ty.kind(),
2391             pointee_info
2392         );
2393
2394         pointee_info
2395     }
2396 }
2397
2398 impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for LayoutError<'tcx> {
2399     fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
2400         use crate::ty::layout::LayoutError::*;
2401         mem::discriminant(self).hash_stable(hcx, hasher);
2402
2403         match *self {
2404             Unknown(t) | SizeOverflow(t) => t.hash_stable(hcx, hasher),
2405         }
2406     }
2407 }
2408
2409 impl<'tcx> ty::Instance<'tcx> {
2410     // NOTE(eddyb) this is private to avoid using it from outside of
2411     // `FnAbi::of_instance` - any other uses are either too high-level
2412     // for `Instance` (e.g. typeck would use `Ty::fn_sig` instead),
2413     // or should go through `FnAbi` instead, to avoid losing any
2414     // adjustments `FnAbi::of_instance` might be performing.
2415     fn fn_sig_for_fn_abi(&self, tcx: TyCtxt<'tcx>) -> ty::PolyFnSig<'tcx> {
2416         // FIXME(davidtwco,eddyb): A `ParamEnv` should be passed through to this function.
2417         let ty = self.ty(tcx, ty::ParamEnv::reveal_all());
2418         match *ty.kind() {
2419             ty::FnDef(..) => {
2420                 // HACK(davidtwco,eddyb): This is a workaround for polymorphization considering
2421                 // parameters unused if they show up in the signature, but not in the `mir::Body`
2422                 // (i.e. due to being inside a projection that got normalized, see
2423                 // `src/test/ui/polymorphization/normalized_sig_types.rs`), and codegen not keeping
2424                 // track of a polymorphization `ParamEnv` to allow normalizing later.
2425                 let mut sig = match *ty.kind() {
2426                     ty::FnDef(def_id, substs) => tcx
2427                         .normalize_erasing_regions(tcx.param_env(def_id), tcx.fn_sig(def_id))
2428                         .subst(tcx, substs),
2429                     _ => unreachable!(),
2430                 };
2431
2432                 if let ty::InstanceDef::VtableShim(..) = self.def {
2433                     // Modify `fn(self, ...)` to `fn(self: *mut Self, ...)`.
2434                     sig = sig.map_bound(|mut sig| {
2435                         let mut inputs_and_output = sig.inputs_and_output.to_vec();
2436                         inputs_and_output[0] = tcx.mk_mut_ptr(inputs_and_output[0]);
2437                         sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);
2438                         sig
2439                     });
2440                 }
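                     // Illustrative example: for a by-value trait method
                     // `fn consume(self)` invoked through a vtable, the shim's
                     // signature goes from `fn(Self)` to `fn(*mut Self)`, so the
                     // caller hands over the data pointer recovered from the
                     // trait object rather than the receiver by value.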
2441                 sig
2442             }
2443             ty::Closure(def_id, substs) => {
2444                 let sig = substs.as_closure().sig();
2445
2446                 let env_ty = tcx.closure_env_ty(def_id, substs).unwrap();
2447                 sig.map_bound(|sig| {
2448                     tcx.mk_fn_sig(
2449                         iter::once(env_ty.skip_binder()).chain(sig.inputs().iter().cloned()),
2450                         sig.output(),
2451                         sig.c_variadic,
2452                         sig.unsafety,
2453                         sig.abi,
2454                     )
2455                 })
2456             }
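                 // Illustrative example: for a `Fn` closure with environment type
                 // `F` and body `|x: i32| -> i32 { ... }`, the signature assembled
                 // above is roughly
                 //
                 //     extern "rust-call" fn(&F, (i32,)) -> i32
                 //
                 // i.e. the environment type from `closure_env_ty` (`&F`, `&mut F`,
                 // or `F`, depending on the closure kind) is prepended to the
                 // closure's own (tupled) inputs.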
2457             ty::Generator(_, substs, _) => {
2458                 let sig = substs.as_generator().poly_sig();
2459
2460                 let br = ty::BoundRegion { kind: ty::BrEnv };
2461                 let env_region = ty::ReLateBound(ty::INNERMOST, br);
2462                 let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty);
2463
2464                 let pin_did = tcx.require_lang_item(LangItem::Pin, None);
2465                 let pin_adt_ref = tcx.adt_def(pin_did);
2466                 let pin_substs = tcx.intern_substs(&[env_ty.into()]);
2467                 let env_ty = tcx.mk_adt(pin_adt_ref, pin_substs);
2468
2469                 sig.map_bound(|sig| {
2470                     let state_did = tcx.require_lang_item(LangItem::GeneratorState, None);
2471                     let state_adt_ref = tcx.adt_def(state_did);
2472                     let state_substs =
2473                         tcx.intern_substs(&[sig.yield_ty.into(), sig.return_ty.into()]);
2474                     let ret_ty = tcx.mk_adt(state_adt_ref, state_substs);
2475
2476                     tcx.mk_fn_sig(
2477                         [env_ty, sig.resume_ty].iter(),
2478                         &ret_ty,
2479                         false,
2480                         hir::Unsafety::Normal,
2481                         rustc_target::spec::abi::Abi::Rust,
2482                     )
2483                 })
2484             }
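                 // Illustrative example: for a generator `G` with resume type `R`,
                 // yield type `Y`, and return type `Z`, the assembled signature is
                 //
                 //     fn(Pin<&mut G>, R) -> GeneratorState<Y, Z>
                 //
                 // mirroring how `Generator::resume` is invoked at runtime.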
2485             _ => bug!("unexpected type {:?} in Instance::fn_sig", ty),
2486         }
2487     }
2488 }
2489
2490 pub trait FnAbiExt<'tcx, C>
2491 where
2492     C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout = TyAndLayout<'tcx>>
2493         + HasDataLayout
2494         + HasTargetSpec
2495         + HasTyCtxt<'tcx>
2496         + HasParamEnv<'tcx>,
2497 {
2498     /// Compute a `FnAbi` suitable for indirect calls, i.e. to `fn` pointers.
2499     ///
2500     /// NB: this doesn't handle virtual calls - those should use `FnAbi::of_instance`
2501     /// instead, where the instance is an `InstanceDef::Virtual`.
2502     fn of_fn_ptr(cx: &C, sig: ty::PolyFnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self;
2503
2504     /// Compute a `FnAbi` suitable for declaring/defining an `fn` instance, and for
2505     /// direct calls to an `fn`.
2506     ///
2507     /// NB: that includes virtual calls, which are represented by "direct calls"
2508     /// to an `InstanceDef::Virtual` instance (of `<dyn Trait as Trait>::fn`).
2509     fn of_instance(cx: &C, instance: ty::Instance<'tcx>, extra_args: &[Ty<'tcx>]) -> Self;
2510
2511     fn new_internal(
2512         cx: &C,
2513         sig: ty::PolyFnSig<'tcx>,
2514         extra_args: &[Ty<'tcx>],
2515         caller_location: Option<Ty<'tcx>>,
2516         codegen_fn_attr_flags: CodegenFnAttrFlags,
2517         mk_arg_type: impl Fn(Ty<'tcx>, Option<usize>) -> ArgAbi<'tcx, Ty<'tcx>>,
2518     ) -> Self;
2519     fn adjust_for_abi(&mut self, cx: &C, abi: SpecAbi);
2520 }
2521
2522 fn fn_can_unwind(
2523     panic_strategy: PanicStrategy,
2524     codegen_fn_attr_flags: CodegenFnAttrFlags,
2525     call_conv: Conv,
2526 ) -> bool {
2527     if panic_strategy != PanicStrategy::Unwind {
2528         // In panic=abort mode we assume nothing can unwind anywhere, so
2529         // optimize based on this!
2530         false
2531     } else if codegen_fn_attr_flags.contains(CodegenFnAttrFlags::UNWIND) {
2532         // If a specific #[unwind] attribute is present, use that.
2533         true
2534     } else if codegen_fn_attr_flags.contains(CodegenFnAttrFlags::RUSTC_ALLOCATOR_NOUNWIND) {
2535         // Special attribute for allocator functions, which can't unwind.
2536         false
2537     } else if call_conv == Conv::Rust {
2538         // Any Rust method (or `extern "Rust" fn` or `extern
2539         // "rust-call" fn`) is explicitly allowed to unwind
2540         // (unless it has a no-unwind attribute, handled above).
2541         true
2542     } else {
2543         // Anything else is either:
2544         //
2545         //  1. A foreign item using a non-Rust ABI (like `extern "C" { fn foo(); }`), or
2546         //
2547         //  2. A Rust item using a non-Rust ABI (like `extern "C" fn foo() { ... }`).
2548         //
2549         // Foreign items (case 1) are assumed not to unwind; it is
2550         // UB otherwise. (At least for now; see also
2551         // rust-lang/rust#63909 and Rust RFC 2753.)
2552         //
2553         // Items defined in Rust with non-Rust ABIs (case 2) are also
2554         // not supposed to unwind. Whether this should be enforced
2555         // (versus stating that it is UB) and *how* it would be enforced
2556         // is currently under discussion; see rust-lang/rust#58794.
2557         //
2558         // In either case, we mark the item as explicitly nounwind.
2559         false
2560     }
2563 }
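     // Worked examples of `fn_can_unwind` (the branches above apply top to bottom):
     //
     //   panic strategy | codegen fn attr flags    | call conv  | result
     //   ---------------+--------------------------+------------+-------
     //   Abort          | (any)                    | (any)      | false
     //   Unwind         | UNWIND                   | (any)      | true
     //   Unwind         | RUSTC_ALLOCATOR_NOUNWIND | (any)      | false
     //   Unwind         | (neither)                | Conv::Rust | true
     //   Unwind         | (neither)                | Conv::C    | false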
2564
2565 impl<'tcx, C> FnAbiExt<'tcx, C> for call::FnAbi<'tcx, Ty<'tcx>>
2566 where
2567     C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout = TyAndLayout<'tcx>>
2568         + HasDataLayout
2569         + HasTargetSpec
2570         + HasTyCtxt<'tcx>
2571         + HasParamEnv<'tcx>,
2572 {
2573     fn of_fn_ptr(cx: &C, sig: ty::PolyFnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
2574         // Assume that fn pointers may always unwind
2575         let codegen_fn_attr_flags = CodegenFnAttrFlags::UNWIND;
2576
2577         call::FnAbi::new_internal(cx, sig, extra_args, None, codegen_fn_attr_flags, |ty, _| {
2578             ArgAbi::new(cx.layout_of(ty))
2579         })
2580     }
2581
2582     fn of_instance(cx: &C, instance: ty::Instance<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
2583         let sig = instance.fn_sig_for_fn_abi(cx.tcx());
2584
2585         let caller_location = if instance.def.requires_caller_location(cx.tcx()) {
2586             Some(cx.tcx().caller_location_ty())
2587         } else {
2588             None
2589         };
2590
2591         let attrs = cx.tcx().codegen_fn_attrs(instance.def_id()).flags;
2592
2593         call::FnAbi::new_internal(cx, sig, extra_args, caller_location, attrs, |ty, arg_idx| {
2594             let mut layout = cx.layout_of(ty);
2595             // Don't pass the vtable; it's not an argument of the virtual fn.
2596             // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
2597             // or `&/&mut dyn Trait`, because this is special-cased elsewhere in codegen.
2598             if let (ty::InstanceDef::Virtual(..), Some(0)) = (&instance.def, arg_idx) {
2599                 let fat_pointer_ty = if layout.is_unsized() {
2600                     // unsized `self` is passed as a pointer to `self`
2601                     // FIXME (mikeyhew) change this to use &own if it is ever added to the language
2602                     cx.tcx().mk_mut_ptr(layout.ty)
2603                 } else {
2604                     match layout.abi {
2605                         Abi::ScalarPair(..) => (),
2606                         _ => bug!("receiver type has unsupported layout: {:?}", layout),
2607                     }
2608
2609                     // In the case of Rc<Self>, we need to explicitly pass a *mut RcBox<Self>
2610                     // with a Scalar (not ScalarPair) ABI. This is a hack that is understood
2611                     // elsewhere in the compiler as a method on a `dyn Trait`.
2612                     // To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes until we
2613                     // get a built-in pointer type
2614                     let mut fat_pointer_layout = layout;
2615                     'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr()
2616                         && !fat_pointer_layout.ty.is_region_ptr()
2617                     {
2618                         for i in 0..fat_pointer_layout.fields.count() {
2619                             let field_layout = fat_pointer_layout.field(cx, i);
2620
2621                             if !field_layout.is_zst() {
2622                                 fat_pointer_layout = field_layout;
2623                                 continue 'descend_newtypes;
2624                             }
2625                         }
2626
2627                         bug!("receiver has no non-zero-sized fields {:?}", fat_pointer_layout);
2628                     }
2629
2630                     fat_pointer_layout.ty
2631                 };
2632
2633                 // We now have a type like `*mut RcBox<dyn Trait>`;
2634                 // change its layout to that of `*mut ()`, a thin pointer, but keep the same type.
2635                 // This is understood as a special case elsewhere in the compiler.
2636                 let unit_pointer_ty = cx.tcx().mk_mut_ptr(cx.tcx().mk_unit());
2637                 layout = cx.layout_of(unit_pointer_ty);
2638                 layout.ty = fat_pointer_ty;
2639             }
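                 // Illustrative example: for `<Rc<dyn Trait> as Trait>::method`,
                 // the receiver arrives here as `Rc<dyn Trait>` (a fat
                 // `ScalarPair`); the newtype descent above digs out the built-in
                 // pointer type (something like `*mut RcBox<dyn Trait>`), and the
                 // resulting `ArgAbi` pairs that type with the thin layout of
                 // `*mut ()`.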
2640             ArgAbi::new(layout)
2641         })
2642     }
2643
2644     fn new_internal(
2645         cx: &C,
2646         sig: ty::PolyFnSig<'tcx>,
2647         extra_args: &[Ty<'tcx>],
2648         caller_location: Option<Ty<'tcx>>,
2649         codegen_fn_attr_flags: CodegenFnAttrFlags,
2650         mk_arg_type: impl Fn(Ty<'tcx>, Option<usize>) -> ArgAbi<'tcx, Ty<'tcx>>,
2651     ) -> Self {
2652         debug!("FnAbi::new_internal({:?}, {:?})", sig, extra_args);
2653
2654         let sig = cx.tcx().normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), sig);
2655
2656         use rustc_target::spec::abi::Abi::*;
2657         let conv = match cx.tcx().sess.target.adjust_abi(sig.abi) {
2658             RustIntrinsic | PlatformIntrinsic | Rust | RustCall => Conv::Rust,
2659
2660             // It's the ABI's job to select this, not ours.
2661             System => bug!("system abi should be selected elsewhere"),
2662             EfiApi => bug!("eficall abi should be selected elsewhere"),
2663
2664             Stdcall => Conv::X86Stdcall,
2665             Fastcall => Conv::X86Fastcall,
2666             Vectorcall => Conv::X86VectorCall,
2667             Thiscall => Conv::X86ThisCall,
2668             C => Conv::C,
2669             Unadjusted => Conv::C,
2670             Win64 => Conv::X86_64Win64,
2671             SysV64 => Conv::X86_64SysV,
2672             Aapcs => Conv::ArmAapcs,
2673             PtxKernel => Conv::PtxKernel,
2674             Msp430Interrupt => Conv::Msp430Intr,
2675             X86Interrupt => Conv::X86Intr,
2676             AmdGpuKernel => Conv::AmdGpuKernel,
2677             AvrInterrupt => Conv::AvrInterrupt,
2678             AvrNonBlockingInterrupt => Conv::AvrNonBlockingInterrupt,
2679
2680             // These ABI constants ought to be more specific...
2681             Cdecl => Conv::C,
2682         };
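             // For example: `extern "C" fn` and `extern "cdecl" fn` both end up
             // as `Conv::C` here, while `extern "stdcall" fn` becomes
             // `Conv::X86Stdcall`; `adjust_abi` gives the target a chance to
             // remap the declared ABI before this match runs.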
2683
2684         let mut inputs = sig.inputs();
2685         let extra_args = if sig.abi == RustCall {
2686             assert!(!sig.c_variadic && extra_args.is_empty());
2687
2688             if let Some(input) = sig.inputs().last() {
2689                 if let ty::Tuple(tupled_arguments) = input.kind() {
2690                     inputs = &sig.inputs()[0..sig.inputs().len() - 1];
2691                     tupled_arguments.iter().map(|k| k.expect_ty()).collect()
2692                 } else {
2693                     bug!(
2694                         "argument to function with \"rust-call\" ABI \
2695                             is not a tuple"
2696                     );
2697                 }
2698             } else {
2699                 bug!(
2700                     "argument to function with \"rust-call\" ABI \
2701                         is not a tuple"
2702                 );
2703             }
2704         } else {
2705             assert!(sig.c_variadic || extra_args.is_empty());
2706             extra_args.to_vec()
2707         };
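             // Illustrative example of the untupling above: a call through
             // `Fn::call` arrives with a signature like
             //
             //     extern "rust-call" fn(&F, (i32, f64)) -> R
             //
             // and leaves this block with `inputs = [&F]` and
             // `extra_args = [i32, f64]`, so each tuple element gets its own
             // `ArgAbi` below.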
2708
2709         let target = &cx.tcx().sess.target;
2710         let target_env_gnu_like = matches!(&target.env[..], "gnu" | "musl");
2711         let win_x64_gnu = target.os == "windows" && target.arch == "x86_64" && target.env == "gnu";
2712         let linux_s390x_gnu_like =
2713             target.os == "linux" && target.arch == "s390x" && target_env_gnu_like;
2714         let linux_sparc64_gnu_like =
2715             target.os == "linux" && target.arch == "sparc64" && target_env_gnu_like;
2716         let linux_powerpc_gnu_like =
2717             target.os == "linux" && target.arch == "powerpc" && target_env_gnu_like;
2718         let rust_abi = matches!(sig.abi, RustIntrinsic | PlatformIntrinsic | Rust | RustCall);
2719
2720         // Handle safe Rust thin and fat pointers.
2721         let adjust_for_rust_scalar = |attrs: &mut ArgAttributes,
2722                                       scalar: &Scalar,
2723                                       layout: TyAndLayout<'tcx>,
2724                                       offset: Size,
2725                                       is_return: bool| {
2726             // Booleans are always an i1 that needs to be zero-extended.
2727             if scalar.is_bool() {
2728                 attrs.ext(ArgExtension::Zext);
2729                 return;
2730             }
2731
2732             // Only pointer types are handled below.
2733             if scalar.value != Pointer {
2734                 return;
2735             }
2736
2737             if scalar.valid_range.start() < scalar.valid_range.end()
2738                 && *scalar.valid_range.start() > 0
2739             {
2740                 attrs.set(ArgAttribute::NonNull);
2741             }
2742
2743             if let Some(pointee) = layout.pointee_info_at(cx, offset) {
2744                 if let Some(kind) = pointee.safe {
2745                     attrs.pointee_align = Some(pointee.align);
2746
2747                     // `Box` (`UniqueOwned`) pointers are not necessarily dereferenceable
2748                     // for the entire duration of the function, as they can be deallocated
2749                     // at any time. Set their valid size to 0.
2750                     attrs.pointee_size = match kind {
2751                         PointerKind::UniqueOwned => Size::ZERO,
2752                         _ => pointee.size,
2753                     };
2754
2755                     // `Box` pointer parameters never alias because ownership is transferred.
2756                     // `&mut` pointer parameters never alias other parameters
2757                     // or mutable global data.
2758                     //
2759                     // `&T` where `T` contains no `UnsafeCell<U>` is immutable,
2760                     // and can be marked as both `readonly` and `noalias`, as
2761                     // LLVM's definition of `noalias` is based solely on memory
2762                     // dependencies rather than pointer equality.
2763                     let no_alias = match kind {
2764                         PointerKind::Shared => false,
2765                         PointerKind::UniqueOwned => true,
2766                         PointerKind::Frozen | PointerKind::UniqueBorrowed => !is_return,
2767                     };
2768                     if no_alias {
2769                         attrs.set(ArgAttribute::NoAlias);
2770                     }
2771
2772                     if kind == PointerKind::Frozen && !is_return {
2773                         attrs.set(ArgAttribute::ReadOnly);
2774                     }
2775                 }
2776             }
2777         };
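             // Worked examples of `adjust_for_rust_scalar` on a non-return
             // argument (with `-Z mutable_noalias` off, the current default),
             // stated in rough LLVM attribute terms:
             //
             //   - `&T` where `T: Freeze` (`PointerKind::Frozen`):
             //     `nonnull` + `dereferenceable(size_of::<T>())` + `noalias` + `readonly`.
             //   - `&mut T` (`PointerKind::Shared` by default, per `pointee_info_at`):
             //     `nonnull` + `dereferenceable(size_of::<T>())` only.
             //   - `Box<T>` (`PointerKind::UniqueOwned`):
             //     `nonnull` + `noalias`, with the dereferenceable size forced to 0.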
2778
2779         let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| {
2780             let is_return = arg_idx.is_none();
2781             let mut arg = mk_arg_type(ty, arg_idx);
2782             if arg.layout.is_zst() {
2783                 // For some forsaken reason, x86_64-pc-windows-gnu
2784                 // doesn't ignore zero-sized struct arguments.
2785                 // The same is true for {s390x,sparc64,powerpc}-unknown-linux-{gnu,musl}.
2786                 if is_return
2787                     || rust_abi
2788                     || (!win_x64_gnu
2789                         && !linux_s390x_gnu_like
2790                         && !linux_sparc64_gnu_like
2791                         && !linux_powerpc_gnu_like)
2792                 {
2793                     arg.mode = PassMode::Ignore;
2794                 }
2795             }
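                 // E.g. a `()` argument is ignored for every Rust-ABI function
                 // and for foreign ABIs on most targets, but is still
                 // materialized on x86_64-pc-windows-gnu and the gnu-like Linux
                 // targets listed above.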
2796
2797             // FIXME(eddyb) other ABIs don't have logic for scalar pairs.
2798             if !is_return && rust_abi {
2799                 if let Abi::ScalarPair(ref a, ref b) = arg.layout.abi {
2800                     let mut a_attrs = ArgAttributes::new();
2801                     let mut b_attrs = ArgAttributes::new();
2802                     adjust_for_rust_scalar(&mut a_attrs, a, arg.layout, Size::ZERO, false);
2803                     adjust_for_rust_scalar(
2804                         &mut b_attrs,
2805                         b,
2806                         arg.layout,
2807                         a.value.size(cx).align_to(b.value.align(cx).abi),
2808                         false,
2809                     );
2810                     arg.mode = PassMode::Pair(a_attrs, b_attrs);
2811                     return arg;
2812                 }
2813             }
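                 // Illustrative example: a `&str` argument has a
                 // `ScalarPair(data_ptr, len)` layout, so it is passed as
                 // `PassMode::Pair`, each half attributed independently (the
                 // data pointer picks up `nonnull` etc. via
                 // `adjust_for_rust_scalar`).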
2814
2815             if let Abi::Scalar(ref scalar) = arg.layout.abi {
2816                 if let PassMode::Direct(ref mut attrs) = arg.mode {
2817                     adjust_for_rust_scalar(attrs, scalar, arg.layout, Size::ZERO, is_return);
2818                 }
2819             }
2820
2821             arg
2822         };
2823
2824         let mut fn_abi = FnAbi {
2825             ret: arg_of(sig.output(), None),
2826             args: inputs
2827                 .iter()
2828                 .cloned()
2829                 .chain(extra_args)
2830                 .chain(caller_location)
2831                 .enumerate()
2832                 .map(|(i, ty)| arg_of(ty, Some(i)))
2833                 .collect(),
2834             c_variadic: sig.c_variadic,
2835             fixed_count: inputs.len(),
2836             conv,
2837             can_unwind: fn_can_unwind(cx.tcx().sess.panic_strategy(), codegen_fn_attr_flags, conv),
2838         };
2839         fn_abi.adjust_for_abi(cx, sig.abi);
2840         debug!("FnAbi::new_internal = {:?}", fn_abi);
2841         fn_abi
2842     }
2843
2844     fn adjust_for_abi(&mut self, cx: &C, abi: SpecAbi) {
2845         if abi == SpecAbi::Unadjusted {
2846             return;
2847         }
2848
2849         if abi == SpecAbi::Rust
2850             || abi == SpecAbi::RustCall
2851             || abi == SpecAbi::RustIntrinsic
2852             || abi == SpecAbi::PlatformIntrinsic
2853         {
2854             let fixup = |arg: &mut ArgAbi<'tcx, Ty<'tcx>>| {
2855                 if arg.is_ignore() {
2856                     return;
2857                 }
2858
2859                 match arg.layout.abi {
2860                     Abi::Aggregate { .. } => {}
2861
2862                     // This is a fun case! The gist of what this is doing is
2863                     // that we want callers and callees to always agree on the
2864                     // ABI of how they pass SIMD arguments. If we were to *not*
2865                     // make these arguments indirect then they'd be immediates
2866                     // in LLVM, which means that they'd use whatever the
2867                     // appropriate ABI is for the callee and the caller. That
2868                     // means, for example, if the caller doesn't have AVX
2869                     // enabled but the callee does, then passing an AVX argument
2870                     // across this boundary would cause corrupt data to show up.
2871                     //
2872                     // This problem is fixed by unconditionally passing SIMD
2873                     // arguments through memory between callers and callees
2874                     // which should get them all to agree on ABI regardless of
2875                     // target feature sets. Some more information about this
2876                     // issue can be found in #44367.
2877                     //
2878                     // Note that the platform intrinsic ABI is exempt here, as
2879                     // that's how we connect up to LLVM; it's unstable anyway,
2880                     // and we control all calls to it in libstd.
2881                     Abi::Vector { .. }
2882                         if abi != SpecAbi::PlatformIntrinsic
2883                             && cx.tcx().sess.target.simd_types_indirect =>
2884                     {
2885                         arg.make_indirect();
2886                         return;
2887                     }
2888
2889                     _ => return,
2890                 }
2891
2892                 // Pass and return structures up to 2 pointers in size by value, matching `ScalarPair`.
2893                 // LLVM will usually pass these in 2 registers, which is more efficient than by-ref.
2894                 let max_by_val_size = Pointer.size(cx) * 2;
2895                 let size = arg.layout.size;
2896
2897                 if arg.layout.is_unsized() || size > max_by_val_size {
2898                     arg.make_indirect();
2899                 } else {
2900                     // We want to pass small aggregates as immediates, but using
2901                     // a LLVM aggregate type for this leads to bad optimizations,
2902                     // an LLVM aggregate type for this leads to bad optimizations,
2903                     arg.cast_to(Reg { kind: RegKind::Integer, size });
2904                 }
2905             };
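                 // Illustrative outcomes of `fixup` on a 64-bit target (type
                 // names are made up for the example):
                 //
                 //   - `struct Rgb(u32, u32, u32)`: a 12-byte `Abi::Aggregate`,
                 //     at most two pointers in size, so cast to
                 //     `Reg { kind: RegKind::Integer, size }` and passed by value.
                 //   - `[u8; 64]`: larger than two pointers, made indirect.
                 //   - `#[repr(simd)]` vectors: made indirect whenever the
                 //     target sets `simd_types_indirect`.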
2906             fixup(&mut self.ret);
2907             for arg in &mut self.args {
2908                 fixup(arg);
2909             }
2910             return;
2911         }
2912
2913         if let Err(msg) = self.adjust_for_cabi(cx, abi) {
2914             cx.tcx().sess.fatal(&msg);
2915         }
2916     }
2917 }