use crate::ich::StableHashingContext;
use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use crate::mir::{GeneratorLayout, GeneratorSavedLocal};
use crate::ty::subst::Subst;
use crate::ty::{self, subst::SubstsRef, ReprOptions, Ty, TyCtxt, TypeFoldable};

use rustc_ast::{self as ast, IntTy, UintTy};
use rustc_attr as attr;
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_hir as hir;
use rustc_hir::lang_items::LangItem;
use rustc_index::bit_set::BitSet;
use rustc_index::vec::{Idx, IndexVec};
use rustc_session::{DataTypeKind, FieldInfo, SizeKind, VariantInfo};
use rustc_span::symbol::{Ident, Symbol};
use rustc_span::DUMMY_SP;
use rustc_target::abi::call::{
    ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, Conv, FnAbi, PassMode, Reg, RegKind,
};
use rustc_target::abi::*;
use rustc_target::spec::{abi::Abi as SpecAbi, HasTargetSpec, PanicStrategy};

use std::cmp;
use std::fmt;
use std::iter;
use std::mem;
use std::num::NonZeroUsize;
use std::ops::Bound;

pub trait IntegerExt {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx>;
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
    fn repr_discr<'tcx>(
        tcx: TyCtxt<'tcx>,
        ty: Ty<'tcx>,
        repr: &ReprOptions,
        min: i128,
        max: i128,
    ) -> (Integer, bool);
}

impl IntegerExt for Integer {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx> {
        match (*self, signed) {
            (I8, false) => tcx.types.u8,
            (I16, false) => tcx.types.u16,
            (I32, false) => tcx.types.u32,
            (I64, false) => tcx.types.u64,
            (I128, false) => tcx.types.u128,
            (I8, true) => tcx.types.i8,
            (I16, true) => tcx.types.i16,
            (I32, true) => tcx.types.i32,
            (I64, true) => tcx.types.i64,
            (I128, true) => tcx.types.i128,
        }
    }

    /// Gets the `Integer` type from an `attr::IntType`.
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
        let dl = cx.data_layout();

        match ity {
            attr::SignedInt(IntTy::I8) | attr::UnsignedInt(UintTy::U8) => I8,
            attr::SignedInt(IntTy::I16) | attr::UnsignedInt(UintTy::U16) => I16,
            attr::SignedInt(IntTy::I32) | attr::UnsignedInt(UintTy::U32) => I32,
            attr::SignedInt(IntTy::I64) | attr::UnsignedInt(UintTy::U64) => I64,
            attr::SignedInt(IntTy::I128) | attr::UnsignedInt(UintTy::U128) => I128,
            attr::SignedInt(IntTy::Isize) | attr::UnsignedInt(UintTy::Usize) => {
                dl.ptr_sized_integer()
            }
        }
    }

    /// Finds the appropriate `Integer` type and signedness for the given
    /// signed discriminant range and `#[repr]` attribute.
    /// N.B.: `u128` values above `i128::MAX` will be treated as signed, but
    /// that shouldn't affect anything, other than maybe debuginfo.
    fn repr_discr<'tcx>(
        tcx: TyCtxt<'tcx>,
        ty: Ty<'tcx>,
        repr: &ReprOptions,
        min: i128,
        max: i128,
    ) -> (Integer, bool) {
        // Theoretically, negative values could be larger in unsigned representation
        // than the unsigned representation of the signed minimum. However, if there
        // are any negative values, the only valid unsigned representation is u128
        // which can fit all i128 values, so the result remains unaffected.
        let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
        let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));
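        // For example: a discriminant range of -1..=200 gives
        // signed_fit = max(fit_signed(-1), fit_signed(200)) = I16; since the
        // range contains a negative value, the fallthrough below returns
        // (I16, true) absent any `#[repr]` hint.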

        let mut min_from_extern = None;
        let min_default = I8;

        if let Some(ity) = repr.int {
            let discr = Integer::from_attr(&tcx, ity);
            let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
            if discr < fit {
                bug!(
                    "Integer::repr_discr: `#[repr]` hint too small for \
                      discriminant range of enum `{}`",
                    ty
                )
            }
            return (discr, ity.is_signed());
        }

        if repr.c() {
            match &tcx.sess.target.arch[..] {
                // WARNING: the ARM EABI has two variants; the one corresponding
                // to `at_least == I32` appears to be used on Linux and NetBSD,
                // but some systems may use the variant corresponding to no
                // lower bound. However, we don't run on those yet...?
                "arm" => min_from_extern = Some(I32),
                _ => min_from_extern = Some(I32),
            }
        }

        let at_least = min_from_extern.unwrap_or(min_default);

        // If there are no negative values, we can use the unsigned fit.
        if min >= 0 {
            (cmp::max(unsigned_fit, at_least), false)
        } else {
            (cmp::max(signed_fit, at_least), true)
        }
    }
}

pub trait PrimitiveExt {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
    fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
}

impl PrimitiveExt for Primitive {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
        match *self {
            Int(i, signed) => i.to_ty(tcx, signed),
            F32 => tcx.types.f32,
            F64 => tcx.types.f64,
            Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
        }
    }

    /// Returns an *integer* type matching this primitive.
    /// Useful in particular when dealing with enum discriminants.
    fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
        match *self {
            Int(i, signed) => i.to_ty(tcx, signed),
            Pointer => tcx.types.usize,
            F32 | F64 => bug!("floats do not have an int type"),
        }
    }
}

/// The first half of a fat pointer.
///
/// - For a trait object, this is the address of the box.
/// - For a slice, this is the base address.
pub const FAT_PTR_ADDR: usize = 0;

/// The second half of a fat pointer.
///
/// - For a trait object, this is the address of the vtable.
/// - For a slice, this is the length.
pub const FAT_PTR_EXTRA: usize = 1;

#[derive(Copy, Clone, Debug, TyEncodable, TyDecodable)]
pub enum LayoutError<'tcx> {
    Unknown(Ty<'tcx>),
    SizeOverflow(Ty<'tcx>),
}

impl<'tcx> fmt::Display for LayoutError<'tcx> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match *self {
            LayoutError::Unknown(ty) => write!(f, "the type `{}` has an unknown layout", ty),
            LayoutError::SizeOverflow(ty) => {
                write!(f, "values of the type `{}` are too big for the current architecture", ty)
            }
        }
    }
}

fn layout_raw<'tcx>(
    tcx: TyCtxt<'tcx>,
    query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
) -> Result<&'tcx Layout, LayoutError<'tcx>> {
    ty::tls::with_related_context(tcx, move |icx| {
        let (param_env, ty) = query.into_parts();

        if !tcx.sess.recursion_limit().value_within_limit(icx.layout_depth) {
            tcx.sess.fatal(&format!("overflow representing the type `{}`", ty));
        }

        // Update the ImplicitCtxt to increase the layout_depth
        let icx = ty::tls::ImplicitCtxt { layout_depth: icx.layout_depth + 1, ..icx.clone() };

        ty::tls::enter_context(&icx, |_| {
            let cx = LayoutCx { tcx, param_env };
            let layout = cx.layout_raw_uncached(ty);
            // Type-level uninhabitedness should always imply ABI uninhabitedness.
            if let Ok(layout) = layout {
                if ty.conservative_is_privately_uninhabited(tcx) {
                    assert!(layout.abi.is_uninhabited());
                }
            }
            layout
        })
    })
}

pub fn provide(providers: &mut ty::query::Providers) {
    *providers = ty::query::Providers { layout_raw, ..*providers };
}

pub struct LayoutCx<'tcx, C> {
    pub tcx: C,
    pub param_env: ty::ParamEnv<'tcx>,
}

#[derive(Copy, Clone, Debug)]
enum StructKind {
    /// A tuple, closure, or univariant which cannot be coerced to unsized.
    AlwaysSized,
    /// A univariant, the last field of which may be coerced to unsized.
    MaybeUnsized,
    /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
    Prefixed(Size, Align),
}

// Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
// This is used to go between `memory_index` (source field order to memory order)
// and `inverse_memory_index` (memory order to source field order).
// See also `FieldsShape::Arbitrary::memory_index` for more details.
// FIXME(eddyb) build a better abstraction for permutations, if possible.
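// For example: `invert_mapping(&[2, 0, 1])` returns `[1, 2, 0]`, since
// `map[0] == 2` implies `inverse[2] == 0`, and so on for each entry.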
fn invert_mapping(map: &[u32]) -> Vec<u32> {
    let mut inverse = vec![0; map.len()];
    for i in 0..map.len() {
        inverse[map[i] as usize] = i as u32;
    }
    inverse
}

impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
    fn scalar_pair(&self, a: Scalar, b: Scalar) -> Layout {
        let dl = self.data_layout();
        let b_align = b.value.align(dl);
        let align = a.value.align(dl).max(b_align).max(dl.aggregate_align);
        let b_offset = a.value.size(dl).align_to(b_align.abi);
        let size = (b_offset + b.value.size(dl)).align_to(align.abi);
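
        // For example: pairing a `u8` with a `u32` on a typical 64-bit target
        // gives `b_offset` = 4 (the `u8` is padded to the `u32`'s alignment)
        // and `size` = 8.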

        // HACK(nox): We iter on `b` and then `a` because `max_by_key`
        // returns the last maximum.
        let largest_niche = Niche::from_scalar(dl, b_offset, b.clone())
            .into_iter()
            .chain(Niche::from_scalar(dl, Size::ZERO, a.clone()))
            .max_by_key(|niche| niche.available(dl));

        Layout {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Arbitrary {
                offsets: vec![Size::ZERO, b_offset],
                memory_index: vec![0, 1],
            },
            abi: Abi::ScalarPair(a, b),
            largest_niche,
            align,
            size,
        }
    }

    fn univariant_uninterned(
        &self,
        ty: Ty<'tcx>,
        fields: &[TyAndLayout<'_>],
        repr: &ReprOptions,
        kind: StructKind,
    ) -> Result<Layout, LayoutError<'tcx>> {
        let dl = self.data_layout();
        let pack = repr.pack;
        if pack.is_some() && repr.align.is_some() {
            bug!("struct cannot be packed and aligned");
        }

        let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };

        let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();

        let optimize = !repr.inhibit_struct_field_reordering_opt();
        if optimize {
            let end =
                if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
            let optimizing = &mut inverse_memory_index[..end];
            let field_align = |f: &TyAndLayout<'_>| {
                if let Some(pack) = pack { f.align.abi.min(pack) } else { f.align.abi }
            };
            match kind {
                StructKind::AlwaysSized | StructKind::MaybeUnsized => {
                    optimizing.sort_by_key(|&x| {
                        // Place ZSTs first to avoid "interesting offsets",
                        // especially with only one or two non-ZST fields.
                        let f = &fields[x as usize];
                        (!f.is_zst(), cmp::Reverse(field_align(f)))
                    });
                }
                StructKind::Prefixed(..) => {
                    // Sort in ascending alignment, so that the layout stays
                    // optimal regardless of the prefix.
                    optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
                }
            }
        }
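
        // For example: with no `#[repr]`, `struct S { a: u8, b: u32, c: u16 }`
        // is reordered to (b, c, a) in memory, i.e. `inverse_memory_index`
        // becomes `[1, 2, 0]`.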

        // inverse_memory_index holds field indices by increasing memory offset.
        // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
        // We now write field offsets to the corresponding offset slot;
        // field 5 with offset 0 puts 0 in offsets[5].
        // At the bottom of this function, we invert `inverse_memory_index` to
        // produce `memory_index` (see `invert_mapping`).

        let mut sized = true;
        let mut offsets = vec![Size::ZERO; fields.len()];
        let mut offset = Size::ZERO;
        let mut largest_niche = None;
        let mut largest_niche_available = 0;

        if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
            let prefix_align =
                if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
            align = align.max(AbiAndPrefAlign::new(prefix_align));
            offset = prefix_size.align_to(prefix_align);
        }

        for &i in &inverse_memory_index {
            let field = fields[i as usize];
            if !sized {
                bug!("univariant: field #{} of `{}` comes after unsized field", offsets.len(), ty);
            }

            if field.is_unsized() {
                sized = false;
            }

            // Invariant: offset < dl.obj_size_bound() <= 1<<61
            let field_align = if let Some(pack) = pack {
                field.align.min(AbiAndPrefAlign::new(pack))
            } else {
                field.align
            };
            offset = offset.align_to(field_align.abi);
            align = align.max(field_align);

            debug!("univariant offset: {:?} field: {:#?}", offset, field);
            offsets[i as usize] = offset;

            if !repr.hide_niche() {
                if let Some(mut niche) = field.largest_niche.clone() {
                    let available = niche.available(dl);
                    if available > largest_niche_available {
                        largest_niche_available = available;
                        niche.offset += offset;
                        largest_niche = Some(niche);
                    }
                }
            }

            offset = offset.checked_add(field.size, dl).ok_or(LayoutError::SizeOverflow(ty))?;
        }

        if let Some(repr_align) = repr.align {
            align = align.max(AbiAndPrefAlign::new(repr_align));
        }

        debug!("univariant min_size: {:?}", offset);
        let min_size = offset;

        // As stated above, inverse_memory_index holds field indices by increasing offset.
        // This makes it an already-sorted view of the offsets vec.
        // To invert it, consider:
        // if field 5 has offset 0, then offsets[0] is 5, and memory_index[5] should be 0.
        // That is, inverting maps each memory position back to the source field
        // index that occupies it.
        // Note: if we didn't optimize, it's already right.

        let memory_index =
            if optimize { invert_mapping(&inverse_memory_index) } else { inverse_memory_index };

        let size = min_size.align_to(align.abi);
        let mut abi = Abi::Aggregate { sized };

        // Unpack newtype ABIs and find scalar pairs.
        if sized && size.bytes() > 0 {
            // All other fields must be ZSTs.
            let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());

            match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
                // We have exactly one non-ZST field.
                (Some((i, field)), None, None) => {
                    // Field fills the struct and it has a scalar or scalar pair ABI.
                    if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size
                    {
                        match field.abi {
                            // For plain scalars, or vectors of them, we can't unpack
                            // newtypes for `#[repr(C)]`, as that affects C ABIs.
                            Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
                                abi = field.abi.clone();
                            }
                            // But scalar pairs are Rust-specific and get
                            // treated as aggregates by C ABIs anyway.
                            Abi::ScalarPair(..) => {
                                abi = field.abi.clone();
                            }
                            _ => {}
                        }
                    }
                }
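
                // For example: the arm above is what lets a newtype such as
                // `struct Wrapper(u32)` keep its field's plain `Abi::Scalar`
                // instead of becoming `Abi::Aggregate`.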

                // Two non-ZST fields, and they're both scalars.
                (
                    Some((i, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(ref a), .. }, .. })),
                    Some((j, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(ref b), .. }, .. })),
                    None,
                ) => {
                    // Order by the memory placement, not source order.
                    let ((i, a), (j, b)) =
                        if offsets[i] < offsets[j] { ((i, a), (j, b)) } else { ((j, b), (i, a)) };
                    let pair = self.scalar_pair(a.clone(), b.clone());
                    let pair_offsets = match pair.fields {
                        FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
                            assert_eq!(memory_index, &[0, 1]);
                            offsets
                        }
                        _ => bug!(),
                    };
                    if offsets[i] == pair_offsets[0]
                        && offsets[j] == pair_offsets[1]
                        && align == pair.align
                        && size == pair.size
                    {
                        // We can use `ScalarPair` only when it matches our
                        // already computed layout (including `#[repr(C)]`).
                        abi = pair.abi;
                    }
                }

                _ => {}
            }
        }

        if sized && fields.iter().any(|f| f.abi.is_uninhabited()) {
            abi = Abi::Uninhabited;
        }

        Ok(Layout {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Arbitrary { offsets, memory_index },
            abi,
            largest_niche,
            align,
            size,
        })
    }

    fn layout_raw_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx Layout, LayoutError<'tcx>> {
        let tcx = self.tcx;
        let param_env = self.param_env;
        let dl = self.data_layout();
        let scalar_unit = |value: Primitive| {
            let bits = value.size(dl).bits();
            assert!(bits <= 128);
            Scalar { value, valid_range: 0..=(!0 >> (128 - bits)) }
        };
        let scalar = |value: Primitive| tcx.intern_layout(Layout::scalar(self, scalar_unit(value)));

        let univariant = |fields: &[TyAndLayout<'_>], repr: &ReprOptions, kind| {
            Ok(tcx.intern_layout(self.univariant_uninterned(ty, fields, repr, kind)?))
        };
        debug_assert!(!ty.has_infer_types_or_consts());

        Ok(match *ty.kind() {
            // Basic scalars.
            ty::Bool => tcx.intern_layout(Layout::scalar(
                self,
                Scalar { value: Int(I8, false), valid_range: 0..=1 },
            )),
            ty::Char => tcx.intern_layout(Layout::scalar(
                self,
                Scalar { value: Int(I32, false), valid_range: 0..=0x10FFFF },
            )),
            ty::Int(ity) => scalar(Int(Integer::from_attr(dl, attr::SignedInt(ity)), true)),
            ty::Uint(ity) => scalar(Int(Integer::from_attr(dl, attr::UnsignedInt(ity)), false)),
            ty::Float(fty) => scalar(match fty {
                ast::FloatTy::F32 => F32,
                ast::FloatTy::F64 => F64,
            }),
            ty::FnPtr(_) => {
                let mut ptr = scalar_unit(Pointer);
                ptr.valid_range = 1..=*ptr.valid_range.end();
                tcx.intern_layout(Layout::scalar(self, ptr))
            }

            // The never type.
            ty::Never => tcx.intern_layout(Layout {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldsShape::Primitive,
                abi: Abi::Uninhabited,
                largest_niche: None,
                align: dl.i8_align,
                size: Size::ZERO,
            }),

            // Potentially-wide pointers.
            ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                let mut data_ptr = scalar_unit(Pointer);
                if !ty.is_unsafe_ptr() {
                    data_ptr.valid_range = 1..=*data_ptr.valid_range.end();
                }

                let pointee = tcx.normalize_erasing_regions(param_env, pointee);
                if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
                    return Ok(tcx.intern_layout(Layout::scalar(self, data_ptr)));
                }

                let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
                let metadata = match unsized_part.kind() {
                    ty::Foreign(..) => {
                        return Ok(tcx.intern_layout(Layout::scalar(self, data_ptr)));
                    }
                    ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)),
                    ty::Dynamic(..) => {
                        let mut vtable = scalar_unit(Pointer);
                        vtable.valid_range = 1..=*vtable.valid_range.end();
                        vtable
                    }
                    _ => return Err(LayoutError::Unknown(unsized_part)),
                };

                // Effectively a (ptr, meta) tuple.
                tcx.intern_layout(self.scalar_pair(data_ptr, metadata))
            }
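
            // For example: `&u8` hits the sized early return above and is a
            // single non-null scalar, while `&[u8]` reaches the scalar pair:
            // a non-null data pointer plus a `usize` length.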

            // Arrays and slices.
            ty::Array(element, mut count) => {
                if count.has_projections() {
                    count = tcx.normalize_erasing_regions(param_env, count);
                    if count.has_projections() {
                        return Err(LayoutError::Unknown(ty));
                    }
                }

                let count = count.try_eval_usize(tcx, param_env).ok_or(LayoutError::Unknown(ty))?;
                let element = self.layout_of(element)?;
                let size =
                    element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;

                let abi = if count != 0 && ty.conservative_is_privately_uninhabited(tcx) {
                    Abi::Uninhabited
                } else {
                    Abi::Aggregate { sized: true }
                };

                let largest_niche = if count != 0 { element.largest_niche.clone() } else { None };

                tcx.intern_layout(Layout {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldsShape::Array { stride: element.size, count },
                    abi,
                    largest_niche,
                    align: element.align,
                    size,
                })
            }
            ty::Slice(element) => {
                let element = self.layout_of(element)?;
                tcx.intern_layout(Layout {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldsShape::Array { stride: element.size, count: 0 },
                    abi: Abi::Aggregate { sized: false },
                    largest_niche: None,
                    align: element.align,
                    size: Size::ZERO,
                })
            }
            ty::Str => tcx.intern_layout(Layout {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
                abi: Abi::Aggregate { sized: false },
                largest_niche: None,
                align: dl.i8_align,
                size: Size::ZERO,
            }),

            // Odd unit types.
            ty::FnDef(..) => univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?,
            ty::Dynamic(..) | ty::Foreign(..) => {
                let mut unit = self.univariant_uninterned(
                    ty,
                    &[],
                    &ReprOptions::default(),
                    StructKind::AlwaysSized,
                )?;
                match unit.abi {
                    Abi::Aggregate { ref mut sized } => *sized = false,
                    _ => bug!(),
                }
                tcx.intern_layout(unit)
            }

            ty::Generator(def_id, substs, _) => self.generator_layout(ty, def_id, substs)?,

            ty::Closure(_, ref substs) => {
                let tys = substs.as_closure().upvar_tys();
                univariant(
                    &tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    StructKind::AlwaysSized,
                )?
            }

            ty::Tuple(tys) => {
                let kind =
                    if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };

                univariant(
                    &tys.iter()
                        .map(|k| self.layout_of(k.expect_ty()))
                        .collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    kind,
                )?
            }

            // SIMD vector types.
            ty::Adt(def, substs) if def.repr.simd() => {
                // Supported SIMD vectors are homogeneous ADTs with at least one field:
                //
                // * #[repr(simd)] struct S(T, T, T, T);
                // * #[repr(simd)] struct S { x: T, y: T, z: T, w: T }
                // * #[repr(simd)] struct S([T; 4])
                //
                // where T is a primitive scalar (integer/float/pointer).

                // SIMD vectors with zero fields are not supported.
                // (should be caught by typeck)
                if def.non_enum_variant().fields.is_empty() {
                    tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
                }

                // Type of the first ADT field:
                let f0_ty = def.non_enum_variant().fields[0].ty(tcx, substs);

                // Heterogeneous SIMD vectors are not supported:
                // (should be caught by typeck)
                for fi in &def.non_enum_variant().fields {
                    if fi.ty(tcx, substs) != f0_ty {
                        tcx.sess.fatal(&format!("monomorphising heterogeneous SIMD type `{}`", ty));
                    }
                }

                // The element type and number of elements of the SIMD vector
                // are obtained from:
                //
                // * the element type and length of the single array field, if
                // the first field is of array type, or
                //
                // * the homogeneous field type and the number of fields.
                let (e_ty, e_len, is_array) = if let ty::Array(e_ty, _) = f0_ty.kind() {
                    // First ADT field is an array:

                    // SIMD vectors with multiple array fields are not supported:
                    // (should be caught by typeck)
                    if def.non_enum_variant().fields.len() != 1 {
                        tcx.sess.fatal(&format!(
                            "monomorphising SIMD type `{}` with more than one array field",
                            ty
                        ));
                    }

                    // Extract the number of elements from the layout of the array field:
                    let len = if let Ok(TyAndLayout {
                        layout: Layout { fields: FieldsShape::Array { count, .. }, .. },
                        ..
                    }) = self.layout_of(f0_ty)
                    {
                        count
                    } else {
                        return Err(LayoutError::Unknown(ty));
                    };

                    (*e_ty, *len, true)
                } else {
                    // First ADT field is not an array:
                    (f0_ty, def.non_enum_variant().fields.len() as _, false)
                };
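
                // For example: `#[repr(simd)] struct F32x4([f32; 4]);` gives
                // `(e_ty, e_len, is_array) = (f32, 4, true)`, while
                // `#[repr(simd)] struct F32x4(f32, f32, f32, f32);` gives the
                // same element type and length with `is_array = false`.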

                // SIMD vectors of zero length are not supported.
                //
                // Can't be caught in typeck if the array length is generic.
                if e_len == 0 {
                    tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
                }

                // Compute the ABI of the element type:
                let e_ly = self.layout_of(e_ty)?;
                let e_abi = if let Abi::Scalar(ref scalar) = e_ly.abi {
                    scalar.clone()
                } else {
                    // This error isn't caught in typeck, e.g., if
                    // the element type of the vector is generic.
                    tcx.sess.fatal(&format!(
                        "monomorphising SIMD type `{}` with a non-primitive-scalar \
                        (integer/float/pointer) element type `{}`",
                        ty, e_ty
                    ))
                };

                // Compute the size and alignment of the vector:
                let size = e_ly.size.checked_mul(e_len, dl).ok_or(LayoutError::SizeOverflow(ty))?;
                let align = dl.vector_align(size);
                let size = size.align_to(align.abi);

                // Compute the placement of the vector fields:
                let fields = if is_array {
                    FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] }
                } else {
                    FieldsShape::Array { stride: e_ly.size, count: e_len }
                };

                tcx.intern_layout(Layout {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields,
                    abi: Abi::Vector { element: e_abi, count: e_len },
                    largest_niche: e_ly.largest_niche.clone(),
                    size,
                    align,
                })
            }

            // ADTs.
            ty::Adt(def, substs) => {
                // Cache the field layouts.
                let variants = def
                    .variants
                    .iter()
                    .map(|v| {
                        v.fields
                            .iter()
                            .map(|field| self.layout_of(field.ty(tcx, substs)))
                            .collect::<Result<Vec<_>, _>>()
                    })
                    .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                if def.is_union() {
                    if def.repr.pack.is_some() && def.repr.align.is_some() {
                        bug!("union cannot be packed and aligned");
                    }

                    let mut align =
                        if def.repr.pack.is_some() { dl.i8_align } else { dl.aggregate_align };

                    if let Some(repr_align) = def.repr.align {
                        align = align.max(AbiAndPrefAlign::new(repr_align));
                    }

                    let optimize = !def.repr.inhibit_union_abi_opt();
                    let mut size = Size::ZERO;
                    let mut abi = Abi::Aggregate { sized: true };
                    let index = VariantIdx::new(0);
                    for field in &variants[index] {
                        assert!(!field.is_unsized());
                        align = align.max(field.align);

                        // If all non-ZST fields have the same ABI, forward this ABI
                        if optimize && !field.is_zst() {
                            // Normalize scalar_unit to the maximal valid range
                            let field_abi = match &field.abi {
                                Abi::Scalar(x) => Abi::Scalar(scalar_unit(x.value)),
                                Abi::ScalarPair(x, y) => {
                                    Abi::ScalarPair(scalar_unit(x.value), scalar_unit(y.value))
                                }
                                Abi::Vector { element: x, count } => {
                                    Abi::Vector { element: scalar_unit(x.value), count: *count }
                                }
                                Abi::Uninhabited | Abi::Aggregate { .. } => {
                                    Abi::Aggregate { sized: true }
                                }
                            };

                            if size == Size::ZERO {
                                // First non-ZST field: initialize `abi`.
                                abi = field_abi;
                            } else if abi != field_abi {
                                // Fields with different ABIs: reset to Aggregate.
                                abi = Abi::Aggregate { sized: true };
                            }
                        }

                        size = cmp::max(size, field.size);
                    }
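
                    // For example: `union U { a: u32, b: u32 }` forwards
                    // `Abi::Scalar`, while `union V { a: u32, b: f32 }` keeps
                    // `Abi::Aggregate` because the scalar kinds differ.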

                    if let Some(pack) = def.repr.pack {
                        align = align.min(AbiAndPrefAlign::new(pack));
                    }

                    return Ok(tcx.intern_layout(Layout {
                        variants: Variants::Single { index },
                        fields: FieldsShape::Union(
                            NonZeroUsize::new(variants[index].len())
                                .ok_or(LayoutError::Unknown(ty))?,
                        ),
                        abi,
                        largest_niche: None,
                        align,
                        size: size.align_to(align.abi),
                    }));
                }

                // A variant is absent if it's uninhabited and only has ZST fields.
                // Present uninhabited variants only require space for their fields,
                // but *not* an encoding of the discriminant (e.g., a tag value).
                // See issue #49298 for more details on the need to leave space
                // for non-ZST uninhabited data (mostly partial initialization).
                let absent = |fields: &[TyAndLayout<'_>]| {
                    let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
                    let is_zst = fields.iter().all(|f| f.is_zst());
                    uninhabited && is_zst
                };
                let (present_first, present_second) = {
                    let mut present_variants = variants
                        .iter_enumerated()
                        .filter_map(|(i, v)| if absent(v) { None } else { Some(i) });
                    (present_variants.next(), present_variants.next())
                };
                let present_first = match present_first {
                    Some(present_first) => present_first,
                    // Uninhabited because it has no variants, or only absent ones.
                    None if def.is_enum() => return tcx.layout_raw(param_env.and(tcx.types.never)),
                    // If it's a struct, still compute a layout so that we can
                    // compute the field offsets.
                    None => VariantIdx::new(0),
                };

                let is_struct = !def.is_enum() ||
                    // Only one variant is present.
                    (present_second.is_none() &&
                    // Representation optimizations are allowed.
                    !def.repr.inhibit_enum_layout_opt());
                if is_struct {
                    // Struct, or univariant enum equivalent to a struct.
                    // (Typechecking will reject discriminant-sizing attrs.)

                    let v = present_first;
                    let kind = if def.is_enum() || variants[v].is_empty() {
                        StructKind::AlwaysSized
                    } else {
                        let param_env = tcx.param_env(def.did);
                        let last_field = def.variants[v].fields.last().unwrap();
                        let always_sized =
                            tcx.type_of(last_field.did).is_sized(tcx.at(DUMMY_SP), param_env);
                        if !always_sized {
                            StructKind::MaybeUnsized
                        } else {
                            StructKind::AlwaysSized
                        }
                    };

                    let mut st = self.univariant_uninterned(ty, &variants[v], &def.repr, kind)?;
                    st.variants = Variants::Single { index: v };
                    let (start, end) = self.tcx.layout_scalar_valid_range(def.did);
                    match st.abi {
                        Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
                            // The asserts ensure that we are not using the
                            // `#[rustc_layout_scalar_valid_range(n)]`
                            // attribute to widen the range of anything, as that
                            // would probably result in UB somewhere.
                            // FIXME(eddyb) the asserts are probably not needed,
                            // as larger validity ranges would result in missed
                            // optimizations, *not* wrongly assuming the inner
                            // value is valid. e.g. unions enlarge validity ranges,
                            // because the values may be uninitialized.
                            if let Bound::Included(start) = start {
                                // FIXME(eddyb) this might be incorrect - it doesn't
                                // account for wrap-around (end < start) ranges.
                                assert!(*scalar.valid_range.start() <= start);
                                scalar.valid_range = start..=*scalar.valid_range.end();
                            }
                            if let Bound::Included(end) = end {
                                // FIXME(eddyb) this might be incorrect - it doesn't
                                // account for wrap-around (end < start) ranges.
                                assert!(*scalar.valid_range.end() >= end);
                                scalar.valid_range = *scalar.valid_range.start()..=end;
                            }

                            // Update `largest_niche` if we have introduced a larger niche.
                            let niche = if def.repr.hide_niche() {
                                None
                            } else {
                                Niche::from_scalar(dl, Size::ZERO, scalar.clone())
                            };
                            if let Some(niche) = niche {
                                match &st.largest_niche {
                                    Some(largest_niche) => {
                                        // Replace the existing niche even if they're equal,
                                        // because this one is at a lower offset.
                                        if largest_niche.available(dl) <= niche.available(dl) {
                                            st.largest_niche = Some(niche);
                                        }
                                    }
                                    None => st.largest_niche = Some(niche),
                                }
                            }
                        }
                        _ => assert!(
                            start == Bound::Unbounded && end == Bound::Unbounded,
                            "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
                            def,
                            st,
                        ),
                    }

                    return Ok(tcx.intern_layout(st));
                }

                // At this point, we have handled all unions and
                // structs. (We have also handled univariant enums
                // that allow representation optimization.)
                assert!(def.is_enum());

                // The current code for niche-filling relies on variant indices
                // instead of actual discriminants, so dataful enums with
                // explicit discriminants (RFC #2363) would misbehave.
                let no_explicit_discriminants = def
                    .variants
                    .iter_enumerated()
                    .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i.as_u32()));

                let mut niche_filling_layout = None;

                // Niche-filling enum optimization.
                if !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants {
                    let mut dataful_variant = None;
                    let mut niche_variants = VariantIdx::MAX..=VariantIdx::new(0);

                    // Find one non-ZST variant.
                    'variants: for (v, fields) in variants.iter_enumerated() {
                        if absent(fields) {
                            continue 'variants;
                        }
                        for f in fields {
                            if !f.is_zst() {
                                if dataful_variant.is_none() {
                                    dataful_variant = Some(v);
                                    continue 'variants;
                                } else {
                                    dataful_variant = None;
                                    break 'variants;
                                }
                            }
                        }
                        niche_variants = *niche_variants.start().min(&v)..=v;
                    }

                    if niche_variants.start() > niche_variants.end() {
                        dataful_variant = None;
                    }

                    if let Some(i) = dataful_variant {
                        let count = (niche_variants.end().as_u32()
                            - niche_variants.start().as_u32()
                            + 1) as u128;

                        // Find the field with the largest niche
                        let niche_candidate = variants[i]
                            .iter()
                            .enumerate()
                            .filter_map(|(j, &field)| Some((j, field.largest_niche.as_ref()?)))
                            .max_by_key(|(_, niche)| niche.available(dl));
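
                        // For example: for `Option<&u8>`, the dataful variant
                        // is `Some` and `count` is 1; the reference's non-null
                        // niche lets the value 0 encode `None`, so no separate
                        // tag is needed.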

                        if let Some((field_index, niche, (niche_start, niche_scalar))) =
                            niche_candidate.and_then(|(field_index, niche)| {
                                Some((field_index, niche, niche.reserve(self, count)?))
                            })
                        {
                            let mut align = dl.aggregate_align;
                            let st = variants
                                .iter_enumerated()
                                .map(|(j, v)| {
                                    let mut st = self.univariant_uninterned(
                                        ty,
                                        v,
                                        &def.repr,
                                        StructKind::AlwaysSized,
                                    )?;
                                    st.variants = Variants::Single { index: j };

                                    align = align.max(st.align);

                                    Ok(st)
                                })
                                .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                            let offset = st[i].fields.offset(field_index) + niche.offset;
                            let size = st[i].size;

                            let abi = if st.iter().all(|v| v.abi.is_uninhabited()) {
                                Abi::Uninhabited
                            } else {
                                match st[i].abi {
                                    Abi::Scalar(_) => Abi::Scalar(niche_scalar.clone()),
                                    Abi::ScalarPair(ref first, ref second) => {
                                        // We need to use scalar_unit to reset the
                                        // valid range to the maximal one for that
                                        // primitive, because only the niche is
                                        // guaranteed to be initialised, not the
                                        // other primitive.
                                        if offset.bytes() == 0 {
                                            Abi::ScalarPair(
                                                niche_scalar.clone(),
                                                scalar_unit(second.value),
                                            )
                                        } else {
                                            Abi::ScalarPair(
                                                scalar_unit(first.value),
                                                niche_scalar.clone(),
                                            )
                                        }
                                    }
                                    _ => Abi::Aggregate { sized: true },
                                }
                            };

                            let largest_niche =
                                Niche::from_scalar(dl, offset, niche_scalar.clone());

                            niche_filling_layout = Some(Layout {
                                variants: Variants::Multiple {
                                    tag: niche_scalar,
                                    tag_encoding: TagEncoding::Niche {
                                        dataful_variant: i,
                                        niche_variants,
                                        niche_start,
                                    },
                                    tag_field: 0,
                                    variants: st,
                                },
                                fields: FieldsShape::Arbitrary {
                                    offsets: vec![offset],
                                    memory_index: vec![0],
                                },
                                abi,
                                largest_niche,
                                size,
                                align,
                            });
                        }
                    }
                }

                let (mut min, mut max) = (i128::MAX, i128::MIN);
                let discr_type = def.repr.discr_type();
                let bits = Integer::from_attr(self, discr_type).size().bits();
                for (i, discr) in def.discriminants(tcx) {
                    if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
                        continue;
                    }
                    let mut x = discr.val as i128;
                    if discr_type.is_signed() {
                        // sign extend the raw representation to be an i128
                        x = (x << (128 - bits)) >> (128 - bits);
                    }
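                    // For example: a `repr(i8)` discriminant stored as the raw
                    // value 0xff sign-extends to -1 here rather than 255.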
                    if x < min {
                        min = x;
                    }
                    if x > max {
                        max = x;
                    }
                }
                // We might have no inhabited variants, so pretend there's at least one.
                if (min, max) == (i128::MAX, i128::MIN) {
                    min = 0;
                    max = 0;
                }
                assert!(min <= max, "discriminant range is {}...{}", min, max);
                let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max);

                let mut align = dl.aggregate_align;
                let mut size = Size::ZERO;

                // We're interested in the smallest alignment, so start large.
                let mut start_align = Align::from_bytes(256).unwrap();
                assert_eq!(Integer::for_align(dl, start_align), None);

                // repr(C) on an enum tells us to make a (tag, union) layout,
                // so we need to grow the prefix alignment to be at least
                // the alignment of the union. (This value is used both for
                // determining the alignment of the overall enum, and for
                // determining the alignment of the payload after the tag.)
                let mut prefix_align = min_ity.align(dl).abi;
                if def.repr.c() {
                    for fields in &variants {
                        for field in fields {
                            prefix_align = prefix_align.max(field.align.abi);
                        }
                    }
                }

                // Create the set of structs that represent each variant.
                let mut layout_variants = variants
                    .iter_enumerated()
                    .map(|(i, field_layouts)| {
                        let mut st = self.univariant_uninterned(
                            ty,
                            &field_layouts,
                            &def.repr,
                            StructKind::Prefixed(min_ity.size(), prefix_align),
                        )?;
                        st.variants = Variants::Single { index: i };
                        // Find the first field we can't move later
                        // to make room for a larger discriminant.
                        for field in
                            st.fields.index_by_increasing_offset().map(|j| field_layouts[j])
                        {
                            if !field.is_zst() || field.align.abi.bytes() != 1 {
                                start_align = start_align.min(field.align.abi);
                                break;
                            }
                        }
                        size = cmp::max(size, st.size);
                        align = align.max(st.align);
                        Ok(st)
                    })
                    .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                // Align the maximum variant size to the largest alignment.
                size = size.align_to(align.abi);

                if size.bytes() >= dl.obj_size_bound() {
                    return Err(LayoutError::SizeOverflow(ty));
                }

                let typeck_ity = Integer::from_attr(dl, def.repr.discr_type());
                if typeck_ity < min_ity {
                    // It is a bug if Layout decides on a larger discriminant size
                    // than typeck did at this point (based on the values the
                    // discriminant can take on), because the discriminant will be
                    // loaded and then stored into a variable of the type computed
                    // by typeck. Consider such a case (a bug): typeck decided on a
                    // byte-sized discriminant, but layout thinks we need 16 bits to
                    // store all the discriminant values. Then, in codegen, storing
                    // that 16-bit discriminant into an 8-bit temporary would have
                    // to discard some of the bits needed to represent it (or layout
                    // is wrong in thinking it needs 16 bits).
                    bug!(
                        "layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
                        min_ity,
                        typeck_ity
                    );
                    // However, it is fine to make the discr type however large (as
                    // an optimisation) after this point; we'll just truncate the
                    // value we load in codegen.
                }
1160
1161                 // Check to see if we should use a different type for the
1162                 // discriminant. We can safely use a type with the same size
1163                 // as the alignment of the first field of each variant.
1164                 // We increase the size of the discriminant to avoid LLVM copying
1165                 // padding when it doesn't need to; such padding copies normally cause
1166                 // unaligned loads/stores and excessive memcpy/memset operations. By
1167                 // using a bigger integer size, LLVM can be sure about its contents and
1168                 // won't be so conservative.
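                // For instance (illustrative only): in `enum E { A(u64), B }` the
                // discriminant fits in an `I8`, but the first field is 8-byte
                // aligned, so `Integer::for_align` widens the tag to `I64`,
                // filling what would otherwise be padding before the payload.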
1169
1170                 // Use the initial field alignment
1171                 let mut ity = if def.repr.c() || def.repr.int.is_some() {
1172                     min_ity
1173                 } else {
1174                     Integer::for_align(dl, start_align).unwrap_or(min_ity)
1175                 };
1176
1177                 // If the alignment-derived discriminant type is no larger than the
1178                 // minimal one, don't let the alignment dictate the final size.
1179                 if ity <= min_ity {
1180                     ity = min_ity;
1181                 } else {
1182                     // Patch up the variants' first few fields.
1183                     let old_ity_size = min_ity.size();
1184                     let new_ity_size = ity.size();
1185                     for variant in &mut layout_variants {
1186                         match variant.fields {
1187                             FieldsShape::Arbitrary { ref mut offsets, .. } => {
1188                                 for i in offsets {
1189                                     if *i <= old_ity_size {
1190                                         assert_eq!(*i, old_ity_size);
1191                                         *i = new_ity_size;
1192                                     }
1193                                 }
1194                                 // We might be making the struct larger.
1195                                 if variant.size <= old_ity_size {
1196                                     variant.size = new_ity_size;
1197                                 }
1198                             }
1199                             _ => bug!(),
1200                         }
1201                     }
1202                 }
1203
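                // E.g., for `ity == I8`, `tag_mask == 0xff`; masking below wraps
                // the signed `min`/`max` bounds into the tag's unsigned range
                // (a `min` of -1 becomes 0xff for a one-byte tag).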
1204                 let tag_mask = !0u128 >> (128 - ity.size().bits());
1205                 let tag = Scalar {
1206                     value: Int(ity, signed),
1207                     valid_range: (min as u128 & tag_mask)..=(max as u128 & tag_mask),
1208                 };
1209                 let mut abi = Abi::Aggregate { sized: true };
1210                 if tag.value.size(dl) == size {
1211                     abi = Abi::Scalar(tag.clone());
1212                 } else {
1213                     // Try to use a ScalarPair for all tagged enums.
1214                     let mut common_prim = None;
1215                     for (field_layouts, layout_variant) in variants.iter().zip(&layout_variants) {
1216                         let offsets = match layout_variant.fields {
1217                             FieldsShape::Arbitrary { ref offsets, .. } => offsets,
1218                             _ => bug!(),
1219                         };
1220                         let mut fields =
1221                             field_layouts.iter().zip(offsets).filter(|p| !p.0.is_zst());
1222                         let (field, offset) = match (fields.next(), fields.next()) {
1223                             (None, None) => continue,
1224                             (Some(pair), None) => pair,
1225                             _ => {
1226                                 common_prim = None;
1227                                 break;
1228                             }
1229                         };
1230                         let prim = match field.abi {
1231                             Abi::Scalar(ref scalar) => scalar.value,
1232                             _ => {
1233                                 common_prim = None;
1234                                 break;
1235                             }
1236                         };
1237                         if let Some(pair) = common_prim {
1238                             // This is pretty conservative. We could go fancier
1239                             // by conflating things like i32 and u32, or even
1240                             // realising that (u8, u8) could just cohabit with
1241                             // u16 or even u32.
1242                             if pair != (prim, offset) {
1243                                 common_prim = None;
1244                                 break;
1245                             }
1246                         } else {
1247                             common_prim = Some((prim, offset));
1248                         }
1249                     }
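                    // At this point `common_prim` is `Some` only if every variant
                    // with a non-ZST field has exactly one such field, a scalar at
                    // one shared offset, e.g. `enum E { A(u32), B(u32) }`.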
1250                     if let Some((prim, offset)) = common_prim {
1251                         let pair = self.scalar_pair(tag.clone(), scalar_unit(prim));
1252                         let pair_offsets = match pair.fields {
1253                             FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
1254                                 assert_eq!(memory_index, &[0, 1]);
1255                                 offsets
1256                             }
1257                             _ => bug!(),
1258                         };
1259                         if pair_offsets[0] == Size::ZERO
1260                             && pair_offsets[1] == *offset
1261                             && align == pair.align
1262                             && size == pair.size
1263                         {
1264                             // We can use `ScalarPair` only when it matches our
1265                             // already computed layout (including `#[repr(C)]`).
1266                             abi = pair.abi;
1267                         }
1268                     }
1269                 }
1270
1271                 if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
1272                     abi = Abi::Uninhabited;
1273                 }
1274
1275                 let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag.clone());
1276
1277                 let tagged_layout = Layout {
1278                     variants: Variants::Multiple {
1279                         tag,
1280                         tag_encoding: TagEncoding::Direct,
1281                         tag_field: 0,
1282                         variants: layout_variants,
1283                     },
1284                     fields: FieldsShape::Arbitrary {
1285                         offsets: vec![Size::ZERO],
1286                         memory_index: vec![0],
1287                     },
1288                     largest_niche,
1289                     abi,
1290                     align,
1291                     size,
1292                 };
1293
1294                 let best_layout = match (tagged_layout, niche_filling_layout) {
1295                     (tagged_layout, Some(niche_filling_layout)) => {
1296                         // Pick the smaller layout; otherwise,
1297                         // pick the layout with the larger niche; otherwise,
1298                         // pick tagged as it has simpler codegen.
1299                         cmp::min_by_key(tagged_layout, niche_filling_layout, |layout| {
1300                             let niche_size =
1301                                 layout.largest_niche.as_ref().map_or(0, |n| n.available(dl));
1302                             (layout.size, cmp::Reverse(niche_size))
1303                         })
1304                     }
1305                     (tagged_layout, None) => tagged_layout,
1306                 };
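                // E.g., if both candidate layouts are the same size but the
                // niche-filling one leaves a larger niche available, the
                // `cmp::Reverse(niche_size)` component makes `min_by_key`
                // prefer it; on a complete tie the tagged layout wins.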
1307
1308                 tcx.intern_layout(best_layout)
1309             }
1310
1311             // Types with no meaningful known layout.
1312             ty::Projection(_) | ty::Opaque(..) => {
1313                 let normalized = tcx.normalize_erasing_regions(param_env, ty);
1314                 if ty == normalized {
1315                     return Err(LayoutError::Unknown(ty));
1316                 }
1317                 tcx.layout_raw(param_env.and(normalized))?
1318             }
1319
1320             ty::Placeholder(..) | ty::GeneratorWitness(..) | ty::Infer(_) => {
1321                 bug!("Layout::compute: unexpected type `{}`", ty)
1322             }
1323
1324             ty::Bound(..) | ty::Param(_) | ty::Error(_) => {
1325                 return Err(LayoutError::Unknown(ty));
1326             }
1327         })
1328     }
1329 }
1330
1331 /// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
1332 #[derive(Clone, Debug, PartialEq)]
1333 enum SavedLocalEligibility {
1334     Unassigned,
1335     Assigned(VariantIdx),
1336     // FIXME: Use newtype_index so we aren't wasting bytes
1337     Ineligible(Option<u32>),
1338 }
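// E.g., a local saved at exactly one suspension point ends up `Assigned` to
// that variant; one saved at two or more suspension points becomes
// `Ineligible` and is promoted to the shared prefix (see below).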
1339
1340 // When laying out generators, we divide our saved local fields into two
1341 // categories: overlap-eligible and overlap-ineligible.
1342 //
1343 // Those fields which are ineligible for overlap go in a "prefix" at the
1344 // beginning of the layout, and always have space reserved for them.
1345 //
1346 // Overlap-eligible fields are only assigned to one variant, so we lay
1347 // those fields out for each variant and put them right after the
1348 // prefix.
1349 //
1350 // Finally, in the layout details, we point to the fields from the
1351 // variants they are assigned to. It is possible for some fields to be
1352 // included in multiple variants. No field ever "moves around" in the
1353 // layout; its offset is always the same.
1354 //
1355 // Also included in the layout are the upvars and the discriminant.
1356 // These are included as fields on the "outer" layout; they are not part
1357 // of any variant.
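//
// As a rough sketch (illustrative only), the overall layout produced below is:
//
//     [ upvars... | tag | promoted (ineligible) locals... | variant fields... ]
//       \_______________________ prefix _______________________/
//
// where the trailing, overlap-eligible variant fields may occupy the same
// offsets in different variants.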
1358 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
1359     /// Compute the eligibility and assignment of each local.
1360     fn generator_saved_local_eligibility(
1361         &self,
1362         info: &GeneratorLayout<'tcx>,
1363     ) -> (BitSet<GeneratorSavedLocal>, IndexVec<GeneratorSavedLocal, SavedLocalEligibility>) {
1364         use SavedLocalEligibility::*;
1365
1366         let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> =
1367             IndexVec::from_elem_n(Unassigned, info.field_tys.len());
1368
1369         // The saved locals not eligible for overlap. These will get
1370         // "promoted" to the prefix of our generator.
1371         let mut ineligible_locals = BitSet::new_empty(info.field_tys.len());
1372
1373         // Figure out which of our saved locals are fields in only
1374         // one variant. The rest are deemed ineligible for overlap.
1375         for (variant_index, fields) in info.variant_fields.iter_enumerated() {
1376             for local in fields {
1377                 match assignments[*local] {
1378                     Unassigned => {
1379                         assignments[*local] = Assigned(variant_index);
1380                     }
1381                     Assigned(idx) => {
1382                         // We've already seen this local at another suspension
1383                         // point, so it is no longer a candidate.
1384                         trace!(
1385                             "removing local {:?} in >1 variant ({:?}, {:?})",
1386                             local,
1387                             variant_index,
1388                             idx
1389                         );
1390                         ineligible_locals.insert(*local);
1391                         assignments[*local] = Ineligible(None);
1392                     }
1393                     Ineligible(_) => {}
1394                 }
1395             }
1396         }
1397
1398         // Next, check every pair of eligible locals to see if they
1399         // conflict.
1400         for local_a in info.storage_conflicts.rows() {
1401             let conflicts_a = info.storage_conflicts.count(local_a);
1402             if ineligible_locals.contains(local_a) {
1403                 continue;
1404             }
1405
1406             for local_b in info.storage_conflicts.iter(local_a) {
1407                 // local_a and local_b are storage-live at the same time, therefore they
1408                 // cannot overlap in the generator layout. The only way to guarantee
1409                 // this is if they are in the same variant, or if one is ineligible
1410                 // (which means it is stored in every variant).
1411                 if ineligible_locals.contains(local_b)
1412                     || assignments[local_a] == assignments[local_b]
1413                 {
1414                     continue;
1415                 }
1416
1417                 // If they conflict, we will choose one to make ineligible.
1418                 // This is not always optimal; it's just a greedy heuristic that
1419                 // seems to produce good results most of the time.
1420                 let conflicts_b = info.storage_conflicts.count(local_b);
1421                 let (remove, other) =
1422                     if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
1423                 ineligible_locals.insert(remove);
1424                 assignments[remove] = Ineligible(None);
1425                 trace!("removing local {:?} due to conflict with {:?}", remove, other);
1426             }
1427         }
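        // E.g., if local `a` conflicts with both `b` and `c` while `b` and `c`
        // do not conflict with each other, `a` has the higher conflict count
        // and is evicted, leaving `b` and `c` free to overlap.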
1428
1429         // Count the number of variants in use. If only one variant is used, it is
1430         // impossible to overlap any locals in our layout. In this case it's
1431         // always better to make the remaining locals ineligible, so we can
1432         // lay them out with the other locals in the prefix and eliminate
1433         // unnecessary padding bytes.
1434         {
1435             let mut used_variants = BitSet::new_empty(info.variant_fields.len());
1436             for assignment in &assignments {
1437                 if let Assigned(idx) = assignment {
1438                     used_variants.insert(*idx);
1439                 }
1440             }
1441             if used_variants.count() < 2 {
1442                 for assignment in assignments.iter_mut() {
1443                     *assignment = Ineligible(None);
1444                 }
1445                 ineligible_locals.insert_all();
1446             }
1447         }
1448
1449         // Write down the order of our locals that will be promoted to the prefix.
1450         {
1451             for (idx, local) in ineligible_locals.iter().enumerate() {
1452                 assignments[local] = Ineligible(Some(idx as u32));
1453             }
1454         }
1455         debug!("generator saved local assignments: {:?}", assignments);
1456
1457         (ineligible_locals, assignments)
1458     }
1459
1460     /// Compute the full generator layout.
1461     fn generator_layout(
1462         &self,
1463         ty: Ty<'tcx>,
1464         def_id: hir::def_id::DefId,
1465         substs: SubstsRef<'tcx>,
1466     ) -> Result<&'tcx Layout, LayoutError<'tcx>> {
1467         use SavedLocalEligibility::*;
1468         let tcx = self.tcx;
1469
1470         let subst_field = |ty: Ty<'tcx>| ty.subst(tcx, substs);
1471
1472         let info = tcx.generator_layout(def_id);
1473         let (ineligible_locals, assignments) = self.generator_saved_local_eligibility(&info);
1474
1475         // Build a prefix layout, including "promoting" all ineligible
1476         // locals as part of the prefix. We compute the layout of all of
1477         // these fields at once to get optimal packing.
1478         let tag_index = substs.as_generator().prefix_tys().count();
1479
1480         // `info.variant_fields` already accounts for the reserved variants, so no need to add them.
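        // E.g., a generator with only the reserved variants (unresumed,
        // returned, poisoned) has `max_discr == 2`, so the tag fits in `I8`.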
1481         let max_discr = (info.variant_fields.len() - 1) as u128;
1482         let discr_int = Integer::fit_unsigned(max_discr);
1483         let discr_int_ty = discr_int.to_ty(tcx, false);
1484         let tag = Scalar { value: Primitive::Int(discr_int, false), valid_range: 0..=max_discr };
1485         let tag_layout = self.tcx.intern_layout(Layout::scalar(self, tag.clone()));
1486         let tag_layout = TyAndLayout { ty: discr_int_ty, layout: tag_layout };
1487
1488         let promoted_layouts = ineligible_locals
1489             .iter()
1490             .map(|local| subst_field(info.field_tys[local]))
1491             .map(|ty| tcx.mk_maybe_uninit(ty))
1492             .map(|ty| self.layout_of(ty));
1493         let prefix_layouts = substs
1494             .as_generator()
1495             .prefix_tys()
1496             .map(|ty| self.layout_of(ty))
1497             .chain(iter::once(Ok(tag_layout)))
1498             .chain(promoted_layouts)
1499             .collect::<Result<Vec<_>, _>>()?;
1500         let prefix = self.univariant_uninterned(
1501             ty,
1502             &prefix_layouts,
1503             &ReprOptions::default(),
1504             StructKind::AlwaysSized,
1505         )?;
1506
1507         let (prefix_size, prefix_align) = (prefix.size, prefix.align);
1508
1509         // Split the prefix layout into the "outer" fields (upvars and
1510         // discriminant) and the "promoted" fields. Promoted fields will
1511         // get included in each variant that requested them in
1512         // GeneratorLayout.
1513         debug!("prefix = {:#?}", prefix);
1514         let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
1515             FieldsShape::Arbitrary { mut offsets, memory_index } => {
1516                 let mut inverse_memory_index = invert_mapping(&memory_index);
1517
1518                 // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
1519                 // "outer" and "promoted" fields respectively.
1520                 let b_start = (tag_index + 1) as u32;
1521                 let offsets_b = offsets.split_off(b_start as usize);
1522                 let offsets_a = offsets;
1523
1524                 // Disentangle the "a" and "b" components of `inverse_memory_index`
1525                 // by preserving the order but keeping only one disjoint "half" each.
1526                 // FIXME(eddyb) build a better abstraction for permutations, if possible.
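                // E.g. (illustrative): with `inverse_memory_index == [1, 3, 0, 2]`
                // and `b_start == 2`, the "b" half keeps `[3, 2]` rebased to
                // `[1, 0]`, and the "a" half retains `[1, 0]`; each half keeps
                // its original relative order.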
1527                 let inverse_memory_index_b: Vec<_> =
1528                     inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
1529                 inverse_memory_index.retain(|&i| i < b_start);
1530                 let inverse_memory_index_a = inverse_memory_index;
1531
1532                 // Since `inverse_memory_index_{a,b}` each only refer to their
1533                 // respective fields, they can be safely inverted.
1534                 let memory_index_a = invert_mapping(&inverse_memory_index_a);
1535                 let memory_index_b = invert_mapping(&inverse_memory_index_b);
1536
1537                 let outer_fields =
1538                     FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
1539                 (outer_fields, offsets_b, memory_index_b)
1540             }
1541             _ => bug!(),
1542         };
1543
1544         let mut size = prefix.size;
1545         let mut align = prefix.align;
1546         let variants = info
1547             .variant_fields
1548             .iter_enumerated()
1549             .map(|(index, variant_fields)| {
1550                 // Only include overlap-eligible fields when we compute our variant layout.
1551                 let variant_only_tys = variant_fields
1552                     .iter()
1553                     .filter(|local| match assignments[**local] {
1554                         Unassigned => bug!(),
1555                         Assigned(v) if v == index => true,
1556                         Assigned(_) => bug!("assignment does not match variant"),
1557                         Ineligible(_) => false,
1558                     })
1559                     .map(|local| subst_field(info.field_tys[*local]));
1560
1561                 let mut variant = self.univariant_uninterned(
1562                     ty,
1563                     &variant_only_tys
1564                         .map(|ty| self.layout_of(ty))
1565                         .collect::<Result<Vec<_>, _>>()?,
1566                     &ReprOptions::default(),
1567                     StructKind::Prefixed(prefix_size, prefix_align.abi),
1568                 )?;
1569                 variant.variants = Variants::Single { index };
1570
1571                 let (offsets, memory_index) = match variant.fields {
1572                     FieldsShape::Arbitrary { offsets, memory_index } => (offsets, memory_index),
1573                     _ => bug!(),
1574                 };
1575
1576                 // Now, stitch the promoted and variant-only fields back together in
1577                 // the order they are mentioned by our GeneratorLayout.
1578                 // Because we only use some subset (that can differ between variants)
1579                 // of the promoted fields, we can't just pick those elements of the
1580                 // `promoted_memory_index` (as we'd end up with gaps).
1581                 // So instead, we build an "inverse memory_index", as if all of the
1582                 // promoted fields were being used, but leave the elements not in the
1583                 // subset as `INVALID_FIELD_IDX`, which we can filter out later to
1584                 // obtain a valid (bijective) mapping.
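                // E.g., with two promoted slots where a variant uses only
                // promoted slot 1 plus one variant-only field, slot 0's entry
                // stays `INVALID_FIELD_IDX` and is filtered out below.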
1585                 const INVALID_FIELD_IDX: u32 = !0;
1586                 let mut combined_inverse_memory_index =
1587                     vec![INVALID_FIELD_IDX; promoted_memory_index.len() + memory_index.len()];
1588                 let mut offsets_and_memory_index = offsets.into_iter().zip(memory_index);
1589                 let combined_offsets = variant_fields
1590                     .iter()
1591                     .enumerate()
1592                     .map(|(i, local)| {
1593                         let (offset, memory_index) = match assignments[*local] {
1594                             Unassigned => bug!(),
1595                             Assigned(_) => {
1596                                 let (offset, memory_index) =
1597                                     offsets_and_memory_index.next().unwrap();
1598                                 (offset, promoted_memory_index.len() as u32 + memory_index)
1599                             }
1600                             Ineligible(field_idx) => {
1601                                 let field_idx = field_idx.unwrap() as usize;
1602                                 (promoted_offsets[field_idx], promoted_memory_index[field_idx])
1603                             }
1604                         };
1605                         combined_inverse_memory_index[memory_index as usize] = i as u32;
1606                         offset
1607                     })
1608                     .collect();
1609
1610                 // Remove the unused slots and invert the mapping to obtain the
1611                 // combined `memory_index` (also see previous comment).
1612                 combined_inverse_memory_index.retain(|&i| i != INVALID_FIELD_IDX);
1613                 let combined_memory_index = invert_mapping(&combined_inverse_memory_index);
1614
1615                 variant.fields = FieldsShape::Arbitrary {
1616                     offsets: combined_offsets,
1617                     memory_index: combined_memory_index,
1618                 };
1619
1620                 size = size.max(variant.size);
1621                 align = align.max(variant.align);
1622                 Ok(variant)
1623             })
1624             .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1625
1626         size = size.align_to(align.abi);
1627
1628         let abi = if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi.is_uninhabited())
1629         {
1630             Abi::Uninhabited
1631         } else {
1632             Abi::Aggregate { sized: true }
1633         };
1634
1635         let layout = tcx.intern_layout(Layout {
1636             variants: Variants::Multiple {
1637                 tag,
1638                 tag_encoding: TagEncoding::Direct,
1639                 tag_field: tag_index,
1640                 variants,
1641             },
1642             fields: outer_fields,
1643             abi,
1644             largest_niche: prefix.largest_niche,
1645             size,
1646             align,
1647         });
1648         debug!("generator layout ({:?}): {:#?}", ty, layout);
1649         Ok(layout)
1650     }
1651
1652     /// This is invoked by the `layout_raw` query to record the final
1653     /// layout of each type.
1654     #[inline(always)]
1655     fn record_layout_for_printing(&self, layout: TyAndLayout<'tcx>) {
1656         // If we are running with `-Zprint-type-sizes`, maybe record layouts
1657         // for dumping later.
1658         if self.tcx.sess.opts.debugging_opts.print_type_sizes {
1659             self.record_layout_for_printing_outlined(layout)
1660         }
1661     }
1662
1663     fn record_layout_for_printing_outlined(&self, layout: TyAndLayout<'tcx>) {
1664         // Ignore layouts computed with non-empty environments, as well as
1665         // non-monomorphic layouts, as the user only wants to see the stuff
1666         // resulting from the final codegen session.
1667         if layout.ty.has_param_types_or_consts() || !self.param_env.caller_bounds().is_empty() {
1668             return;
1669         }
1670
1671         // (delay format until we actually need it)
1672         let record = |kind, packed, opt_discr_size, variants| {
1673             let type_desc = format!("{:?}", layout.ty);
1674             self.tcx.sess.code_stats.record_type_size(
1675                 kind,
1676                 type_desc,
1677                 layout.align.abi,
1678                 layout.size,
1679                 packed,
1680                 opt_discr_size,
1681                 variants,
1682             );
1683         };
1684
1685         let adt_def = match *layout.ty.kind() {
1686             ty::Adt(ref adt_def, _) => {
1687                 debug!("print-type-size t: `{:?}` process adt", layout.ty);
1688                 adt_def
1689             }
1690
1691             ty::Closure(..) => {
1692                 debug!("print-type-size t: `{:?}` record closure", layout.ty);
1693                 record(DataTypeKind::Closure, false, None, vec![]);
1694                 return;
1695             }
1696
1697             _ => {
1698                 debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
1699                 return;
1700             }
1701         };
1702
1703         let adt_kind = adt_def.adt_kind();
1704         let adt_packed = adt_def.repr.pack.is_some();
1705
1706         let build_variant_info = |n: Option<Ident>, flds: &[Symbol], layout: TyAndLayout<'tcx>| {
1707             let mut min_size = Size::ZERO;
1708             let field_info: Vec<_> = flds
1709                 .iter()
1710                 .enumerate()
1711                 .map(|(i, &name)| match layout.field(self, i) {
1712                     Err(err) => {
1713                         bug!("no layout found for field {}: `{:?}`", name, err);
1714                     }
1715                     Ok(field_layout) => {
1716                         let offset = layout.fields.offset(i);
1717                         let field_end = offset + field_layout.size;
1718                         if min_size < field_end {
1719                             min_size = field_end;
1720                         }
1721                         FieldInfo {
1722                             name: name.to_string(),
1723                             offset: offset.bytes(),
1724                             size: field_layout.size.bytes(),
1725                             align: field_layout.align.abi.bytes(),
1726                         }
1727                     }
1728                 })
1729                 .collect();
1730
1731             VariantInfo {
1732                 name: n.map(|n| n.to_string()),
1733                 kind: if layout.is_unsized() { SizeKind::Min } else { SizeKind::Exact },
1734                 align: layout.align.abi.bytes(),
1735                 size: if min_size.bytes() == 0 { layout.size.bytes() } else { min_size.bytes() },
1736                 fields: field_info,
1737             }
1738         };
1739
1740         match layout.variants {
1741             Variants::Single { index } => {
1742                 debug!("print-type-size `{:#?}` variant {}", layout, adt_def.variants[index].ident);
1743                 if !adt_def.variants.is_empty() {
1744                     let variant_def = &adt_def.variants[index];
1745                     let fields: Vec<_> = variant_def.fields.iter().map(|f| f.ident.name).collect();
1746                     record(
1747                         adt_kind.into(),
1748                         adt_packed,
1749                         None,
1750                         vec![build_variant_info(Some(variant_def.ident), &fields, layout)],
1751                     );
1752                 } else {
1753                 // (This case arises for *empty* enums, so give it
1754                 // zero variants.)
1755                     record(adt_kind.into(), adt_packed, None, vec![]);
1756                 }
1757             }
1758
1759             Variants::Multiple { ref tag, ref tag_encoding, .. } => {
1760                 debug!(
1761                     "print-type-size `{:#?}` adt general variants def {}",
1762                     layout.ty,
1763                     adt_def.variants.len()
1764                 );
1765                 let variant_infos: Vec<_> = adt_def
1766                     .variants
1767                     .iter_enumerated()
1768                     .map(|(i, variant_def)| {
1769                         let fields: Vec<_> =
1770                             variant_def.fields.iter().map(|f| f.ident.name).collect();
1771                         build_variant_info(
1772                             Some(variant_def.ident),
1773                             &fields,
1774                             layout.for_variant(self, i),
1775                         )
1776                     })
1777                     .collect();
1778                 record(
1779                     adt_kind.into(),
1780                     adt_packed,
1781                     match tag_encoding {
1782                         TagEncoding::Direct => Some(tag.value.size(self)),
1783                         _ => None,
1784                     },
1785                     variant_infos,
1786                 );
1787             }
1788         }
1789     }
1790 }
1791
1792 /// Type size "skeleton", i.e., the only information determining a type's size.
1793 /// While this is conservative (aside from constant sizes, only pointers,
1794 /// newtypes thereof, and null-pointer-optimized enums are allowed), it is
1795 /// enough to statically check common use cases of transmute.
1796 #[derive(Copy, Clone, Debug)]
1797 pub enum SizeSkeleton<'tcx> {
1798     /// Any statically computable Layout.
1799     Known(Size),
1800
1801     /// A potentially-fat pointer.
1802     Pointer {
1803         /// If true, this pointer is never null.
1804         non_zero: bool,
1805         /// The type which determines the unsized metadata, if any,
1806         /// of this pointer. Either a type parameter or a projection
1807         /// depending on one, with regions erased.
1808         tail: Ty<'tcx>,
1809     },
1810 }
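// For example, with `T` a type parameter, both `&T` and `Option<&T>` reduce to
// `SizeSkeleton::Pointer` with the same `tail`, so a transmute between them can
// be accepted even though neither has a statically computable layout.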
1811
1812 impl<'tcx> SizeSkeleton<'tcx> {
1813     pub fn compute(
1814         ty: Ty<'tcx>,
1815         tcx: TyCtxt<'tcx>,
1816         param_env: ty::ParamEnv<'tcx>,
1817     ) -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
1818         debug_assert!(!ty.has_infer_types_or_consts());
1819
1820         // First try computing a static layout.
1821         let err = match tcx.layout_of(param_env.and(ty)) {
1822             Ok(layout) => {
1823                 return Ok(SizeSkeleton::Known(layout.size));
1824             }
1825             Err(err) => err,
1826         };
1827
1828         match *ty.kind() {
1829             ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
1830                 let non_zero = !ty.is_unsafe_ptr();
1831                 let tail = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
1832                 match tail.kind() {
1833                     ty::Param(_) | ty::Projection(_) => {
1834                         debug_assert!(tail.has_param_types_or_consts());
1835                         Ok(SizeSkeleton::Pointer { non_zero, tail: tcx.erase_regions(tail) })
1836                     }
1837                     _ => bug!(
1838                         "SizeSkeleton::compute({}): layout errored ({}), yet \
1839                               tail `{}` is not a type parameter or a projection",
1840                         ty,
1841                         err,
1842                         tail
1843                     ),
1844                 }
1845             }
1846
1847             ty::Adt(def, substs) => {
1848                 // Only newtypes and enums with the nullable pointer optimization.
1849                 if def.is_union() || def.variants.is_empty() || def.variants.len() > 2 {
1850                     return Err(err);
1851                 }
1852
1853                 // Get a zero-sized variant or a pointer newtype.
1854                 let zero_or_ptr_variant = |i| {
1855                     let i = VariantIdx::new(i);
1856                     let fields = def.variants[i]
1857                         .fields
1858                         .iter()
1859                         .map(|field| SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env));
1860                     let mut ptr = None;
1861                     for field in fields {
1862                         let field = field?;
1863                         match field {
1864                             SizeSkeleton::Known(size) => {
1865                                 if size.bytes() > 0 {
1866                                     return Err(err);
1867                                 }
1868                             }
1869                             SizeSkeleton::Pointer { .. } => {
1870                                 if ptr.is_some() {
1871                                     return Err(err);
1872                                 }
1873                                 ptr = Some(field);
1874                             }
1875                         }
1876                     }
1877                     Ok(ptr)
1878                 };
1879
1880                 let v0 = zero_or_ptr_variant(0)?;
1881                 // Newtype.
1882                 if def.variants.len() == 1 {
1883                     if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
1884                         return Ok(SizeSkeleton::Pointer {
1885                             non_zero: non_zero
1886                                 || match tcx.layout_scalar_valid_range(def.did) {
1887                                     (Bound::Included(start), Bound::Unbounded) => start > 0,
1888                                     (Bound::Included(start), Bound::Included(end)) => {
1889                                         0 < start && start < end
1890                                     }
1891                                     _ => false,
1892                                 },
1893                             tail,
1894                         });
1895                     } else {
1896                         return Err(err);
1897                     }
1898                 }
1899
1900                 let v1 = zero_or_ptr_variant(1)?;
1901                 // Nullable pointer enum optimization.
1902                 match (v0, v1) {
1903                     (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None)
1904                     | (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
1905                         Ok(SizeSkeleton::Pointer { non_zero: false, tail })
1906                     }
1907                     _ => Err(err),
1908                 }
1909             }
1910
1911             ty::Projection(_) | ty::Opaque(..) => {
1912                 let normalized = tcx.normalize_erasing_regions(param_env, ty);
1913                 if ty == normalized {
1914                     Err(err)
1915                 } else {
1916                     SizeSkeleton::compute(normalized, tcx, param_env)
1917                 }
1918             }
1919
1920             _ => Err(err),
1921         }
1922     }
1923
1924     pub fn same_size(self, other: SizeSkeleton<'_>) -> bool {
1925         match (self, other) {
1926             (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
1927             (SizeSkeleton::Pointer { tail: a, .. }, SizeSkeleton::Pointer { tail: b, .. }) => {
1928                 a == b
1929             }
1930             _ => false,
1931         }
1932     }
1933 }
1934
1935 pub trait HasTyCtxt<'tcx>: HasDataLayout {
1936     fn tcx(&self) -> TyCtxt<'tcx>;
1937 }
1938
1939 pub trait HasParamEnv<'tcx> {
1940     fn param_env(&self) -> ty::ParamEnv<'tcx>;
1941 }
1942
1943 impl<'tcx> HasDataLayout for TyCtxt<'tcx> {
1944     fn data_layout(&self) -> &TargetDataLayout {
1945         &self.data_layout
1946     }
1947 }
1948
1949 impl<'tcx> HasTyCtxt<'tcx> for TyCtxt<'tcx> {
1950     fn tcx(&self) -> TyCtxt<'tcx> {
1951         *self
1952     }
1953 }
1954
1955 impl<'tcx, C> HasParamEnv<'tcx> for LayoutCx<'tcx, C> {
1956     fn param_env(&self) -> ty::ParamEnv<'tcx> {
1957         self.param_env
1958     }
1959 }
1960
1961 impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
1962     fn data_layout(&self) -> &TargetDataLayout {
1963         self.tcx.data_layout()
1964     }
1965 }
1966
1967 impl<'tcx, T: HasTyCtxt<'tcx>> HasTyCtxt<'tcx> for LayoutCx<'tcx, T> {
1968     fn tcx(&self) -> TyCtxt<'tcx> {
1969         self.tcx.tcx()
1970     }
1971 }
1972
1973 pub type TyAndLayout<'tcx> = rustc_target::abi::TyAndLayout<'tcx, Ty<'tcx>>;
1974
1975 impl<'tcx> LayoutOf for LayoutCx<'tcx, TyCtxt<'tcx>> {
1976     type Ty = Ty<'tcx>;
1977     type TyAndLayout = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
1978
1979     /// Computes the layout of a type. Note that this implicitly
1980     /// executes in "reveal all" mode.
1981     fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout {
1982         let param_env = self.param_env.with_reveal_all_normalized(self.tcx);
1983         let ty = self.tcx.normalize_erasing_regions(param_env, ty);
1984         let layout = self.tcx.layout_raw(param_env.and(ty))?;
1985         let layout = TyAndLayout { ty, layout };
1986
1987         // N.B., this recording is normally disabled; when enabled, it
1988         // can however trigger recursive invocations of `layout_of`.
1989         // Therefore, we execute it *after* the main query has
1990         // completed, to avoid problems around recursive structures
1991         // and the like. (Admittedly, I wasn't able to reproduce a problem
1992         // here, but it seems like the right thing to do. -nmatsakis)
1993         self.record_layout_for_printing(layout);
1994
1995         Ok(layout)
1996     }
1997 }
1998
1999 impl LayoutOf for LayoutCx<'tcx, ty::query::TyCtxtAt<'tcx>> {
2000     type Ty = Ty<'tcx>;
2001     type TyAndLayout = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
2002
2003     /// Computes the layout of a type. Note that this implicitly
2004     /// executes in "reveal all" mode.
2005     fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout {
2006         let param_env = self.param_env.with_reveal_all_normalized(*self.tcx);
2007         let ty = self.tcx.normalize_erasing_regions(param_env, ty);
2008         let layout = self.tcx.layout_raw(param_env.and(ty))?;
2009         let layout = TyAndLayout { ty, layout };
2010
2011         // N.B., this recording is normally disabled; when enabled, it
2012         // can however trigger recursive invocations of `layout_of`.
2013         // Therefore, we execute it *after* the main query has
2014         // completed, to avoid problems around recursive structures
2015         // and the like. (Admittedly, I wasn't able to reproduce a problem
2016         // here, but it seems like the right thing to do. -nmatsakis)
2017         let cx = LayoutCx { tcx: *self.tcx, param_env: self.param_env };
2018         cx.record_layout_for_printing(layout);
2019
2020         Ok(layout)
2021     }
2022 }
2023
2024 // Helper (inherent) `layout_of` methods to avoid pushing `LayoutCx` to users.
2025 impl TyCtxt<'tcx> {
2026     /// Computes the layout of a type. Note that this implicitly
2027     /// executes in "reveal all" mode.
2028     #[inline]
2029     pub fn layout_of(
2030         self,
2031         param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
2032     ) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
2033         let cx = LayoutCx { tcx: self, param_env: param_env_and_ty.param_env };
2034         cx.layout_of(param_env_and_ty.value)
2035     }
2036 }
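// Illustrative use (with hypothetical `tcx: TyCtxt<'_>` and `ty: Ty<'_>`
// bindings in scope):
//
//     let layout = tcx.layout_of(ty::ParamEnv::reveal_all().and(ty))?;
//     debug!("size = {:?}, align = {:?}", layout.size, layout.align.abi);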
2037
2038 impl ty::query::TyCtxtAt<'tcx> {
2039     /// Computes the layout of a type. Note that this implicitly
2040     /// executes in "reveal all" mode.
2041     #[inline]
2042     pub fn layout_of(
2043         self,
2044         param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
2045     ) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
2046         let cx = LayoutCx { tcx: self.at(self.span), param_env: param_env_and_ty.param_env };
2047         cx.layout_of(param_env_and_ty.value)
2048     }
2049 }
2050
2051 impl<'tcx, C> TyAndLayoutMethods<'tcx, C> for Ty<'tcx>
2052 where
2053     C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout: MaybeResult<TyAndLayout<'tcx>>>
2054         + HasTyCtxt<'tcx>
2055         + HasParamEnv<'tcx>,
2056 {
2057     fn for_variant(
2058         this: TyAndLayout<'tcx>,
2059         cx: &C,
2060         variant_index: VariantIdx,
2061     ) -> TyAndLayout<'tcx> {
2062         let layout = match this.variants {
2063             Variants::Single { index }
2064                 // If all variants but one are uninhabited, the variant layout is the enum layout.
2065                 if index == variant_index &&
2066                 // Don't confuse variants of uninhabited enums with the enum itself.
2067                 // For more details see https://github.com/rust-lang/rust/issues/69763.
2068                 this.fields != FieldsShape::Primitive =>
2069             {
2070                 this.layout
2071             }
2072
2073             Variants::Single { index } => {
2074                 // Deny calling for_variant more than once for non-Single enums.
2075                 if let Ok(original_layout) = cx.layout_of(this.ty).to_result() {
2076                     assert_eq!(original_layout.variants, Variants::Single { index });
2077                 }
2078
2079                 let fields = match this.ty.kind() {
2080                     ty::Adt(def, _) if def.variants.is_empty() =>
2081                         bug!("for_variant called on zero-variant enum"),
2082                     ty::Adt(def, _) => def.variants[variant_index].fields.len(),
2083                     _ => bug!(),
2084                 };
2085                 let tcx = cx.tcx();
2086                 tcx.intern_layout(Layout {
2087                     variants: Variants::Single { index: variant_index },
2088                     fields: match NonZeroUsize::new(fields) {
2089                         Some(fields) => FieldsShape::Union(fields),
2090                         None => FieldsShape::Arbitrary { offsets: vec![], memory_index: vec![] },
2091                     },
2092                     abi: Abi::Uninhabited,
2093                     largest_niche: None,
2094                     align: tcx.data_layout.i8_align,
2095                     size: Size::ZERO,
2096                 })
2097             }
2098
2099             Variants::Multiple { ref variants, .. } => &variants[variant_index],
2100         };
2101
2102         assert_eq!(layout.variants, Variants::Single { index: variant_index });
2103
2104         TyAndLayout { ty: this.ty, layout }
2105     }
2106
2107     fn field(this: TyAndLayout<'tcx>, cx: &C, i: usize) -> C::TyAndLayout {
2108         let tcx = cx.tcx();
2109         let tag_layout = |tag: &Scalar| -> C::TyAndLayout {
2110             let layout = Layout::scalar(cx, tag.clone());
2111             MaybeResult::from(Ok(TyAndLayout {
2112                 layout: tcx.intern_layout(layout),
2113                 ty: tag.value.to_ty(tcx),
2114             }))
2115         };
2116
2117         cx.layout_of(match *this.ty.kind() {
2118             ty::Bool
2119             | ty::Char
2120             | ty::Int(_)
2121             | ty::Uint(_)
2122             | ty::Float(_)
2123             | ty::FnPtr(_)
2124             | ty::Never
2125             | ty::FnDef(..)
2126             | ty::GeneratorWitness(..)
2127             | ty::Foreign(..)
2128             | ty::Dynamic(..) => bug!("TyAndLayout::field_type({:?}): not applicable", this),
2129
2130             // Potentially-fat pointers.
2131             ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
2132                 assert!(i < this.fields.count());
2133
2134                 // Reuse the fat `*T` type as its own thin pointer data field.
2135                 // This provides information about, e.g., DST struct pointees
2136                 // (which may have no non-DST form), and will work as long
2137                 // as the `Abi` or `FieldsShape` is checked by users.
2138                 if i == 0 {
2139                     let nil = tcx.mk_unit();
2140                     let ptr_ty = if this.ty.is_unsafe_ptr() {
2141                         tcx.mk_mut_ptr(nil)
2142                     } else {
2143                         tcx.mk_mut_ref(tcx.lifetimes.re_static, nil)
2144                     };
2145                     return MaybeResult::from(cx.layout_of(ptr_ty).to_result().map(
2146                         |mut ptr_layout| {
2147                             ptr_layout.ty = this.ty;
2148                             ptr_layout
2149                         },
2150                     ));
2151                 }
2152
2153                 match tcx.struct_tail_erasing_lifetimes(pointee, cx.param_env()).kind() {
2154                     ty::Slice(_) | ty::Str => tcx.types.usize,
2155                     ty::Dynamic(_, _) => {
2156                         tcx.mk_imm_ref(tcx.lifetimes.re_static, tcx.mk_array(tcx.types.usize, 3))
2157                         /* FIXME: use actual fn pointers
2158                         Warning: naively computing the number of entries in the
2159                         vtable by counting the methods on the trait + methods on
2160                         all parent traits does not work, because some methods can
2161                         be not object safe and thus excluded from the vtable.
2162                         Increase this counter if you tried to implement this but
2163                         failed to do it without duplicating a lot of code from
2164                         other places in the compiler: 2
2165                         tcx.mk_tup(&[
2166                             tcx.mk_array(tcx.types.usize, 3),
2167                             tcx.mk_array(Option<fn()>),
2168                         ])
2169                         */
2170                     }
2171                     _ => bug!("TyAndLayout::field_type({:?}): not applicable", this),
2172                 }
2173             }
2174
2175             // Arrays and slices.
2176             ty::Array(element, _) | ty::Slice(element) => element,
2177             ty::Str => tcx.types.u8,
2178
2179             // Tuples, generators and closures.
2180             ty::Closure(_, ref substs) => substs.as_closure().upvar_tys().nth(i).unwrap(),
2181
2182             ty::Generator(def_id, ref substs, _) => match this.variants {
2183                 Variants::Single { index } => substs
2184                     .as_generator()
2185                     .state_tys(def_id, tcx)
2186                     .nth(index.as_usize())
2187                     .unwrap()
2188                     .nth(i)
2189                     .unwrap(),
2190                 Variants::Multiple { ref tag, tag_field, .. } => {
2191                     if i == tag_field {
2192                         return tag_layout(tag);
2193                     }
2194                     substs.as_generator().prefix_tys().nth(i).unwrap()
2195                 }
2196             },
2197
2198             ty::Tuple(tys) => tys[i].expect_ty(),
2199
2200             // ADTs.
2201             ty::Adt(def, substs) => {
2202                 match this.variants {
2203                     Variants::Single { index } => def.variants[index].fields[i].ty(tcx, substs),
2204
2205                     // Discriminant field for enums (where applicable).
2206                     Variants::Multiple { ref tag, .. } => {
2207                         assert_eq!(i, 0);
2208                         return tag_layout(tag);
2209                     }
2210                 }
2211             }
2212
2213             ty::Projection(_)
2214             | ty::Bound(..)
2215             | ty::Placeholder(..)
2216             | ty::Opaque(..)
2217             | ty::Param(_)
2218             | ty::Infer(_)
2219             | ty::Error(_) => bug!("TyAndLayout::field_type: unexpected type `{}`", this.ty),
2220         })
2221     }
2222
2223     fn pointee_info_at(this: TyAndLayout<'tcx>, cx: &C, offset: Size) -> Option<PointeeInfo> {
2224         let addr_space_of_ty = |ty: Ty<'tcx>| {
2225             if ty.is_fn() { cx.data_layout().instruction_address_space } else { AddressSpace::DATA }
2226         };
2227
2228         let pointee_info = match *this.ty.kind() {
2229             ty::RawPtr(mt) if offset.bytes() == 0 => {
2230                 cx.layout_of(mt.ty).to_result().ok().map(|layout| PointeeInfo {
2231                     size: layout.size,
2232                     align: layout.align.abi,
2233                     safe: None,
2234                     address_space: addr_space_of_ty(mt.ty),
2235                 })
2236             }
2237             ty::FnPtr(fn_sig) if offset.bytes() == 0 => {
2238                 cx.layout_of(cx.tcx().mk_fn_ptr(fn_sig)).to_result().ok().map(|layout| {
2239                     PointeeInfo {
2240                         size: layout.size,
2241                         align: layout.align.abi,
2242                         safe: None,
2243                         address_space: cx.data_layout().instruction_address_space,
2244                     }
2245                 })
2246             }
2247             ty::Ref(_, ty, mt) if offset.bytes() == 0 => {
2248                 let address_space = addr_space_of_ty(ty);
2249                 let tcx = cx.tcx();
2250                 let is_freeze = ty.is_freeze(tcx.at(DUMMY_SP), cx.param_env());
2251                 let kind = match mt {
2252                     hir::Mutability::Not => {
2253                         if is_freeze {
2254                             PointerKind::Frozen
2255                         } else {
2256                             PointerKind::Shared
2257                         }
2258                     }
2259                     hir::Mutability::Mut => {
2260                         // Previously we would only emit noalias annotations for LLVM >= 6 or in
2261                         // panic=abort mode. That was deemed right, as prior versions had many bugs
2262                         // in conjunction with unwinding, but later versions didn’t seem to have
2263                         // said issues. See issue #31681.
2264                         //
2265                         // Alas, later on we encountered a case where noalias would generate wrong
2266                         // code altogether even with recent versions of LLVM in *safe* code with no
2267                         // unwinding involved. See #54462.
2268                         //
2269                         // For now, do not enable mutable_noalias by default at all, while the
2270                         // issue is being figured out.
2271                         if tcx.sess.opts.debugging_opts.mutable_noalias {
2272                             PointerKind::UniqueBorrowed
2273                         } else {
2274                             PointerKind::Shared
2275                         }
2276                     }
2277                 };
2278
2279                 cx.layout_of(ty).to_result().ok().map(|layout| PointeeInfo {
2280                     size: layout.size,
2281                     align: layout.align.abi,
2282                     safe: Some(kind),
2283                     address_space,
2284                 })
2285             }
2286
2287             _ => {
2288                 let mut data_variant = match this.variants {
2289                     // Within the discriminant field, only the niche itself is
2290                     // always initialized, so we only check for a pointer at its
2291                     // offset.
2292                     //
2293                     // If the niche is a pointer, it's either valid (according
2294                     // to its type), or null (which the niche field's scalar
2295                     // validity range encodes).  This allows using
2296                     // `dereferenceable_or_null` for e.g., `Option<&T>`, and
2297                     // this will continue to work as long as we don't start
2298                     // using more niches than just null (e.g., the first page of
2299                     // the address space, or unaligned pointers).
2300                     Variants::Multiple {
2301                         tag_encoding: TagEncoding::Niche { dataful_variant, .. },
2302                         tag_field,
2303                         ..
2304                     } if this.fields.offset(tag_field) == offset => {
2305                         Some(this.for_variant(cx, dataful_variant))
2306                     }
2307                     _ => Some(this),
2308                 };
2309
2310                 if let Some(variant) = data_variant {
2311                     // We're not interested in any unions.
2312                     if let FieldsShape::Union(_) = variant.fields {
2313                         data_variant = None;
2314                     }
2315                 }
2316
2317                 let mut result = None;
2318
2319                 if let Some(variant) = data_variant {
2320                     let ptr_end = offset + Pointer.size(cx);
2321                     for i in 0..variant.fields.count() {
2322                         let field_start = variant.fields.offset(i);
2323                         if field_start <= offset {
2324                             let field = variant.field(cx, i);
2325                             result = field.to_result().ok().and_then(|field| {
2326                                 if ptr_end <= field_start + field.size {
2327                                     // We found the right field, look inside it.
2328                                     let field_info =
2329                                         field.pointee_info_at(cx, offset - field_start);
2330                                     field_info
2331                                 } else {
2332                                     None
2333                                 }
2334                             });
2335                             if result.is_some() {
2336                                 break;
2337                             }
2338                         }
2339                     }
2340                 }
2341
2342                 // FIXME(eddyb) This should be for `ptr::Unique<T>`, not `Box<T>`.
2343                 if let Some(ref mut pointee) = result {
2344                     if let ty::Adt(def, _) = this.ty.kind() {
2345                         if def.is_box() && offset.bytes() == 0 {
2346                             pointee.safe = Some(PointerKind::UniqueOwned);
2347                         }
2348                     }
2349                 }
2350
2351                 result
2352             }
2353         };
2354
2355         debug!(
2356             "pointee_info_at (offset={:?}, type kind: {:?}) => {:?}",
2357             offset,
2358             this.ty.kind(),
2359             pointee_info
2360         );
2361
2362         pointee_info
2363     }
2364 }
2365
2366 impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for LayoutError<'tcx> {
2367     fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
2368         use crate::ty::layout::LayoutError::*;
2369         mem::discriminant(self).hash_stable(hcx, hasher);
2370
2371         match *self {
2372             Unknown(t) | SizeOverflow(t) => t.hash_stable(hcx, hasher),
2373         }
2374     }
2375 }
2376
2377 impl<'tcx> ty::Instance<'tcx> {
2378     // NOTE(eddyb) this is private to avoid using it from outside of
2379     // `FnAbi::of_instance` - any other uses are either too high-level
2380     // for `Instance` (e.g. typeck would use `Ty::fn_sig` instead),
2381     // or should go through `FnAbi`, to avoid losing any
2382     // adjustments `FnAbi::of_instance` might be performing.
2383     fn fn_sig_for_fn_abi(&self, tcx: TyCtxt<'tcx>) -> ty::PolyFnSig<'tcx> {
2384         // FIXME(davidtwco,eddyb): A `ParamEnv` should be passed through to this function.
2385         let ty = self.ty(tcx, ty::ParamEnv::reveal_all());
2386         match *ty.kind() {
2387             ty::FnDef(..) => {
2388                 // HACK(davidtwco,eddyb): This is a workaround for polymorphization considering
2389                 // parameters unused if they show up in the signature, but not in the `mir::Body`
2390                 // (i.e. due to being inside a projection that got normalized, see
2391                 // `src/test/ui/polymorphization/normalized_sig_types.rs`), and codegen not keeping
2392                 // track of a polymorphization `ParamEnv` to allow normalizing later.
2393                 let mut sig = match *ty.kind() {
2394                     ty::FnDef(def_id, substs) => tcx
2395                         .normalize_erasing_regions(tcx.param_env(def_id), tcx.fn_sig(def_id))
2396                         .subst(tcx, substs),
2397                     _ => unreachable!(),
2398                 };
2399
2400                 if let ty::InstanceDef::VtableShim(..) = self.def {
2401                     // Modify `fn(self, ...)` to `fn(self: *mut Self, ...)`.
2402                     sig = sig.map_bound(|mut sig| {
2403                         let mut inputs_and_output = sig.inputs_and_output.to_vec();
2404                         inputs_and_output[0] = tcx.mk_mut_ptr(inputs_and_output[0]);
2405                         sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);
2406                         sig
2407                     });
2408                 }
2409                 sig
2410             }
2411             ty::Closure(def_id, substs) => {
2412                 let sig = substs.as_closure().sig();
2413
2414                 let env_ty = tcx.closure_env_ty(def_id, substs).unwrap();
2415                 sig.map_bound(|sig| {
2416                     tcx.mk_fn_sig(
2417                         iter::once(env_ty.skip_binder()).chain(sig.inputs().iter().cloned()),
2418                         sig.output(),
2419                         sig.c_variadic,
2420                         sig.unsafety,
2421                         sig.abi,
2422                     )
2423                 })
2424             }
2425             ty::Generator(_, substs, _) => {
2426                 let sig = substs.as_generator().poly_sig();
2427
2428                 let env_region = ty::ReLateBound(ty::INNERMOST, ty::BrEnv);
2429                 let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty);
2430
2431                 let pin_did = tcx.require_lang_item(LangItem::Pin, None);
2432                 let pin_adt_ref = tcx.adt_def(pin_did);
2433                 let pin_substs = tcx.intern_substs(&[env_ty.into()]);
2434                 let env_ty = tcx.mk_adt(pin_adt_ref, pin_substs);
2435
2436                 sig.map_bound(|sig| {
2437                     let state_did = tcx.require_lang_item(LangItem::GeneratorState, None);
2438                     let state_adt_ref = tcx.adt_def(state_did);
2439                     let state_substs =
2440                         tcx.intern_substs(&[sig.yield_ty.into(), sig.return_ty.into()]);
2441                     let ret_ty = tcx.mk_adt(state_adt_ref, state_substs);
2442
2443                     tcx.mk_fn_sig(
2444                         [env_ty, sig.resume_ty].iter(),
2445                         &ret_ty,
2446                         false,
2447                         hir::Unsafety::Normal,
2448                         rustc_target::spec::abi::Abi::Rust,
2449                     )
2450                 })
2451             }
2452             _ => bug!("unexpected type {:?} in Instance::fn_sig", ty),
2453         }
2454     }
2455 }
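
// A short sketch (illustrative only; `closure_sig_illustration` is hypothetical)
// of the closure case above: the signature built by `fn_sig_for_fn_abi` prepends
// the environment type, which is what lets captured state travel with the call.
#[cfg(test)]
mod closure_sig_illustration {
    // Monomorphizing over `F` produces a direct call whose first input is the
    // closure environment, mirroring the `env_ty` prepended above.
    fn call_with<F: Fn(u32) -> u32>(f: F, x: u32) -> u32 {
        f(x)
    }

    #[test]
    fn env_travels_with_the_call() {
        let offset = 10;
        assert_eq!(call_with(|x| x + offset, 5), 15);
    }
}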
2456
2457 pub trait FnAbiExt<'tcx, C>
2458 where
2459     C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout = TyAndLayout<'tcx>>
2460         + HasDataLayout
2461         + HasTargetSpec
2462         + HasTyCtxt<'tcx>
2463         + HasParamEnv<'tcx>,
2464 {
2465     /// Compute a `FnAbi` suitable for indirect calls, i.e. to `fn` pointers.
2466     ///
2467     /// NB: this doesn't handle virtual calls - those should use `FnAbi::of_instance`
2468     /// instead, where the instance is an `InstanceDef::Virtual`.
2469     fn of_fn_ptr(cx: &C, sig: ty::PolyFnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self;
2470
2471     /// Compute a `FnAbi` suitable for declaring/defining an `fn` instance, and for
2472     /// direct calls to an `fn`.
2473     ///
2474     /// NB: that includes virtual calls, which are represented by "direct calls"
2475     /// to an `InstanceDef::Virtual` instance (of `<dyn Trait as Trait>::fn`).
2476     fn of_instance(cx: &C, instance: ty::Instance<'tcx>, extra_args: &[Ty<'tcx>]) -> Self;
2477
2478     fn new_internal(
2479         cx: &C,
2480         sig: ty::PolyFnSig<'tcx>,
2481         extra_args: &[Ty<'tcx>],
2482         caller_location: Option<Ty<'tcx>>,
2483         codegen_fn_attr_flags: CodegenFnAttrFlags,
2484         mk_arg_type: impl Fn(Ty<'tcx>, Option<usize>) -> ArgAbi<'tcx, Ty<'tcx>>,
2485     ) -> Self;
2486     fn adjust_for_abi(&mut self, cx: &C, abi: SpecAbi);
2487 }
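
// A sketch (hypothetical test, for illustration) of the distinction the two
// entry points above draw: an indirect call through a `fn` pointer (`of_fn_ptr`)
// and a direct call to a known instance (`of_instance`) must agree on ABI.
#[cfg(test)]
mod fn_ptr_abi_illustration {
    fn double(x: u32) -> u32 {
        x * 2
    }

    #[test]
    fn direct_and_indirect_calls_agree() {
        let p: fn(u32) -> u32 = double; // indirect: the `of_fn_ptr` shape
        assert_eq!(double(3), p(3)); // direct: the `of_instance` shape
    }
}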
2488
2489 fn fn_can_unwind(
2490     panic_strategy: PanicStrategy,
2491     codegen_fn_attr_flags: CodegenFnAttrFlags,
2492     call_conv: Conv,
2493 ) -> bool {
2494     if panic_strategy != PanicStrategy::Unwind {
2495         // In panic=abort mode we assume nothing can unwind anywhere, so
2496         // optimize based on this!
2497         false
2498     } else if codegen_fn_attr_flags.contains(CodegenFnAttrFlags::UNWIND) {
2499         // If a specific #[unwind] attribute is present, use that.
2500         true
2501     } else if codegen_fn_attr_flags.contains(CodegenFnAttrFlags::RUSTC_ALLOCATOR_NOUNWIND) {
2502         // Special attribute for allocator functions, which can't unwind.
2503         false
2504     } else {
2505         if call_conv == Conv::Rust {
2506             // Any Rust method (or `extern "Rust" fn` or `extern
2507             // "rust-call" fn`) is explicitly allowed to unwind
2508             // (unless it has no-unwind attribute, handled above).
2509             true
2510         } else {
2511             // Anything else is either:
2512             //
2513             //  1. A foreign item using a non-Rust ABI (like `extern "C" { fn foo(); }`), or
2514             //
2515             //  2. A Rust item using a non-Rust ABI (like `extern "C" fn foo() { ... }`).
2516             //
2517             // Foreign items (case 1) are assumed to not unwind; it is
2518             // UB otherwise. (At least for now; see also
2519             // rust-lang/rust#63909 and Rust RFC 2753.)
2520             //
2521             // Items defined in Rust with non-Rust ABIs (case 2) are also
2522             // not supposed to unwind. Whether this should be enforced
2523             // (versus stating it is UB) and *how* it would be enforced
2524             // is currently under discussion; see rust-lang/rust#58794.
2525             //
2526             // In either case, we mark the item as explicitly nounwind.
2527             false
2528         }
2529     }
2530 }
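
// A sketch (hypothetical, for illustration) of case 2 above: an item defined in
// Rust with a non-Rust ABI is treated as nounwind, so a callback handed to C
// should contain its own panics rather than let them cross the boundary.
#[cfg(test)]
mod nounwind_illustration {
    extern "C" fn callback() -> i32 {
        // `catch_unwind` keeps any panic from escaping the `extern "C"` frame.
        std::panic::catch_unwind(|| 42).unwrap_or(-1)
    }

    #[test]
    fn panic_stays_inside_the_callback() {
        assert_eq!(callback(), 42);
    }
}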
2531
2532 impl<'tcx, C> FnAbiExt<'tcx, C> for call::FnAbi<'tcx, Ty<'tcx>>
2533 where
2534     C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout = TyAndLayout<'tcx>>
2535         + HasDataLayout
2536         + HasTargetSpec
2537         + HasTyCtxt<'tcx>
2538         + HasParamEnv<'tcx>,
2539 {
2540     fn of_fn_ptr(cx: &C, sig: ty::PolyFnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
2541         // Assume that fn pointers may always unwind
2542         let codegen_fn_attr_flags = CodegenFnAttrFlags::UNWIND;
2543
2544         call::FnAbi::new_internal(cx, sig, extra_args, None, codegen_fn_attr_flags, |ty, _| {
2545             ArgAbi::new(cx.layout_of(ty))
2546         })
2547     }
2548
2549     fn of_instance(cx: &C, instance: ty::Instance<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
2550         let sig = instance.fn_sig_for_fn_abi(cx.tcx());
2551
2552         let caller_location = if instance.def.requires_caller_location(cx.tcx()) {
2553             Some(cx.tcx().caller_location_ty())
2554         } else {
2555             None
2556         };
2557
2558         let attrs = cx.tcx().codegen_fn_attrs(instance.def_id()).flags;
2559
2560         call::FnAbi::new_internal(cx, sig, extra_args, caller_location, attrs, |ty, arg_idx| {
2561             let mut layout = cx.layout_of(ty);
2562             // Don't pass the vtable, it's not an argument of the virtual fn.
2563             // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
2564             // or `&/&mut dyn Trait`, because this is special-cased elsewhere in codegen.
2565             if let (ty::InstanceDef::Virtual(..), Some(0)) = (&instance.def, arg_idx) {
2566                 let fat_pointer_ty = if layout.is_unsized() {
2567                     // unsized `self` is passed as a pointer to `self`
2568                     // FIXME (mikeyhew) change this to use &own if it is ever added to the language
2569                     cx.tcx().mk_mut_ptr(layout.ty)
2570                 } else {
2571                     match layout.abi {
2572                         Abi::ScalarPair(..) => (),
2573                         _ => bug!("receiver type has unsupported layout: {:?}", layout),
2574                     }
2575
2576                     // In the case of Rc<Self>, we need to explicitly pass a *mut RcBox<Self>
2577                     // with a Scalar (not ScalarPair) ABI. This is a hack that is understood
2578                     // elsewhere in the compiler as a method on a `dyn Trait`.
2579                     // To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes until we
2580                     // get a built-in pointer type
2581                     let mut fat_pointer_layout = layout;
2582                     'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr()
2583                         && !fat_pointer_layout.ty.is_region_ptr()
2584                     {
2585                         for i in 0..fat_pointer_layout.fields.count() {
2586                             let field_layout = fat_pointer_layout.field(cx, i);
2587
2588                             if !field_layout.is_zst() {
2589                                 fat_pointer_layout = field_layout;
2590                                 continue 'descend_newtypes;
2591                             }
2592                         }
2593
2594                         bug!("receiver has no non-zero-sized fields {:?}", fat_pointer_layout);
2595                     }
2596
2597                     fat_pointer_layout.ty
2598                 };
2599
2600                 // We now have a type like `*mut RcBox<dyn Trait>`; change its layout
2601                 // to that of `*mut ()`, a thin pointer, but keep the same type. This is
2602                 // understood as a special case elsewhere in the compiler (see the sketch after this impl).
2603                 let unit_pointer_ty = cx.tcx().mk_mut_ptr(cx.tcx().mk_unit());
2604                 layout = cx.layout_of(unit_pointer_ty);
2605                 layout.ty = fat_pointer_ty;
2606             }
2607             ArgAbi::new(layout)
2608         })
2609     }
2610
2611     fn new_internal(
2612         cx: &C,
2613         sig: ty::PolyFnSig<'tcx>,
2614         extra_args: &[Ty<'tcx>],
2615         caller_location: Option<Ty<'tcx>>,
2616         codegen_fn_attr_flags: CodegenFnAttrFlags,
2617         mk_arg_type: impl Fn(Ty<'tcx>, Option<usize>) -> ArgAbi<'tcx, Ty<'tcx>>,
2618     ) -> Self {
2619         debug!("FnAbi::new_internal({:?}, {:?})", sig, extra_args);
2620
2621         let sig = cx.tcx().normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), sig);
2622
2623         use rustc_target::spec::abi::Abi::*;
2624         let conv = match cx.tcx().sess.target.adjust_abi(sig.abi) {
2625             RustIntrinsic | PlatformIntrinsic | Rust | RustCall => Conv::Rust,
2626
2627             // It's the ABI's job to select this, not ours.
2628             System => bug!("system abi should be selected elsewhere"),
2629             EfiApi => bug!("eficall abi should be selected elsewhere"),
2630
2631             Stdcall => Conv::X86Stdcall,
2632             Fastcall => Conv::X86Fastcall,
2633             Vectorcall => Conv::X86VectorCall,
2634             Thiscall => Conv::X86ThisCall,
2635             C => Conv::C,
2636             Unadjusted => Conv::C,
2637             Win64 => Conv::X86_64Win64,
2638             SysV64 => Conv::X86_64SysV,
2639             Aapcs => Conv::ArmAapcs,
2640             PtxKernel => Conv::PtxKernel,
2641             Msp430Interrupt => Conv::Msp430Intr,
2642             X86Interrupt => Conv::X86Intr,
2643             AmdGpuKernel => Conv::AmdGpuKernel,
2644             AvrInterrupt => Conv::AvrInterrupt,
2645             AvrNonBlockingInterrupt => Conv::AvrNonBlockingInterrupt,
2646
2647             // These API constants ought to be more specific...
2648             Cdecl => Conv::C,
2649         };
2650
2651         let mut inputs = sig.inputs();
2652         let extra_args = if sig.abi == RustCall {
2653             assert!(!sig.c_variadic && extra_args.is_empty());
2654
2655             if let Some(input) = sig.inputs().last() {
2656                 if let ty::Tuple(tupled_arguments) = input.kind() {
2657                     inputs = &sig.inputs()[0..sig.inputs().len() - 1];
2658                     tupled_arguments.iter().map(|k| k.expect_ty()).collect()
2659                 } else {
2660                     bug!(
2661                         "argument to function with \"rust-call\" ABI \
2662                             is not a tuple"
2663                     );
2664                 }
2665             } else {
2666                 bug!(
2667                     "argument to function with \"rust-call\" ABI \
2668                         is not a tuple"
2669                 );
2670             }
2671         } else {
2672             assert!(sig.c_variadic || extra_args.is_empty());
2673             extra_args.to_vec()
2674         };
2675
2676         let target = &cx.tcx().sess.target;
2677         let target_env_gnu_like = matches!(&target.env[..], "gnu" | "musl");
2678         let win_x64_gnu = target.os == "windows" && target.arch == "x86_64" && target.env == "gnu";
2679         let linux_s390x_gnu_like =
2680             target.os == "linux" && target.arch == "s390x" && target_env_gnu_like;
2681         let linux_sparc64_gnu_like =
2682             target.os == "linux" && target.arch == "sparc64" && target_env_gnu_like;
2683         let linux_powerpc_gnu_like =
2684             target.os == "linux" && target.arch == "powerpc" && target_env_gnu_like;
2685         let rust_abi = matches!(sig.abi, RustIntrinsic | PlatformIntrinsic | Rust | RustCall);
2686
2687         // Handle safe Rust thin and fat pointers.
2688         let adjust_for_rust_scalar = |attrs: &mut ArgAttributes,
2689                                       scalar: &Scalar,
2690                                       layout: TyAndLayout<'tcx>,
2691                                       offset: Size,
2692                                       is_return: bool| {
2693             // Booleans are always an i1 that needs to be zero-extended.
2694             if scalar.is_bool() {
2695                 attrs.ext(ArgExtension::Zext);
2696                 return;
2697             }
2698
2699             // Only pointer types are handled below.
2700             if scalar.value != Pointer {
2701                 return;
2702             }
2703
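            // Non-null deduction: for a non-wrapping valid range (`start < end`)
            // that excludes zero, the pointer can never be null.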
2704             if scalar.valid_range.start() < scalar.valid_range.end() {
2705                 if *scalar.valid_range.start() > 0 {
2706                     attrs.set(ArgAttribute::NonNull);
2707                 }
2708             }
2709
2710             if let Some(pointee) = layout.pointee_info_at(cx, offset) {
2711                 if let Some(kind) = pointee.safe {
2712                     attrs.pointee_align = Some(pointee.align);
2713
2714                     // `Box` pointers (`UniqueOwned`) are not necessarily dereferenceable
2715                     // for the entire duration of the function, as they can be deallocated
2716                     // at any time. Set their valid size to 0.
2717                     attrs.pointee_size = match kind {
2718                         PointerKind::UniqueOwned => Size::ZERO,
2719                         _ => pointee.size,
2720                     };
2721
2722                     // `Box` pointer parameters never alias because ownership is transferred.
2723                     // `&mut` pointer parameters never alias other parameters
2724                     // or mutable global data.
2725                     //
2726                     // `&T` where `T` contains no `UnsafeCell<U>` is immutable,
2727                     // and can be marked as both `readonly` and `noalias`, as
2728                     // LLVM's definition of `noalias` is based solely on memory
2729                     // dependencies rather than pointer equality. (See the `Cell` sketch after this impl.)
2730                     let no_alias = match kind {
2731                         PointerKind::Shared => false,
2732                         PointerKind::UniqueOwned => true,
2733                         PointerKind::Frozen | PointerKind::UniqueBorrowed => !is_return,
2734                     };
2735                     if no_alias {
2736                         attrs.set(ArgAttribute::NoAlias);
2737                     }
2738
2739                     if kind == PointerKind::Frozen && !is_return {
2740                         attrs.set(ArgAttribute::ReadOnly);
2741                     }
2742                 }
2743             }
2744         };
2745
2746         let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| {
2747             let is_return = arg_idx.is_none();
2748             let mut arg = mk_arg_type(ty, arg_idx);
2749             if arg.layout.is_zst() {
2750                 // For some forsaken reason, x86_64-pc-windows-gnu
2751                 // doesn't ignore zero-sized struct arguments.
2752                 // The same is true for {s390x,sparc64,powerpc}-unknown-linux-{gnu,musl}.
2753                 if is_return
2754                     || rust_abi
2755                     || (!win_x64_gnu
2756                         && !linux_s390x_gnu_like
2757                         && !linux_sparc64_gnu_like
2758                         && !linux_powerpc_gnu_like)
2759                 {
2760                     arg.mode = PassMode::Ignore;
2761                 }
2762             }
2763
2764             // FIXME(eddyb) other ABIs don't have logic for scalar pairs.
2765             if !is_return && rust_abi {
2766                 if let Abi::ScalarPair(ref a, ref b) = arg.layout.abi {
2767                     let mut a_attrs = ArgAttributes::new();
2768                     let mut b_attrs = ArgAttributes::new();
2769                     adjust_for_rust_scalar(&mut a_attrs, a, arg.layout, Size::ZERO, false);
2770                     adjust_for_rust_scalar(
2771                         &mut b_attrs,
2772                         b,
2773                         arg.layout,
2774                         a.value.size(cx).align_to(b.value.align(cx).abi),
2775                         false,
2776                     );
2777                     arg.mode = PassMode::Pair(a_attrs, b_attrs);
2778                     return arg;
2779                 }
2780             }
2781
2782             if let Abi::Scalar(ref scalar) = arg.layout.abi {
2783                 if let PassMode::Direct(ref mut attrs) = arg.mode {
2784                     adjust_for_rust_scalar(attrs, scalar, arg.layout, Size::ZERO, is_return);
2785                 }
2786             }
2787
2788             arg
2789         };
2790
2791         let mut fn_abi = FnAbi {
2792             ret: arg_of(sig.output(), None),
2793             args: inputs
2794                 .iter()
2795                 .cloned()
2796                 .chain(extra_args)
2797                 .chain(caller_location)
2798                 .enumerate()
2799                 .map(|(i, ty)| arg_of(ty, Some(i)))
2800                 .collect(),
2801             c_variadic: sig.c_variadic,
2802             fixed_count: inputs.len(),
2803             conv,
2804             can_unwind: fn_can_unwind(cx.tcx().sess.panic_strategy(), codegen_fn_attr_flags, conv),
2805         };
2806         fn_abi.adjust_for_abi(cx, sig.abi);
2807         debug!("FnAbi::new_internal = {:?}", fn_abi);
2808         fn_abi
2809     }
2810
2811     fn adjust_for_abi(&mut self, cx: &C, abi: SpecAbi) {
2812         if abi == SpecAbi::Unadjusted {
2813             return;
2814         }
2815
2816         if abi == SpecAbi::Rust
2817             || abi == SpecAbi::RustCall
2818             || abi == SpecAbi::RustIntrinsic
2819             || abi == SpecAbi::PlatformIntrinsic
2820         {
2821             let fixup = |arg: &mut ArgAbi<'tcx, Ty<'tcx>>, is_ret: bool| {
2822                 if arg.is_ignore() {
2823                     return;
2824                 }
2825
2826                 match arg.layout.abi {
2827                     Abi::Aggregate { .. } => {}
2828
2829                     // This is a fun case! The gist of what this is doing is
2830                     // that we want callers and callees to always agree on the
2831                     // ABI of how they pass SIMD arguments. If we were to *not*
2832                     // make these arguments indirect then they'd be immediates
2833                     // in LLVM, which means that they'd use whatever the
2834                     // appropriate ABI is for the callee and the caller. That
2835                     // means, for example, if the caller doesn't have AVX
2836                     // enabled but the callee does, then passing an AVX argument
2837                     // across this boundary would cause corrupt data to show up.
2838                     //
2839                     // This problem is fixed by unconditionally passing SIMD
2840                     // arguments through memory between callers and callees
2841                     // which should get them all to agree on ABI regardless of
2842                     // target feature sets. Some more information about this
2843                     // issue can be found in #44367.
2844                     //
2845                     // Note that the platform intrinsic ABI is exempt here as
2846                     // that's how we connect up to LLVM and it's unstable
2847                     // anyway; we control all calls to it in libstd.
2848                     Abi::Vector { .. }
2849                         if abi != SpecAbi::PlatformIntrinsic
2850                             && cx.tcx().sess.target.simd_types_indirect =>
2851                     {
2852                         arg.make_indirect();
2853                         return;
2854                     }
2855
2856                     _ => return,
2857                 }
2858
2859                 // Return structures up to 2 pointers in size by value, matching `ScalarPair`. LLVM
2860                 // will usually return these in 2 registers, which is more efficient than by-ref.
2861                 let max_by_val_size = if is_ret { Pointer.size(cx) * 2 } else { Pointer.size(cx) };
2862                 let size = arg.layout.size;
2863
2864                 if arg.layout.is_unsized() || size > max_by_val_size {
2865                     arg.make_indirect();
2866                 } else {
2867                     // We want to pass small aggregates as immediates, but using
2868                     // a LLVM aggregate type for this leads to bad optimizations,
2869                     // so we pick an appropriately sized integer type instead.
2870                     arg.cast_to(Reg { kind: RegKind::Integer, size });
2871                 }
2872             };
2873             fixup(&mut self.ret, true);
2874             for arg in &mut self.args {
2875                 fixup(arg, false);
2876             }
2877             return;
2878         }
2879
2880         if let Err(msg) = self.adjust_for_cabi(cx, abi) {
2881             cx.tcx().sess.fatal(&msg);
2882         }
2883     }
2884 }
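
// A sketch (hypothetical test; `Speak`/`Dog` are illustrative) of the virtual
// calls handled in `of_instance` above: the receiver is a fat pointer whose
// data half is passed as `self`, while the vtable half selects the method.
#[cfg(test)]
mod virtual_call_illustration {
    use std::rc::Rc;

    trait Speak {
        fn speak(&self) -> &'static str;
    }

    struct Dog;

    impl Speak for Dog {
        fn speak(&self) -> &'static str {
            "woof"
        }
    }

    #[test]
    fn dyn_call_through_rc() {
        // `r` is (data, vtable); the call below passes only the data pointer,
        // after descending through the `Rc` newtype layers as described above.
        let r: Rc<dyn Speak> = Rc::new(Dog);
        assert_eq!(r.speak(), "woof");
    }
}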
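
// A sketch (hypothetical test) of why only `UnsafeCell`-free pointees
// (`PointerKind::Frozen` in `adjust_for_rust_scalar` above) may be marked
// `readonly`: interior mutability makes writes through `&T` legal.
#[cfg(test)]
mod frozen_illustration {
    use std::cell::Cell;

    // A write through a *shared* reference; a `readonly` attribute on `c`
    // would be unsound here.
    fn bump(c: &Cell<u32>) {
        c.set(c.get() + 1);
    }

    #[test]
    fn shared_ref_can_still_mutate() {
        let c = Cell::new(0);
        bump(&c);
        assert_eq!(c.get(), 1);
    }
}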
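
// A sketch (hypothetical test) of the zero-sized-argument handling in
// `new_internal` above: under the Rust ABI a ZST argument is ignored
// (`PassMode::Ignore`), consistent with it occupying no storage at all.
#[cfg(test)]
mod zst_illustration {
    struct Unit;

    fn takes_zst(_: Unit, x: u32) -> u32 {
        x
    }

    #[test]
    fn zst_occupies_no_space() {
        assert_eq!(std::mem::size_of::<Unit>(), 0);
        assert_eq!(takes_zst(Unit, 7), 7);
    }
}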
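
// A sketch (hypothetical, x86_64-only) of the mismatch motivating the SIMD
// indirection in `adjust_for_abi` above: a callee compiled with AVX enabled
// would expect `__m256` in a 256-bit register that a non-AVX caller never
// uses, so rustc passes such vectors through memory instead.
#[cfg(all(test, target_arch = "x86_64"))]
mod simd_abi_illustration {
    use std::arch::x86_64::__m256;

    // With `avx` enabled locally, `v` would natively travel in a vector
    // register; without the indirection, callers built without AVX would
    // disagree with this callee about where `v` lives.
    #[allow(dead_code)]
    #[target_feature(enable = "avx")]
    unsafe fn passthrough(v: __m256) -> __m256 {
        v
    }
}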