use crate::ich::StableHashingContext;
use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use crate::mir::{GeneratorLayout, GeneratorSavedLocal};
use crate::ty::subst::Subst;
use crate::ty::{self, subst::SubstsRef, ReprOptions, Ty, TyCtxt, TypeFoldable};

use rustc_ast::{self as ast, IntTy, UintTy};
use rustc_attr as attr;
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_hir as hir;
use rustc_hir::lang_items::LangItem;
use rustc_index::bit_set::BitSet;
use rustc_index::vec::{Idx, IndexVec};
use rustc_session::{DataTypeKind, FieldInfo, SizeKind, VariantInfo};
use rustc_span::symbol::{Ident, Symbol};
use rustc_span::DUMMY_SP;
use rustc_target::abi::call::{
    ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, Conv, FnAbi, PassMode, Reg, RegKind,
};
use rustc_target::abi::*;
use rustc_target::spec::{abi::Abi as SpecAbi, HasTargetSpec, PanicStrategy};

use std::cmp;
use std::fmt;
use std::iter;
use std::mem;
use std::num::NonZeroUsize;
use std::ops::Bound;

pub trait IntegerExt {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx>;
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
    fn repr_discr<'tcx>(
        tcx: TyCtxt<'tcx>,
        ty: Ty<'tcx>,
        repr: &ReprOptions,
        min: i128,
        max: i128,
    ) -> (Integer, bool);
}

impl IntegerExt for Integer {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx> {
        match (*self, signed) {
            (I8, false) => tcx.types.u8,
            (I16, false) => tcx.types.u16,
            (I32, false) => tcx.types.u32,
            (I64, false) => tcx.types.u64,
            (I128, false) => tcx.types.u128,
            (I8, true) => tcx.types.i8,
            (I16, true) => tcx.types.i16,
            (I32, true) => tcx.types.i32,
            (I64, true) => tcx.types.i64,
            (I128, true) => tcx.types.i128,
        }
    }

    /// Gets the Integer type from an attr::IntType.
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
        let dl = cx.data_layout();

        match ity {
            attr::SignedInt(IntTy::I8) | attr::UnsignedInt(UintTy::U8) => I8,
            attr::SignedInt(IntTy::I16) | attr::UnsignedInt(UintTy::U16) => I16,
            attr::SignedInt(IntTy::I32) | attr::UnsignedInt(UintTy::U32) => I32,
            attr::SignedInt(IntTy::I64) | attr::UnsignedInt(UintTy::U64) => I64,
            attr::SignedInt(IntTy::I128) | attr::UnsignedInt(UintTy::U128) => I128,
            attr::SignedInt(IntTy::Isize) | attr::UnsignedInt(UintTy::Usize) => {
                dl.ptr_sized_integer()
            }
        }
    }

    /// Finds the appropriate Integer type and signedness for the given
    /// signed discriminant range and `#[repr]` attribute.
    /// N.B.: `u128` values above `i128::MAX` will be treated as signed, but
    /// that shouldn't affect anything, other than maybe debuginfo.
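    ///
    /// (Illustrative example, not from the original source: with no `#[repr]`
    /// hint, a discriminant range of `min = -1, max = 127` needs a signed type
    /// and fits in a byte, so the result is `(I8, true)`.)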
    fn repr_discr<'tcx>(
        tcx: TyCtxt<'tcx>,
        ty: Ty<'tcx>,
        repr: &ReprOptions,
        min: i128,
        max: i128,
    ) -> (Integer, bool) {
        // Theoretically, negative values could be larger in unsigned representation
        // than the unsigned representation of the signed minimum. However, if there
        // are any negative values, the only valid unsigned representation is u128
        // which can fit all i128 values, so the result remains unaffected.
        let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
        let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));

        let mut min_from_extern = None;
        let min_default = I8;

        if let Some(ity) = repr.int {
            let discr = Integer::from_attr(&tcx, ity);
            let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
            if discr < fit {
                bug!(
                    "Integer::repr_discr: `#[repr]` hint too small for \
                      discriminant range of enum `{}`",
                    ty
                )
            }
            return (discr, ity.is_signed());
        }

        if repr.c() {
            match &tcx.sess.target.arch[..] {
                // WARNING: the ARM EABI has two variants; the one corresponding
                // to `at_least == I32` appears to be used on Linux and NetBSD,
                // but some systems may use the variant corresponding to no
                // lower bound. However, we don't run on those yet...?
                "arm" => min_from_extern = Some(I32),
                _ => min_from_extern = Some(I32),
            }
        }

        let at_least = min_from_extern.unwrap_or(min_default);

        // If there are no negative values, we can use the unsigned fit.
        if min >= 0 {
            (cmp::max(unsigned_fit, at_least), false)
        } else {
            (cmp::max(signed_fit, at_least), true)
        }
    }
}

pub trait PrimitiveExt {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
    fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
}

impl PrimitiveExt for Primitive {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
        match *self {
            Int(i, signed) => i.to_ty(tcx, signed),
            F32 => tcx.types.f32,
            F64 => tcx.types.f64,
            Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
        }
    }

    /// Return an *integer* type matching this primitive.
    /// Useful in particular when dealing with enum discriminants.
    fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
        match *self {
            Int(i, signed) => i.to_ty(tcx, signed),
            Pointer => tcx.types.usize,
            F32 | F64 => bug!("floats do not have an int type"),
        }
    }
}

/// The first half of a fat pointer.
///
/// - For a trait object, this is the address of the box.
/// - For a slice, this is the base address.
pub const FAT_PTR_ADDR: usize = 0;

/// The second half of a fat pointer.
///
/// - For a trait object, this is the address of the vtable.
/// - For a slice, this is the length.
pub const FAT_PTR_EXTRA: usize = 1;
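
// (Illustrative: for a `&[u8]`, field `FAT_PTR_ADDR` is the data pointer and
// field `FAT_PTR_EXTRA` is the length; for a `&dyn Trait`, they are the data
// pointer and the vtable pointer respectively.)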

#[derive(Copy, Clone, Debug, TyEncodable, TyDecodable)]
pub enum LayoutError<'tcx> {
    Unknown(Ty<'tcx>),
    SizeOverflow(Ty<'tcx>),
}

impl<'tcx> fmt::Display for LayoutError<'tcx> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match *self {
            LayoutError::Unknown(ty) => write!(f, "the type `{}` has an unknown layout", ty),
            LayoutError::SizeOverflow(ty) => {
                write!(f, "values of the type `{}` are too big for the current architecture", ty)
            }
        }
    }
}

fn layout_raw<'tcx>(
    tcx: TyCtxt<'tcx>,
    query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
) -> Result<&'tcx Layout, LayoutError<'tcx>> {
    ty::tls::with_related_context(tcx, move |icx| {
        let (param_env, ty) = query.into_parts();

        if !tcx.sess.recursion_limit().value_within_limit(icx.layout_depth) {
            tcx.sess.fatal(&format!("overflow representing the type `{}`", ty));
        }

        // Update the ImplicitCtxt to increase the layout_depth
        let icx = ty::tls::ImplicitCtxt { layout_depth: icx.layout_depth + 1, ..icx.clone() };

        ty::tls::enter_context(&icx, |_| {
            let cx = LayoutCx { tcx, param_env };
            let layout = cx.layout_raw_uncached(ty);
            // Type-level uninhabitedness should always imply ABI uninhabitedness.
            if let Ok(layout) = layout {
                if ty.conservative_is_privately_uninhabited(tcx) {
                    assert!(layout.abi.is_uninhabited());
                }
            }
            layout
        })
    })
}

pub fn provide(providers: &mut ty::query::Providers) {
    *providers = ty::query::Providers { layout_raw, ..*providers };
}

pub struct LayoutCx<'tcx, C> {
    pub tcx: C,
    pub param_env: ty::ParamEnv<'tcx>,
}

#[derive(Copy, Clone, Debug)]
enum StructKind {
    /// A tuple, closure, or univariant which cannot be coerced to unsized.
    AlwaysSized,
    /// A univariant, the last field of which may be coerced to unsized.
    MaybeUnsized,
    /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
    Prefixed(Size, Align),
}

// Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
// This is used to go between `memory_index` (source field order to memory order)
// and `inverse_memory_index` (memory order to source field order).
// See also `FieldsShape::Arbitrary::memory_index` for more details.
// FIXME(eddyb) build a better abstraction for permutations, if possible.
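// (Illustrative: `invert_mapping(&[1, 2, 0])` returns `vec![2, 0, 1]`.)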
fn invert_mapping(map: &[u32]) -> Vec<u32> {
    let mut inverse = vec![0; map.len()];
    for i in 0..map.len() {
        inverse[map[i] as usize] = i as u32;
    }
    inverse
}

impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
    fn scalar_pair(&self, a: Scalar, b: Scalar) -> Layout {
        let dl = self.data_layout();
        let b_align = b.value.align(dl);
        let align = a.value.align(dl).max(b_align).max(dl.aggregate_align);
        let b_offset = a.value.size(dl).align_to(b_align.abi);
        let size = (b_offset + b.value.size(dl)).align_to(align.abi);
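        // (Illustrative: pairing an `i32` with an `i64` on a typical 64-bit
        // target gives `b_offset == 8` and `size == 16`, i.e. 4 bytes of
        // padding after the `i32`.)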

        // HACK(nox): We iter on `b` and then `a` because `max_by_key`
        // returns the last maximum.
        let largest_niche = Niche::from_scalar(dl, b_offset, b.clone())
            .into_iter()
            .chain(Niche::from_scalar(dl, Size::ZERO, a.clone()))
            .max_by_key(|niche| niche.available(dl));

        Layout {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Arbitrary {
                offsets: vec![Size::ZERO, b_offset],
                memory_index: vec![0, 1],
            },
            abi: Abi::ScalarPair(a, b),
            largest_niche,
            align,
            size,
        }
    }

    fn univariant_uninterned(
        &self,
        ty: Ty<'tcx>,
        fields: &[TyAndLayout<'_>],
        repr: &ReprOptions,
        kind: StructKind,
    ) -> Result<Layout, LayoutError<'tcx>> {
        let dl = self.data_layout();
        let pack = repr.pack;
        if pack.is_some() && repr.align.is_some() {
            bug!("struct cannot be packed and aligned");
        }

        let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };

        let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();

        let optimize = !repr.inhibit_struct_field_reordering_opt();
        if optimize {
            let end =
                if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
            let optimizing = &mut inverse_memory_index[..end];
            let field_align = |f: &TyAndLayout<'_>| {
                if let Some(pack) = pack { f.align.abi.min(pack) } else { f.align.abi }
            };
            match kind {
                StructKind::AlwaysSized | StructKind::MaybeUnsized => {
                    optimizing.sort_by_key(|&x| {
                        // Place ZSTs first to avoid "interesting offsets",
                        // especially with only one or two non-ZST fields.
                        let f = &fields[x as usize];
                        (!f.is_zst(), cmp::Reverse(field_align(f)))
                    });
                }
                StructKind::Prefixed(..) => {
                    // Sort in ascending alignment, so that the layout stays
                    // optimal regardless of the prefix.
                    optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
                }
            }
        }

        // inverse_memory_index holds field indices by increasing memory offset.
        // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
        // We now write field offsets to the corresponding offset slot;
        // field 5 with offset 0 puts 0 in offsets[5].
        // At the bottom of this function, we invert `inverse_memory_index` to
        // produce `memory_index` (see `invert_mapping`).
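        //
        // (Illustrative: for source fields `[u8, u32, u8]` with reordering
        // enabled, the `u32` is placed first, so inverse_memory_index becomes
        // [1, 0, 2], which happens to be its own inverse here.)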

        let mut sized = true;
        let mut offsets = vec![Size::ZERO; fields.len()];
        let mut offset = Size::ZERO;
        let mut largest_niche = None;
        let mut largest_niche_available = 0;

        if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
            let prefix_align =
                if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
            align = align.max(AbiAndPrefAlign::new(prefix_align));
            offset = prefix_size.align_to(prefix_align);
        }

        for &i in &inverse_memory_index {
            let field = fields[i as usize];
            if !sized {
                bug!("univariant: field #{} of `{}` comes after unsized field", offsets.len(), ty);
            }

            if field.is_unsized() {
                sized = false;
            }

            // Invariant: offset < dl.obj_size_bound() <= 1<<61
            let field_align = if let Some(pack) = pack {
                field.align.min(AbiAndPrefAlign::new(pack))
            } else {
                field.align
            };
            offset = offset.align_to(field_align.abi);
            align = align.max(field_align);

            debug!("univariant offset: {:?} field: {:#?}", offset, field);
            offsets[i as usize] = offset;

            if !repr.hide_niche() {
                if let Some(mut niche) = field.largest_niche.clone() {
                    let available = niche.available(dl);
                    if available > largest_niche_available {
                        largest_niche_available = available;
                        niche.offset += offset;
                        largest_niche = Some(niche);
                    }
                }
            }

            offset = offset.checked_add(field.size, dl).ok_or(LayoutError::SizeOverflow(ty))?;
        }

        if let Some(repr_align) = repr.align {
            align = align.max(AbiAndPrefAlign::new(repr_align));
        }

        debug!("univariant min_size: {:?}", offset);
        let min_size = offset;

        // As stated above, inverse_memory_index holds field indices by increasing offset.
        // This makes it an already-sorted view of the offsets vec.
        // To invert it, consider:
        // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
        // Since inverse_memory_index is sorted by offset, field 5 is its first
        // element, at position 0, so memory_index[5] is 0.
        // Note: if we didn't optimize, it's already right.

        let memory_index =
            if optimize { invert_mapping(&inverse_memory_index) } else { inverse_memory_index };

        let size = min_size.align_to(align.abi);
        let mut abi = Abi::Aggregate { sized };

        // Unpack newtype ABIs and find scalar pairs.
        if sized && size.bytes() > 0 {
            // All other fields must be ZSTs.
            let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());

            match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
                // We have exactly one non-ZST field.
                (Some((i, field)), None, None) => {
                    // Field fills the struct and it has a scalar or scalar pair ABI.
                    if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size
                    {
                        match field.abi {
                            // For plain scalars, or vectors of them, we can't unpack
                            // newtypes for `#[repr(C)]`, as that affects C ABIs.
                            Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
                                abi = field.abi.clone();
                            }
                            // But scalar pairs are Rust-specific and get
                            // treated as aggregates by C ABIs anyway.
                            Abi::ScalarPair(..) => {
                                abi = field.abi.clone();
                            }
                            _ => {}
                        }
                    }
                }

                // Two non-ZST fields, and they're both scalars.
                (
                    Some((i, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(ref a), .. }, .. })),
                    Some((j, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(ref b), .. }, .. })),
                    None,
                ) => {
                    // Order by the memory placement, not source order.
                    let ((i, a), (j, b)) =
                        if offsets[i] < offsets[j] { ((i, a), (j, b)) } else { ((j, b), (i, a)) };
                    let pair = self.scalar_pair(a.clone(), b.clone());
                    let pair_offsets = match pair.fields {
                        FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
                            assert_eq!(memory_index, &[0, 1]);
                            offsets
                        }
                        _ => bug!(),
                    };
                    if offsets[i] == pair_offsets[0]
                        && offsets[j] == pair_offsets[1]
                        && align == pair.align
                        && size == pair.size
                    {
                        // We can use `ScalarPair` only when it matches our
                        // already computed layout (including `#[repr(C)]`).
                        abi = pair.abi;
                    }
                }

                _ => {}
            }
        }

        if sized && fields.iter().any(|f| f.abi.is_uninhabited()) {
            abi = Abi::Uninhabited;
        }

        Ok(Layout {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Arbitrary { offsets, memory_index },
            abi,
            largest_niche,
            align,
            size,
        })
    }

    fn layout_raw_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx Layout, LayoutError<'tcx>> {
        let tcx = self.tcx;
        let param_env = self.param_env;
        let dl = self.data_layout();
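        // Helper producing a scalar whose valid range spans every bit pattern
        // of its primitive, e.g. 0..=0xff for an 8-bit integer.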
        let scalar_unit = |value: Primitive| {
            let bits = value.size(dl).bits();
            assert!(bits <= 128);
            Scalar { value, valid_range: 0..=(!0 >> (128 - bits)) }
        };
        let scalar = |value: Primitive| tcx.intern_layout(Layout::scalar(self, scalar_unit(value)));

        let univariant = |fields: &[TyAndLayout<'_>], repr: &ReprOptions, kind| {
            Ok(tcx.intern_layout(self.univariant_uninterned(ty, fields, repr, kind)?))
        };
        debug_assert!(!ty.has_infer_types_or_consts());

        Ok(match *ty.kind() {
            // Basic scalars.
            ty::Bool => tcx.intern_layout(Layout::scalar(
                self,
                Scalar { value: Int(I8, false), valid_range: 0..=1 },
            )),
            ty::Char => tcx.intern_layout(Layout::scalar(
                self,
                Scalar { value: Int(I32, false), valid_range: 0..=0x10FFFF },
            )),
            ty::Int(ity) => scalar(Int(Integer::from_attr(dl, attr::SignedInt(ity)), true)),
            ty::Uint(ity) => scalar(Int(Integer::from_attr(dl, attr::UnsignedInt(ity)), false)),
            ty::Float(fty) => scalar(match fty {
                ast::FloatTy::F32 => F32,
                ast::FloatTy::F64 => F64,
            }),
            ty::FnPtr(_) => {
                let mut ptr = scalar_unit(Pointer);
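                // Function pointers are never null, so excluding 0 from the
                // valid range leaves a niche (this is what lets e.g.
                // `Option<fn()>` stay pointer-sized).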
                ptr.valid_range = 1..=*ptr.valid_range.end();
                tcx.intern_layout(Layout::scalar(self, ptr))
            }

            // The never type.
            ty::Never => tcx.intern_layout(Layout {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldsShape::Primitive,
                abi: Abi::Uninhabited,
                largest_niche: None,
                align: dl.i8_align,
                size: Size::ZERO,
            }),

            // Potentially-wide pointers.
            ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                let mut data_ptr = scalar_unit(Pointer);
                if !ty.is_unsafe_ptr() {
                    data_ptr.valid_range = 1..=*data_ptr.valid_range.end();
                }

                let pointee = tcx.normalize_erasing_regions(param_env, pointee);
                if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
                    return Ok(tcx.intern_layout(Layout::scalar(self, data_ptr)));
                }

                let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
                let metadata = match unsized_part.kind() {
                    ty::Foreign(..) => {
                        return Ok(tcx.intern_layout(Layout::scalar(self, data_ptr)));
                    }
                    ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)),
                    ty::Dynamic(..) => {
                        let mut vtable = scalar_unit(Pointer);
                        vtable.valid_range = 1..=*vtable.valid_range.end();
                        vtable
                    }
                    _ => return Err(LayoutError::Unknown(unsized_part)),
                };

                // Effectively a (ptr, meta) tuple.
                tcx.intern_layout(self.scalar_pair(data_ptr, metadata))
            }

            // Arrays and slices.
            ty::Array(element, mut count) => {
                if count.has_projections() {
                    count = tcx.normalize_erasing_regions(param_env, count);
                    if count.has_projections() {
                        return Err(LayoutError::Unknown(ty));
                    }
                }

                let count = count.try_eval_usize(tcx, param_env).ok_or(LayoutError::Unknown(ty))?;
                let element = self.layout_of(element)?;
                let size =
                    element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;

                let abi = if count != 0 && ty.conservative_is_privately_uninhabited(tcx) {
                    Abi::Uninhabited
                } else {
                    Abi::Aggregate { sized: true }
                };

                let largest_niche = if count != 0 { element.largest_niche.clone() } else { None };

                tcx.intern_layout(Layout {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldsShape::Array { stride: element.size, count },
                    abi,
                    largest_niche,
                    align: element.align,
                    size,
                })
            }
            ty::Slice(element) => {
                let element = self.layout_of(element)?;
                tcx.intern_layout(Layout {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldsShape::Array { stride: element.size, count: 0 },
                    abi: Abi::Aggregate { sized: false },
                    largest_niche: None,
                    align: element.align,
                    size: Size::ZERO,
                })
            }
            ty::Str => tcx.intern_layout(Layout {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
                abi: Abi::Aggregate { sized: false },
                largest_niche: None,
                align: dl.i8_align,
                size: Size::ZERO,
            }),

            // Odd unit types.
            ty::FnDef(..) => univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?,
            ty::Dynamic(..) | ty::Foreign(..) => {
                let mut unit = self.univariant_uninterned(
                    ty,
                    &[],
                    &ReprOptions::default(),
                    StructKind::AlwaysSized,
                )?;
                match unit.abi {
                    Abi::Aggregate { ref mut sized } => *sized = false,
                    _ => bug!(),
                }
                tcx.intern_layout(unit)
            }

            ty::Generator(def_id, substs, _) => self.generator_layout(ty, def_id, substs)?,

            ty::Closure(_, ref substs) => {
                let tys = substs.as_closure().upvar_tys();
                univariant(
                    &tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    StructKind::AlwaysSized,
                )?
            }

            ty::Tuple(tys) => {
                let kind =
                    if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };

                univariant(
                    &tys.iter()
                        .map(|k| self.layout_of(k.expect_ty()))
                        .collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    kind,
                )?
            }

            // SIMD vector types.
            ty::Adt(def, substs) if def.repr.simd() => {
                // Supported SIMD vectors are homogeneous ADTs with at least one field:
                //
                // * #[repr(simd)] struct S(T, T, T, T);
                // * #[repr(simd)] struct S { x: T, y: T, z: T, w: T }
                // * #[repr(simd)] struct S([T; 4])
                //
                // where T is a primitive scalar (integer/float/pointer).

                // SIMD vectors with zero fields are not supported.
                // (should be caught by typeck)
                if def.non_enum_variant().fields.is_empty() {
                    tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
                }

                // Type of the first ADT field:
                let f0_ty = def.non_enum_variant().fields[0].ty(tcx, substs);

                // Heterogeneous SIMD vectors are not supported:
                // (should be caught by typeck)
                for fi in &def.non_enum_variant().fields {
                    if fi.ty(tcx, substs) != f0_ty {
                        tcx.sess.fatal(&format!("monomorphising heterogeneous SIMD type `{}`", ty));
                    }
                }

                // The element type and number of elements of the SIMD vector
                // are obtained from:
                //
                // * the element type and length of the single array field, if
                // the first field is of array type, or
                //
                // * the homogeneous field type and the number of fields.
                let (e_ty, e_len, is_array) = if let ty::Array(e_ty, _) = f0_ty.kind() {
                    // First ADT field is an array:

                    // SIMD vectors with multiple array fields are not supported:
                    // (should be caught by typeck)
                    if def.non_enum_variant().fields.len() != 1 {
                        tcx.sess.fatal(&format!(
                            "monomorphising SIMD type `{}` with more than one array field",
                            ty
                        ));
                    }

                    // Extract the number of elements from the layout of the array field:
                    let len = if let Ok(TyAndLayout {
                        layout: Layout { fields: FieldsShape::Array { count, .. }, .. },
                        ..
                    }) = self.layout_of(f0_ty)
                    {
                        count
                    } else {
                        return Err(LayoutError::Unknown(ty));
                    };

                    (*e_ty, *len, true)
                } else {
                    // First ADT field is not an array:
                    (f0_ty, def.non_enum_variant().fields.len() as _, false)
                };

                // SIMD vectors of zero length are not supported.
                // Additionally, lengths are capped at 2^16 as a fixed maximum backends must
                // support.
                //
                // Can't be caught in typeck if the array length is generic.
                if e_len == 0 {
                    tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
                } else if e_len > 65536 {
                    tcx.sess.fatal(&format!(
                        "monomorphising SIMD type `{}` of length greater than 65536",
                        ty,
                    ));
                }

                // Compute the ABI of the element type:
                let e_ly = self.layout_of(e_ty)?;
                let e_abi = if let Abi::Scalar(ref scalar) = e_ly.abi {
                    scalar.clone()
                } else {
                    // This error isn't caught in typeck, e.g., if
                    // the element type of the vector is generic.
                    tcx.sess.fatal(&format!(
                        "monomorphising SIMD type `{}` with a non-primitive-scalar \
                        (integer/float/pointer) element type `{}`",
                        ty, e_ty
                    ))
                };

                // Compute the size and alignment of the vector:
                let size = e_ly.size.checked_mul(e_len, dl).ok_or(LayoutError::SizeOverflow(ty))?;
                let align = dl.vector_align(size);
                let size = size.align_to(align.abi);

                // Compute the placement of the vector fields:
                let fields = if is_array {
                    FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] }
                } else {
                    FieldsShape::Array { stride: e_ly.size, count: e_len }
                };

                tcx.intern_layout(Layout {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields,
                    abi: Abi::Vector { element: e_abi, count: e_len },
                    largest_niche: e_ly.largest_niche.clone(),
                    size,
                    align,
                })
            }

            // ADTs.
            ty::Adt(def, substs) => {
                // Cache the field layouts.
                let variants = def
                    .variants
                    .iter()
                    .map(|v| {
                        v.fields
                            .iter()
                            .map(|field| self.layout_of(field.ty(tcx, substs)))
                            .collect::<Result<Vec<_>, _>>()
                    })
                    .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                if def.is_union() {
                    if def.repr.pack.is_some() && def.repr.align.is_some() {
                        bug!("union cannot be packed and aligned");
                    }

                    let mut align =
                        if def.repr.pack.is_some() { dl.i8_align } else { dl.aggregate_align };

                    if let Some(repr_align) = def.repr.align {
                        align = align.max(AbiAndPrefAlign::new(repr_align));
                    }

                    let optimize = !def.repr.inhibit_union_abi_opt();
                    let mut size = Size::ZERO;
                    let mut abi = Abi::Aggregate { sized: true };
                    let index = VariantIdx::new(0);
                    for field in &variants[index] {
                        assert!(!field.is_unsized());
                        align = align.max(field.align);

                        // If all non-ZST fields have the same ABI, forward this ABI
                        if optimize && !field.is_zst() {
                            // Normalize scalar_unit to the maximal valid range
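                            // (e.g. a `bool` field's 0..=1 range is widened to
                            // the full 0..=0xff range of I8, since a union
                            // field may hold any bit pattern written through
                            // another field)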
                            let field_abi = match &field.abi {
                                Abi::Scalar(x) => Abi::Scalar(scalar_unit(x.value)),
                                Abi::ScalarPair(x, y) => {
                                    Abi::ScalarPair(scalar_unit(x.value), scalar_unit(y.value))
                                }
                                Abi::Vector { element: x, count } => {
                                    Abi::Vector { element: scalar_unit(x.value), count: *count }
                                }
                                Abi::Uninhabited | Abi::Aggregate { .. } => {
                                    Abi::Aggregate { sized: true }
                                }
                            };

                            if size == Size::ZERO {
                                // first non ZST: initialize 'abi'
                                abi = field_abi;
                            } else if abi != field_abi {
                                // different fields have different ABI: reset to Aggregate
                                abi = Abi::Aggregate { sized: true };
                            }
                        }

                        size = cmp::max(size, field.size);
                    }

                    if let Some(pack) = def.repr.pack {
                        align = align.min(AbiAndPrefAlign::new(pack));
                    }

                    return Ok(tcx.intern_layout(Layout {
                        variants: Variants::Single { index },
                        fields: FieldsShape::Union(
                            NonZeroUsize::new(variants[index].len())
                                .ok_or(LayoutError::Unknown(ty))?,
                        ),
                        abi,
                        largest_niche: None,
                        align,
                        size: size.align_to(align.abi),
                    }));
                }

                // A variant is absent if it's uninhabited and only has ZST fields.
                // Present uninhabited variants only require space for their fields,
                // but *not* an encoding of the discriminant (e.g., a tag value).
                // See issue #49298 for more details on the need to leave space
                // for non-ZST uninhabited data (mostly partial initialization).
                let absent = |fields: &[TyAndLayout<'_>]| {
                    let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
                    let is_zst = fields.iter().all(|f| f.is_zst());
                    uninhabited && is_zst
                };
                let (present_first, present_second) = {
                    let mut present_variants = variants
                        .iter_enumerated()
                        .filter_map(|(i, v)| if absent(v) { None } else { Some(i) });
                    (present_variants.next(), present_variants.next())
                };
                let present_first = match present_first {
                    Some(present_first) => present_first,
                    // Uninhabited because it has no variants, or only absent ones.
                    None if def.is_enum() => return tcx.layout_raw(param_env.and(tcx.types.never)),
                    // If it's a struct, still compute a layout so that we can still compute the
                    // field offsets.
                    None => VariantIdx::new(0),
                };

                let is_struct = !def.is_enum() ||
                    // Only one variant is present.
                    (present_second.is_none() &&
                    // Representation optimizations are allowed.
                    !def.repr.inhibit_enum_layout_opt());
                if is_struct {
                    // Struct, or univariant enum equivalent to a struct.
                    // (Typechecking will reject discriminant-sizing attrs.)

                    let v = present_first;
                    let kind = if def.is_enum() || variants[v].is_empty() {
                        StructKind::AlwaysSized
                    } else {
                        let param_env = tcx.param_env(def.did);
                        let last_field = def.variants[v].fields.last().unwrap();
                        let always_sized =
                            tcx.type_of(last_field.did).is_sized(tcx.at(DUMMY_SP), param_env);
                        if !always_sized {
                            StructKind::MaybeUnsized
                        } else {
                            StructKind::AlwaysSized
                        }
                    };

                    let mut st = self.univariant_uninterned(ty, &variants[v], &def.repr, kind)?;
                    st.variants = Variants::Single { index: v };
                    let (start, end) = self.tcx.layout_scalar_valid_range(def.did);
                    match st.abi {
                        Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
                            // the asserts ensure that we are not using the
                            // `#[rustc_layout_scalar_valid_range(n)]`
                            // attribute to widen the range of anything as that would probably
                            // result in UB somewhere
                            // FIXME(eddyb) the asserts are probably not needed,
                            // as larger validity ranges would result in missed
                            // optimizations, *not* wrongly assuming the inner
                            // value is valid. e.g. unions enlarge validity ranges,
                            // because the values may be uninitialized.
                            if let Bound::Included(start) = start {
                                // FIXME(eddyb) this might be incorrect - it doesn't
                                // account for wrap-around (end < start) ranges.
                                assert!(*scalar.valid_range.start() <= start);
                                scalar.valid_range = start..=*scalar.valid_range.end();
                            }
                            if let Bound::Included(end) = end {
                                // FIXME(eddyb) this might be incorrect - it doesn't
                                // account for wrap-around (end < start) ranges.
                                assert!(*scalar.valid_range.end() >= end);
                                scalar.valid_range = *scalar.valid_range.start()..=end;
                            }

                            // Update `largest_niche` if we have introduced a larger niche.
                            let niche = if def.repr.hide_niche() {
                                None
                            } else {
                                Niche::from_scalar(dl, Size::ZERO, scalar.clone())
                            };
                            if let Some(niche) = niche {
                                match &st.largest_niche {
                                    Some(largest_niche) => {
                                        // Replace the existing niche even if they're equal,
                                        // because this one is at a lower offset.
                                        if largest_niche.available(dl) <= niche.available(dl) {
                                            st.largest_niche = Some(niche);
                                        }
                                    }
                                    None => st.largest_niche = Some(niche),
                                }
                            }
                        }
                        _ => assert!(
                            start == Bound::Unbounded && end == Bound::Unbounded,
                            "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
                            def,
                            st,
                        ),
                    }

                    return Ok(tcx.intern_layout(st));
                }

                // At this point, we have handled all unions and
                // structs. (We have also handled univariant enums
                // that allow representation optimization.)
                assert!(def.is_enum());

                // The current code for niche-filling relies on variant indices
                // instead of actual discriminants, so dataful enums with
                // explicit discriminants (RFC #2363) would misbehave.
                let no_explicit_discriminants = def
                    .variants
                    .iter_enumerated()
                    .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i.as_u32()));

                let mut niche_filling_layout = None;

                // Niche-filling enum optimization.
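                // (e.g. `Option<&T>`: the all-zeros bit pattern is invalid for
                // the `&T` payload, so it can encode `None` and the enum needs
                // no separate tag.)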
                if !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants {
                    let mut dataful_variant = None;
                    let mut niche_variants = VariantIdx::MAX..=VariantIdx::new(0);

                    // Find one non-ZST variant.
                    'variants: for (v, fields) in variants.iter_enumerated() {
                        if absent(fields) {
                            continue 'variants;
                        }
                        for f in fields {
                            if !f.is_zst() {
                                if dataful_variant.is_none() {
                                    dataful_variant = Some(v);
                                    continue 'variants;
                                } else {
                                    dataful_variant = None;
                                    break 'variants;
                                }
                            }
                        }
                        niche_variants = *niche_variants.start().min(&v)..=v;
                    }

                    if niche_variants.start() > niche_variants.end() {
                        dataful_variant = None;
                    }

                    if let Some(i) = dataful_variant {
                        let count = (niche_variants.end().as_u32()
                            - niche_variants.start().as_u32()
                            + 1) as u128;

                        // Find the field with the largest niche
                        let niche_candidate = variants[i]
                            .iter()
                            .enumerate()
                            .filter_map(|(j, &field)| Some((j, field.largest_niche.as_ref()?)))
                            .max_by_key(|(_, niche)| niche.available(dl));

                        if let Some((field_index, niche, (niche_start, niche_scalar))) =
                            niche_candidate.and_then(|(field_index, niche)| {
                                Some((field_index, niche, niche.reserve(self, count)?))
                            })
                        {
                            let mut align = dl.aggregate_align;
                            let st = variants
                                .iter_enumerated()
                                .map(|(j, v)| {
                                    let mut st = self.univariant_uninterned(
                                        ty,
                                        v,
                                        &def.repr,
                                        StructKind::AlwaysSized,
                                    )?;
                                    st.variants = Variants::Single { index: j };

                                    align = align.max(st.align);

                                    Ok(st)
                                })
                                .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                            let offset = st[i].fields.offset(field_index) + niche.offset;
                            let size = st[i].size;

                            let abi = if st.iter().all(|v| v.abi.is_uninhabited()) {
                                Abi::Uninhabited
                            } else {
                                match st[i].abi {
                                    Abi::Scalar(_) => Abi::Scalar(niche_scalar.clone()),
                                    Abi::ScalarPair(ref first, ref second) => {
                                        // We need to use scalar_unit to reset the
                                        // valid range to the maximal one for that
                                        // primitive, because only the niche is
                                        // guaranteed to be initialised, not the
                                        // other primitive.
                                        if offset.bytes() == 0 {
                                            Abi::ScalarPair(
                                                niche_scalar.clone(),
                                                scalar_unit(second.value),
                                            )
                                        } else {
                                            Abi::ScalarPair(
                                                scalar_unit(first.value),
                                                niche_scalar.clone(),
                                            )
                                        }
                                    }
                                    _ => Abi::Aggregate { sized: true },
                                }
                            };

                            let largest_niche =
                                Niche::from_scalar(dl, offset, niche_scalar.clone());

                            niche_filling_layout = Some(Layout {
                                variants: Variants::Multiple {
                                    tag: niche_scalar,
                                    tag_encoding: TagEncoding::Niche {
                                        dataful_variant: i,
                                        niche_variants,
                                        niche_start,
                                    },
                                    tag_field: 0,
                                    variants: st,
                                },
                                fields: FieldsShape::Arbitrary {
                                    offsets: vec![offset],
                                    memory_index: vec![0],
                                },
                                abi,
                                largest_niche,
                                size,
                                align,
                            });
                        }
                    }
                }

                let (mut min, mut max) = (i128::MAX, i128::MIN);
                let discr_type = def.repr.discr_type();
                let bits = Integer::from_attr(self, discr_type).size().bits();
                for (i, discr) in def.discriminants(tcx) {
                    if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
                        continue;
                    }
                    let mut x = discr.val as i128;
                    if discr_type.is_signed() {
                        // sign extend the raw representation to be an i128
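                        // (e.g. a raw byte 0xff with bits == 8 becomes -1)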
                        x = (x << (128 - bits)) >> (128 - bits);
                    }
                    if x < min {
                        min = x;
                    }
                    if x > max {
                        max = x;
                    }
                }
                // We might have no inhabited variants, so pretend there's at least one.
                if (min, max) == (i128::MAX, i128::MIN) {
                    min = 0;
                    max = 0;
                }
                assert!(min <= max, "discriminant range is {}...{}", min, max);
                let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max);

                let mut align = dl.aggregate_align;
                let mut size = Size::ZERO;

                // We're interested in the smallest alignment, so start large.
                let mut start_align = Align::from_bytes(256).unwrap();
                assert_eq!(Integer::for_align(dl, start_align), None);

                // repr(C) on an enum tells us to make a (tag, union) layout,
                // so we need to grow the prefix alignment to be at least
                // the alignment of the union. (This value is used both for
                // determining the alignment of the overall enum, and for
                // determining the alignment of the payload after the tag.)
                let mut prefix_align = min_ity.align(dl).abi;
                if def.repr.c() {
                    for fields in &variants {
                        for field in fields {
                            prefix_align = prefix_align.max(field.align.abi);
                        }
                    }
                }

                // Create the set of structs that represent each variant.
                let mut layout_variants = variants
                    .iter_enumerated()
                    .map(|(i, field_layouts)| {
                        let mut st = self.univariant_uninterned(
                            ty,
                            &field_layouts,
                            &def.repr,
                            StructKind::Prefixed(min_ity.size(), prefix_align),
                        )?;
                        st.variants = Variants::Single { index: i };
                        // Find the first field we can't move later
                        // to make room for a larger discriminant.
                        for field in
                            st.fields.index_by_increasing_offset().map(|j| field_layouts[j])
                        {
                            if !field.is_zst() || field.align.abi.bytes() != 1 {
                                start_align = start_align.min(field.align.abi);
                                break;
                            }
                        }
                        size = cmp::max(size, st.size);
                        align = align.max(st.align);
                        Ok(st)
                    })
                    .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                // Align the maximum variant size to the largest alignment.
                size = size.align_to(align.abi);

                if size.bytes() >= dl.obj_size_bound() {
                    return Err(LayoutError::SizeOverflow(ty));
                }

                let typeck_ity = Integer::from_attr(dl, def.repr.discr_type());
                if typeck_ity < min_ity {
                    // It is a bug if layout decided on a larger discriminant type than
                    // typeck did (based on the values the discriminant can take on),
                    // because the discriminant will be loaded and then stored into a
                    // variable of the type computed by typeck. Consider such a case (a
                    // bug): typeck decided on a byte-sized discriminant, but layout
                    // thinks we need 16 bits to store all discriminant values. Then,
                    // in codegen, storing this 16-bit discriminant into an 8-bit sized
                    // temporary would have to discard some of the bits needed to
                    // represent it (or layout is wrong in thinking it needs 16 bits).
                    bug!(
                        "layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
                        min_ity,
                        typeck_ity
                    );
                    // However, it is fine to make the discr type as large as we like (as
                    // an optimisation) after this point; we'll just truncate the value
                    // we load in codegen.
                }
1167
1168                 // Check to see if we should use a different type for the
1169                 // discriminant. We can safely use a type with the same size
1170                 // as the alignment of the first field of each variant.
1171                 // We increase the size of the discriminant to avoid LLVM copying
1172                 // padding when it doesn't need to. This normally causes unaligned
1173                 // load/stores and excessive memcpy/memset operations. By using a
1174                 // bigger integer size, LLVM can be sure about its contents and
1175                 // won't be so conservative.
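                //
                // As an illustrative sketch (an assumed example, not taken from
                // this code): in `enum E { A(u64), B }` with no explicit `#[repr]`,
                // the minimal tag type is `u8`, but the first field of `A` is
                // 8-byte aligned, so the tag may be widened to a `u64`; the former
                // padding bytes then become well-defined tag bytes.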
1176
1177                 // Use the initial field alignment
1178                 let mut ity = if def.repr.c() || def.repr.int.is_some() {
1179                     min_ity
1180                 } else {
1181                     Integer::for_align(dl, start_align).unwrap_or(min_ity)
1182                 };
1183
1184                 // If the alignment-derived integer is not larger than the chosen
1185                 // discriminant size, don't use it; keep the minimal size instead.
1186                 if ity <= min_ity {
1187                     ity = min_ity;
1188                 } else {
1189                     // Patch up the variants' first few fields.
1190                     let old_ity_size = min_ity.size();
1191                     let new_ity_size = ity.size();
1192                     for variant in &mut layout_variants {
1193                         match variant.fields {
1194                             FieldsShape::Arbitrary { ref mut offsets, .. } => {
1195                                 for i in offsets {
1196                                     if *i <= old_ity_size {
1197                                         assert_eq!(*i, old_ity_size);
1198                                         *i = new_ity_size;
1199                                     }
1200                                 }
1201                                 // We might be making the struct larger.
1202                                 if variant.size <= old_ity_size {
1203                                     variant.size = new_ity_size;
1204                                 }
1205                             }
1206                             _ => bug!(),
1207                         }
1208                     }
1209                 }
1210
1211                 let tag_mask = !0u128 >> (128 - ity.size().bits());
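                // (Illustrative: for a one-byte tag, `tag_mask` is 0xff, so a
                // signed range such as -1..=0 becomes the wrapping byte range
                // 0xff..=0x00 below.)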
1212                 let tag = Scalar {
1213                     value: Int(ity, signed),
1214                     valid_range: (min as u128 & tag_mask)..=(max as u128 & tag_mask),
1215                 };
1216                 let mut abi = Abi::Aggregate { sized: true };
1217                 if tag.value.size(dl) == size {
1218                     abi = Abi::Scalar(tag.clone());
1219                 } else {
1220                     // Try to use a ScalarPair for all tagged enums.
1221                     let mut common_prim = None;
1222                     for (field_layouts, layout_variant) in variants.iter().zip(&layout_variants) {
1223                         let offsets = match layout_variant.fields {
1224                             FieldsShape::Arbitrary { ref offsets, .. } => offsets,
1225                             _ => bug!(),
1226                         };
1227                         let mut fields =
1228                             field_layouts.iter().zip(offsets).filter(|p| !p.0.is_zst());
1229                         let (field, offset) = match (fields.next(), fields.next()) {
1230                             (None, None) => continue,
1231                             (Some(pair), None) => pair,
1232                             _ => {
1233                                 common_prim = None;
1234                                 break;
1235                             }
1236                         };
1237                         let prim = match field.abi {
1238                             Abi::Scalar(ref scalar) => scalar.value,
1239                             _ => {
1240                                 common_prim = None;
1241                                 break;
1242                             }
1243                         };
1244                         if let Some(pair) = common_prim {
1245                             // This is pretty conservative. We could go fancier
1246                             // by conflating things like i32 and u32, or even
1247                             // realising that (u8, u8) could just cohabit with
1248                             // u16 or even u32.
1249                             if pair != (prim, offset) {
1250                                 common_prim = None;
1251                                 break;
1252                             }
1253                         } else {
1254                             common_prim = Some((prim, offset));
1255                         }
1256                     }
1257                     if let Some((prim, offset)) = common_prim {
1258                         let pair = self.scalar_pair(tag.clone(), scalar_unit(prim));
1259                         let pair_offsets = match pair.fields {
1260                             FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
1261                                 assert_eq!(memory_index, &[0, 1]);
1262                                 offsets
1263                             }
1264                             _ => bug!(),
1265                         };
1266                         if pair_offsets[0] == Size::ZERO
1267                             && pair_offsets[1] == *offset
1268                             && align == pair.align
1269                             && size == pair.size
1270                         {
1271                             // We can use `ScalarPair` only when it matches our
1272                             // already computed layout (including `#[repr(C)]`).
1273                             abi = pair.abi;
1274                         }
1275                     }
1276                 }
1277
1278                 if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
1279                     abi = Abi::Uninhabited;
1280                 }
1281
1282                 let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag.clone());
1283
1284                 let tagged_layout = Layout {
1285                     variants: Variants::Multiple {
1286                         tag,
1287                         tag_encoding: TagEncoding::Direct,
1288                         tag_field: 0,
1289                         variants: layout_variants,
1290                     },
1291                     fields: FieldsShape::Arbitrary {
1292                         offsets: vec![Size::ZERO],
1293                         memory_index: vec![0],
1294                     },
1295                     largest_niche,
1296                     abi,
1297                     align,
1298                     size,
1299                 };
1300
1301                 let best_layout = match (tagged_layout, niche_filling_layout) {
1302                     (tagged_layout, Some(niche_filling_layout)) => {
1303                         // Pick the smaller layout; otherwise,
1304                         // pick the layout with the larger niche; otherwise,
1305                         // pick tagged as it has simpler codegen.
1306                         cmp::min_by_key(tagged_layout, niche_filling_layout, |layout| {
1307                             let niche_size =
1308                                 layout.largest_niche.as_ref().map_or(0, |n| n.available(dl));
1309                             (layout.size, cmp::Reverse(niche_size))
1310                         })
1311                     }
1312                     (tagged_layout, None) => tagged_layout,
1313                 };
1314
1315                 tcx.intern_layout(best_layout)
1316             }
1317
1318             // Types with no meaningful known layout.
1319             ty::Projection(_) | ty::Opaque(..) => {
1320                 let normalized = tcx.normalize_erasing_regions(param_env, ty);
1321                 if ty == normalized {
1322                     return Err(LayoutError::Unknown(ty));
1323                 }
1324                 tcx.layout_raw(param_env.and(normalized))?
1325             }
1326
1327             ty::Placeholder(..) | ty::GeneratorWitness(..) | ty::Infer(_) => {
1328                 bug!("Layout::compute: unexpected type `{}`", ty)
1329             }
1330
1331             ty::Bound(..) | ty::Param(_) | ty::Error(_) => {
1332                 return Err(LayoutError::Unknown(ty));
1333             }
1334         })
1335     }
1336 }
1337
1338 /// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
1339 #[derive(Clone, Debug, PartialEq)]
1340 enum SavedLocalEligibility {
1341     Unassigned,
1342     Assigned(VariantIdx),
1343     // FIXME: Use newtype_index so we aren't wasting bytes
1344     Ineligible(Option<u32>),
1345 }
1346
1347 // When laying out generators, we divide our saved local fields into two
1348 // categories: overlap-eligible and overlap-ineligible.
1349 //
1350 // Those fields which are ineligible for overlap go in a "prefix" at the
1351 // beginning of the layout, and always have space reserved for them.
1352 //
1353 // Overlap-eligible fields are only assigned to one variant, so we lay
1354 // those fields out for each variant and put them right after the
1355 // prefix.
1356 //
1357 // Finally, in the layout details, we point to the fields from the
1358 // variants they are assigned to. It is possible for some fields to be
1359 // included in multiple variants. No field ever "moves around" in the
1360 // layout; its offset is always the same.
1361 //
1362 // Also included in the layout are the upvars and the discriminant.
1363 // These are included as fields on the "outer" layout; they are not part
1364 // of any variant.
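//
// As an illustrative sketch (an assumed example): in a generator with two
// suspension points where `x: u64` is saved only across the first and
// `y: u64` only across the second, `x` and `y` are each assigned to a single
// variant and may be laid out at the same offset after the prefix, while a
// local saved across both points is ineligible and is promoted to the
// prefix instead.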
1365 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
1366     /// Compute the eligibility and assignment of each local.
1367     fn generator_saved_local_eligibility(
1368         &self,
1369         info: &GeneratorLayout<'tcx>,
1370     ) -> (BitSet<GeneratorSavedLocal>, IndexVec<GeneratorSavedLocal, SavedLocalEligibility>) {
1371         use SavedLocalEligibility::*;
1372
1373         let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> =
1374             IndexVec::from_elem_n(Unassigned, info.field_tys.len());
1375
1376         // The saved locals not eligible for overlap. These will get
1377         // "promoted" to the prefix of our generator.
1378         let mut ineligible_locals = BitSet::new_empty(info.field_tys.len());
1379
1380         // Figure out which of our saved locals are fields in only
1381         // one variant. The rest are deemed ineligible for overlap.
1382         for (variant_index, fields) in info.variant_fields.iter_enumerated() {
1383             for local in fields {
1384                 match assignments[*local] {
1385                     Unassigned => {
1386                         assignments[*local] = Assigned(variant_index);
1387                     }
1388                     Assigned(idx) => {
1389                         // We've already seen this local at another suspension
1390                         // point, so it is no longer a candidate.
1391                         trace!(
1392                             "removing local {:?} in >1 variant ({:?}, {:?})",
1393                             local,
1394                             variant_index,
1395                             idx
1396                         );
1397                         ineligible_locals.insert(*local);
1398                         assignments[*local] = Ineligible(None);
1399                     }
1400                     Ineligible(_) => {}
1401                 }
1402             }
1403         }
1404
1405         // Next, check every pair of eligible locals to see if they
1406         // conflict.
1407         for local_a in info.storage_conflicts.rows() {
1408             let conflicts_a = info.storage_conflicts.count(local_a);
1409             if ineligible_locals.contains(local_a) {
1410                 continue;
1411             }
1412
1413             for local_b in info.storage_conflicts.iter(local_a) {
1414                 // local_a and local_b are storage-live at the same time, therefore they
1415                 // cannot overlap in the generator layout. That is already guaranteed
1416                 // if they are assigned to the same variant, or if one of them is
1417                 // ineligible (which means it is stored in the prefix, in every variant).
1418                 if ineligible_locals.contains(local_b)
1419                     || assignments[local_a] == assignments[local_b]
1420                 {
1421                     continue;
1422                 }
1423
1424                 // If they conflict, we will choose one to make ineligible.
1425                 // This is not always optimal; it's just a greedy heuristic that
1426                 // seems to produce good results most of the time.
1427                 let conflicts_b = info.storage_conflicts.count(local_b);
1428                 let (remove, other) =
1429                     if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
1430                 ineligible_locals.insert(remove);
1431                 assignments[remove] = Ineligible(None);
1432                 trace!("removing local {:?} due to conflict with {:?}", remove, other);
1433             }
1434         }
1435
1436         // Count the number of variants in use. If only one is used, it is
1437         // impossible for any locals to overlap in our layout. In that case it's
1438         // always better to make the remaining locals ineligible, so we can
1439         // lay them out with the other locals in the prefix and eliminate
1440         // unnecessary padding bytes.
1441         {
1442             let mut used_variants = BitSet::new_empty(info.variant_fields.len());
1443             for assignment in &assignments {
1444                 if let Assigned(idx) = assignment {
1445                     used_variants.insert(*idx);
1446                 }
1447             }
1448             if used_variants.count() < 2 {
1449                 for assignment in assignments.iter_mut() {
1450                     *assignment = Ineligible(None);
1451                 }
1452                 ineligible_locals.insert_all();
1453             }
1454         }
1455
1456         // Write down the order of our locals that will be promoted to the prefix.
1457         {
1458             for (idx, local) in ineligible_locals.iter().enumerate() {
1459                 assignments[local] = Ineligible(Some(idx as u32));
1460             }
1461         }
1462         debug!("generator saved local assignments: {:?}", assignments);
1463
1464         (ineligible_locals, assignments)
1465     }
1466
1467     /// Compute the full generator layout.
1468     fn generator_layout(
1469         &self,
1470         ty: Ty<'tcx>,
1471         def_id: hir::def_id::DefId,
1472         substs: SubstsRef<'tcx>,
1473     ) -> Result<&'tcx Layout, LayoutError<'tcx>> {
1474         use SavedLocalEligibility::*;
1475         let tcx = self.tcx;
1476
1477         let subst_field = |ty: Ty<'tcx>| ty.subst(tcx, substs);
1478
1479         let info = tcx.generator_layout(def_id);
1480         let (ineligible_locals, assignments) = self.generator_saved_local_eligibility(&info);
1481
1482         // Build a prefix layout, including "promoting" all ineligible
1483         // locals as part of the prefix. We compute the layout of all of
1484         // these fields at once to get optimal packing.
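        // In field order, the prefix consists of the upvars (`prefix_tys`),
        // then the tag, then the promoted (ineligible) locals, each of the
        // latter wrapped in `MaybeUninit` below since it need not be
        // initialized in every generator state.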
1485         let tag_index = substs.as_generator().prefix_tys().count();
1486
1487         // `info.variant_fields` already accounts for the reserved variants, so no need to add them.
1488         let max_discr = (info.variant_fields.len() - 1) as u128;
1489         let discr_int = Integer::fit_unsigned(max_discr);
1490         let discr_int_ty = discr_int.to_ty(tcx, false);
1491         let tag = Scalar { value: Primitive::Int(discr_int, false), valid_range: 0..=max_discr };
1492         let tag_layout = self.tcx.intern_layout(Layout::scalar(self, tag.clone()));
1493         let tag_layout = TyAndLayout { ty: discr_int_ty, layout: tag_layout };
1494
1495         let promoted_layouts = ineligible_locals
1496             .iter()
1497             .map(|local| subst_field(info.field_tys[local]))
1498             .map(|ty| tcx.mk_maybe_uninit(ty))
1499             .map(|ty| self.layout_of(ty));
1500         let prefix_layouts = substs
1501             .as_generator()
1502             .prefix_tys()
1503             .map(|ty| self.layout_of(ty))
1504             .chain(iter::once(Ok(tag_layout)))
1505             .chain(promoted_layouts)
1506             .collect::<Result<Vec<_>, _>>()?;
1507         let prefix = self.univariant_uninterned(
1508             ty,
1509             &prefix_layouts,
1510             &ReprOptions::default(),
1511             StructKind::AlwaysSized,
1512         )?;
1513
1514         let (prefix_size, prefix_align) = (prefix.size, prefix.align);
1515
1516         // Split the prefix layout into the "outer" fields (upvars and
1517         // discriminant) and the "promoted" fields. Promoted fields will
1518         // get included in each variant that requested them in
1519         // GeneratorLayout.
1520         debug!("prefix = {:#?}", prefix);
1521         let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
1522             FieldsShape::Arbitrary { mut offsets, memory_index } => {
1523                 let mut inverse_memory_index = invert_mapping(&memory_index);
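                // (`invert_mapping` inverts a permutation; for example, an
                // illustrative `invert_mapping(&[2, 0, 1])` yields `[1, 2, 0]`.)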
1524
1525                 // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
1526                 // "outer" and "promoted" fields respectively.
1527                 let b_start = (tag_index + 1) as u32;
1528                 let offsets_b = offsets.split_off(b_start as usize);
1529                 let offsets_a = offsets;
1530
1531                 // Disentangle the "a" and "b" components of `inverse_memory_index`
1532                 // by preserving the order but keeping only one disjoint "half" each.
1533                 // FIXME(eddyb) build a better abstraction for permutations, if possible.
1534                 let inverse_memory_index_b: Vec<_> =
1535                     inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
1536                 inverse_memory_index.retain(|&i| i < b_start);
1537                 let inverse_memory_index_a = inverse_memory_index;
1538
1539                 // Since `inverse_memory_index_{a,b}` each refer only to their
1540                 // respective fields, they can be safely inverted.
1541                 let memory_index_a = invert_mapping(&inverse_memory_index_a);
1542                 let memory_index_b = invert_mapping(&inverse_memory_index_b);
1543
1544                 let outer_fields =
1545                     FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
1546                 (outer_fields, offsets_b, memory_index_b)
1547             }
1548             _ => bug!(),
1549         };
1550
1551         let mut size = prefix.size;
1552         let mut align = prefix.align;
1553         let variants = info
1554             .variant_fields
1555             .iter_enumerated()
1556             .map(|(index, variant_fields)| {
1557                 // Only include overlap-eligible fields when we compute our variant layout.
1558                 let variant_only_tys = variant_fields
1559                     .iter()
1560                     .filter(|local| match assignments[**local] {
1561                         Unassigned => bug!(),
1562                         Assigned(v) if v == index => true,
1563                         Assigned(_) => bug!("assignment does not match variant"),
1564                         Ineligible(_) => false,
1565                     })
1566                     .map(|local| subst_field(info.field_tys[*local]));
1567
1568                 let mut variant = self.univariant_uninterned(
1569                     ty,
1570                     &variant_only_tys
1571                         .map(|ty| self.layout_of(ty))
1572                         .collect::<Result<Vec<_>, _>>()?,
1573                     &ReprOptions::default(),
1574                     StructKind::Prefixed(prefix_size, prefix_align.abi),
1575                 )?;
1576                 variant.variants = Variants::Single { index };
1577
1578                 let (offsets, memory_index) = match variant.fields {
1579                     FieldsShape::Arbitrary { offsets, memory_index } => (offsets, memory_index),
1580                     _ => bug!(),
1581                 };
1582
1583                 // Now, stitch the promoted and variant-only fields back together in
1584                 // the order they are mentioned by our GeneratorLayout.
1585                 // Because we only use some subset (that can differ between variants)
1586                 // of the promoted fields, we can't just pick those elements of the
1587                 // `promoted_memory_index` (as we'd end up with gaps).
1588                 // So instead, we build an "inverse memory_index", as if all of the
1589                 // promoted fields were being used, but leave the elements not in the
1590                 // subset as `INVALID_FIELD_IDX`, which we can filter out later to
1591                 // obtain a valid (bijective) mapping.
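                // For example (an illustrative case): with two promoted fields, a
                // variant using only the second of them plus one variant-only
                // field fills two slots of the combined inverse index and leaves
                // one `INVALID_FIELD_IDX` slot to be filtered out before inverting.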
1592                 const INVALID_FIELD_IDX: u32 = !0;
1593                 let mut combined_inverse_memory_index =
1594                     vec![INVALID_FIELD_IDX; promoted_memory_index.len() + memory_index.len()];
1595                 let mut offsets_and_memory_index = offsets.into_iter().zip(memory_index);
1596                 let combined_offsets = variant_fields
1597                     .iter()
1598                     .enumerate()
1599                     .map(|(i, local)| {
1600                         let (offset, memory_index) = match assignments[*local] {
1601                             Unassigned => bug!(),
1602                             Assigned(_) => {
1603                                 let (offset, memory_index) =
1604                                     offsets_and_memory_index.next().unwrap();
1605                                 (offset, promoted_memory_index.len() as u32 + memory_index)
1606                             }
1607                             Ineligible(field_idx) => {
1608                                 let field_idx = field_idx.unwrap() as usize;
1609                                 (promoted_offsets[field_idx], promoted_memory_index[field_idx])
1610                             }
1611                         };
1612                         combined_inverse_memory_index[memory_index as usize] = i as u32;
1613                         offset
1614                     })
1615                     .collect();
1616
1617                 // Remove the unused slots and invert the mapping to obtain the
1618                 // combined `memory_index` (also see previous comment).
1619                 combined_inverse_memory_index.retain(|&i| i != INVALID_FIELD_IDX);
1620                 let combined_memory_index = invert_mapping(&combined_inverse_memory_index);
1621
1622                 variant.fields = FieldsShape::Arbitrary {
1623                     offsets: combined_offsets,
1624                     memory_index: combined_memory_index,
1625                 };
1626
1627                 size = size.max(variant.size);
1628                 align = align.max(variant.align);
1629                 Ok(variant)
1630             })
1631             .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1632
1633         size = size.align_to(align.abi);
1634
1635         let abi = if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi.is_uninhabited())
1636         {
1637             Abi::Uninhabited
1638         } else {
1639             Abi::Aggregate { sized: true }
1640         };
1641
1642         let layout = tcx.intern_layout(Layout {
1643             variants: Variants::Multiple {
1644                 tag,
1645                 tag_encoding: TagEncoding::Direct,
1646                 tag_field: tag_index,
1647                 variants,
1648             },
1649             fields: outer_fields,
1650             abi,
1651             largest_niche: prefix.largest_niche,
1652             size,
1653             align,
1654         });
1655         debug!("generator layout ({:?}): {:#?}", ty, layout);
1656         Ok(layout)
1657     }
1658
1659     /// This is invoked by the `layout_raw` query to record the final
1660     /// layout of each type.
1661     #[inline(always)]
1662     fn record_layout_for_printing(&self, layout: TyAndLayout<'tcx>) {
1663         // If we are running with `-Zprint-type-sizes`, maybe record layouts
1664         // for dumping later.
1665         if self.tcx.sess.opts.debugging_opts.print_type_sizes {
1666             self.record_layout_for_printing_outlined(layout)
1667         }
1668     }
1669
1670     fn record_layout_for_printing_outlined(&self, layout: TyAndLayout<'tcx>) {
1671         // Ignore layouts computed under non-empty caller bounds, as well as
1672         // non-monomorphic layouts: the user only wants to see what results
1673         // from the final codegen session.
1674         if layout.ty.has_param_types_or_consts() || !self.param_env.caller_bounds().is_empty() {
1675             return;
1676         }
1677
1678         // (delay format until we actually need it)
1679         let record = |kind, packed, opt_discr_size, variants| {
1680             let type_desc = format!("{:?}", layout.ty);
1681             self.tcx.sess.code_stats.record_type_size(
1682                 kind,
1683                 type_desc,
1684                 layout.align.abi,
1685                 layout.size,
1686                 packed,
1687                 opt_discr_size,
1688                 variants,
1689             );
1690         };
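        // (The entries recorded here are dumped by the session's `CodeStats`
        // at the end of compilation when `-Zprint-type-sizes` is enabled.)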
1691
1692         let adt_def = match *layout.ty.kind() {
1693             ty::Adt(ref adt_def, _) => {
1694                 debug!("print-type-size t: `{:?}` process adt", layout.ty);
1695                 adt_def
1696             }
1697
1698             ty::Closure(..) => {
1699                 debug!("print-type-size t: `{:?}` record closure", layout.ty);
1700                 record(DataTypeKind::Closure, false, None, vec![]);
1701                 return;
1702             }
1703
1704             _ => {
1705                 debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
1706                 return;
1707             }
1708         };
1709
1710         let adt_kind = adt_def.adt_kind();
1711         let adt_packed = adt_def.repr.pack.is_some();
1712
1713         let build_variant_info = |n: Option<Ident>, flds: &[Symbol], layout: TyAndLayout<'tcx>| {
1714             let mut min_size = Size::ZERO;
1715             let field_info: Vec<_> = flds
1716                 .iter()
1717                 .enumerate()
1718                 .map(|(i, &name)| match layout.field(self, i) {
1719                     Err(err) => {
1720                         bug!("no layout found for field {}: `{:?}`", name, err);
1721                     }
1722                     Ok(field_layout) => {
1723                         let offset = layout.fields.offset(i);
1724                         let field_end = offset + field_layout.size;
1725                         if min_size < field_end {
1726                             min_size = field_end;
1727                         }
1728                         FieldInfo {
1729                             name: name.to_string(),
1730                             offset: offset.bytes(),
1731                             size: field_layout.size.bytes(),
1732                             align: field_layout.align.abi.bytes(),
1733                         }
1734                     }
1735                 })
1736                 .collect();
1737
1738             VariantInfo {
1739                 name: n.map(|n| n.to_string()),
1740                 kind: if layout.is_unsized() { SizeKind::Min } else { SizeKind::Exact },
1741                 align: layout.align.abi.bytes(),
1742                 size: if min_size.bytes() == 0 { layout.size.bytes() } else { min_size.bytes() },
1743                 fields: field_info,
1744             }
1745         };
1746
1747         match layout.variants {
1748             Variants::Single { index } => {
1749                 debug!("print-type-size `{:#?}` variant {}", layout, adt_def.variants[index].ident);
1750                 if !adt_def.variants.is_empty() {
1751                     let variant_def = &adt_def.variants[index];
1752                     let fields: Vec<_> = variant_def.fields.iter().map(|f| f.ident.name).collect();
1753                     record(
1754                         adt_kind.into(),
1755                         adt_packed,
1756                         None,
1757                         vec![build_variant_info(Some(variant_def.ident), &fields, layout)],
1758                     );
1759                 } else {
1760                     // (This case arises for *empty* enums, so record it with
1761                     // zero variants.)
1762                     record(adt_kind.into(), adt_packed, None, vec![]);
1763                 }
1764             }
1765
1766             Variants::Multiple { ref tag, ref tag_encoding, .. } => {
1767                 debug!(
1768                     "print-type-size `{:#?}` adt general variants def {}",
1769                     layout.ty,
1770                     adt_def.variants.len()
1771                 );
1772                 let variant_infos: Vec<_> = adt_def
1773                     .variants
1774                     .iter_enumerated()
1775                     .map(|(i, variant_def)| {
1776                         let fields: Vec<_> =
1777                             variant_def.fields.iter().map(|f| f.ident.name).collect();
1778                         build_variant_info(
1779                             Some(variant_def.ident),
1780                             &fields,
1781                             layout.for_variant(self, i),
1782                         )
1783                     })
1784                     .collect();
1785                 record(
1786                     adt_kind.into(),
1787                     adt_packed,
1788                     match tag_encoding {
1789                         TagEncoding::Direct => Some(tag.value.size(self)),
1790                         _ => None,
1791                     },
1792                     variant_infos,
1793                 );
1794             }
1795         }
1796     }
1797 }
1798
1799 /// Type size "skeleton", i.e., the only information determining a type's size.
1800 /// While this is conservative (aside from constant sizes, only pointers,
1801 /// newtypes thereof, and null-pointer-optimized enums are allowed), it is
1802 /// enough to statically check common use cases of transmute.
1803 #[derive(Copy, Clone, Debug)]
1804 pub enum SizeSkeleton<'tcx> {
1805     /// Any statically computable Layout.
1806     Known(Size),
1807
1808     /// A potentially-fat pointer.
1809     Pointer {
1810         /// If true, this pointer is never null.
1811         non_zero: bool,
1812         /// The type which determines the unsized metadata, if any,
1813         /// of this pointer. Either a type parameter or a projection
1814         /// depending on one, with regions erased.
1815         tail: Ty<'tcx>,
1816     },
1817 }
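//
// As an illustrative sketch (an assumed example): for a generic `&'a T` no
// static layout is known, but its skeleton is
// `Pointer { non_zero: true, tail: T }`; `Option<&'a T>` gets
// `Pointer { non_zero: false, tail: T }` via the nullable-pointer case below,
// so `same_size` can still confirm that transmuting between the two is
// size-preserving.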
1818
1819 impl<'tcx> SizeSkeleton<'tcx> {
1820     pub fn compute(
1821         ty: Ty<'tcx>,
1822         tcx: TyCtxt<'tcx>,
1823         param_env: ty::ParamEnv<'tcx>,
1824     ) -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
1825         debug_assert!(!ty.has_infer_types_or_consts());
1826
1827         // First try computing a static layout.
1828         let err = match tcx.layout_of(param_env.and(ty)) {
1829             Ok(layout) => {
1830                 return Ok(SizeSkeleton::Known(layout.size));
1831             }
1832             Err(err) => err,
1833         };
1834
1835         match *ty.kind() {
1836             ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
1837                 let non_zero = !ty.is_unsafe_ptr();
1838                 let tail = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
1839                 match tail.kind() {
1840                     ty::Param(_) | ty::Projection(_) => {
1841                         debug_assert!(tail.has_param_types_or_consts());
1842                         Ok(SizeSkeleton::Pointer { non_zero, tail: tcx.erase_regions(tail) })
1843                     }
1844                     _ => bug!(
1845                         "SizeSkeleton::compute({}): layout errored ({}), yet \
1846                               tail `{}` is not a type parameter or a projection",
1847                         ty,
1848                         err,
1849                         tail
1850                     ),
1851                 }
1852             }
1853
1854             ty::Adt(def, substs) => {
1855                 // Only newtypes and enums w/ nullable pointer optimization.
1856                 if def.is_union() || def.variants.is_empty() || def.variants.len() > 2 {
1857                     return Err(err);
1858                 }
1859
1860                 // Get a zero-sized variant or a pointer newtype.
1861                 let zero_or_ptr_variant = |i| {
1862                     let i = VariantIdx::new(i);
1863                     let fields = def.variants[i]
1864                         .fields
1865                         .iter()
1866                         .map(|field| SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env));
1867                     let mut ptr = None;
1868                     for field in fields {
1869                         let field = field?;
1870                         match field {
1871                             SizeSkeleton::Known(size) => {
1872                                 if size.bytes() > 0 {
1873                                     return Err(err);
1874                                 }
1875                             }
1876                             SizeSkeleton::Pointer { .. } => {
1877                                 if ptr.is_some() {
1878                                     return Err(err);
1879                                 }
1880                                 ptr = Some(field);
1881                             }
1882                         }
1883                     }
1884                     Ok(ptr)
1885                 };
1886
1887                 let v0 = zero_or_ptr_variant(0)?;
1888                 // Newtype.
1889                 if def.variants.len() == 1 {
1890                     if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
1891                         return Ok(SizeSkeleton::Pointer {
1892                             non_zero: non_zero
1893                                 || match tcx.layout_scalar_valid_range(def.did) {
1894                                     (Bound::Included(start), Bound::Unbounded) => start > 0,
1895                                     (Bound::Included(start), Bound::Included(end)) => {
1896                                         0 < start && start < end
1897                                     }
1898                                     _ => false,
1899                                 },
1900                             tail,
1901                         });
1902                     } else {
1903                         return Err(err);
1904                     }
1905                 }
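                // (Illustrative: a newtype such as `NonNull<T>`, which carries
                // `#[rustc_layout_scalar_valid_range_start(1)]`, takes the branch
                // above and has its skeleton pointer marked `non_zero`.)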
1906
1907                 let v1 = zero_or_ptr_variant(1)?;
1908                 // Nullable pointer enum optimization.
1909                 match (v0, v1) {
1910                     (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None)
1911                     | (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
1912                         Ok(SizeSkeleton::Pointer { non_zero: false, tail })
1913                     }
1914                     _ => Err(err),
1915                 }
1916             }
1917
1918             ty::Projection(_) | ty::Opaque(..) => {
1919                 let normalized = tcx.normalize_erasing_regions(param_env, ty);
1920                 if ty == normalized {
1921                     Err(err)
1922                 } else {
1923                     SizeSkeleton::compute(normalized, tcx, param_env)
1924                 }
1925             }
1926
1927             _ => Err(err),
1928         }
1929     }
1930
1931     pub fn same_size(self, other: SizeSkeleton<'_>) -> bool {
1932         match (self, other) {
1933             (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
1934             (SizeSkeleton::Pointer { tail: a, .. }, SizeSkeleton::Pointer { tail: b, .. }) => {
1935                 a == b
1936             }
1937             _ => false,
1938         }
1939     }
1940 }
1941
1942 pub trait HasTyCtxt<'tcx>: HasDataLayout {
1943     fn tcx(&self) -> TyCtxt<'tcx>;
1944 }
1945
1946 pub trait HasParamEnv<'tcx> {
1947     fn param_env(&self) -> ty::ParamEnv<'tcx>;
1948 }
1949
1950 impl<'tcx> HasDataLayout for TyCtxt<'tcx> {
1951     fn data_layout(&self) -> &TargetDataLayout {
1952         &self.data_layout
1953     }
1954 }
1955
1956 impl<'tcx> HasTyCtxt<'tcx> for TyCtxt<'tcx> {
1957     fn tcx(&self) -> TyCtxt<'tcx> {
1958         *self
1959     }
1960 }
1961
1962 impl<'tcx, C> HasParamEnv<'tcx> for LayoutCx<'tcx, C> {
1963     fn param_env(&self) -> ty::ParamEnv<'tcx> {
1964         self.param_env
1965     }
1966 }
1967
1968 impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
1969     fn data_layout(&self) -> &TargetDataLayout {
1970         self.tcx.data_layout()
1971     }
1972 }
1973
1974 impl<'tcx, T: HasTyCtxt<'tcx>> HasTyCtxt<'tcx> for LayoutCx<'tcx, T> {
1975     fn tcx(&self) -> TyCtxt<'tcx> {
1976         self.tcx.tcx()
1977     }
1978 }
1979
1980 pub type TyAndLayout<'tcx> = rustc_target::abi::TyAndLayout<'tcx, Ty<'tcx>>;
1981
1982 impl<'tcx> LayoutOf for LayoutCx<'tcx, TyCtxt<'tcx>> {
1983     type Ty = Ty<'tcx>;
1984     type TyAndLayout = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
1985
1986     /// Computes the layout of a type. Note that this implicitly
1987     /// executes in "reveal all" mode.
1988     fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout {
1989         let param_env = self.param_env.with_reveal_all_normalized(self.tcx);
1990         let ty = self.tcx.normalize_erasing_regions(param_env, ty);
1991         let layout = self.tcx.layout_raw(param_env.and(ty))?;
1992         let layout = TyAndLayout { ty, layout };
1993
1994         // N.B., this recording is normally disabled; when enabled, it
1995         // can however trigger recursive invocations of `layout_of`.
1996         // Therefore, we execute it *after* the main query has
1997         // completed, to avoid problems around recursive structures
1998         // and the like. (Admittedly, I wasn't able to reproduce a problem
1999         // here, but it seems like the right thing to do. -nmatsakis)
2000         self.record_layout_for_printing(layout);
2001
2002         Ok(layout)
2003     }
2004 }
2005
2006 impl LayoutOf for LayoutCx<'tcx, ty::query::TyCtxtAt<'tcx>> {
2007     type Ty = Ty<'tcx>;
2008     type TyAndLayout = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
2009
2010     /// Computes the layout of a type. Note that this implicitly
2011     /// executes in "reveal all" mode.
2012     fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout {
2013         let param_env = self.param_env.with_reveal_all_normalized(*self.tcx);
2014         let ty = self.tcx.normalize_erasing_regions(param_env, ty);
2015         let layout = self.tcx.layout_raw(param_env.and(ty))?;
2016         let layout = TyAndLayout { ty, layout };
2017
2018         // N.B., this recording is normally disabled; when enabled, it
2019         // can however trigger recursive invocations of `layout_of`.
2020         // Therefore, we execute it *after* the main query has
2021         // completed, to avoid problems around recursive structures
2022         // and the like. (Admittedly, I wasn't able to reproduce a problem
2023         // here, but it seems like the right thing to do. -nmatsakis)
2024         let cx = LayoutCx { tcx: *self.tcx, param_env: self.param_env };
2025         cx.record_layout_for_printing(layout);
2026
2027         Ok(layout)
2028     }
2029 }
2030
2031 // Helper (inherent) `layout_of` methods to avoid pushing `LayoutCx` to users.
2032 impl TyCtxt<'tcx> {
2033     /// Computes the layout of a type. Note that this implicitly
2034     /// executes in "reveal all" mode.
2035     #[inline]
2036     pub fn layout_of(
2037         self,
2038         param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
2039     ) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
2040         let cx = LayoutCx { tcx: self, param_env: param_env_and_ty.param_env };
2041         cx.layout_of(param_env_and_ty.value)
2042     }
2043 }
2044
2045 impl ty::query::TyCtxtAt<'tcx> {
2046     /// Computes the layout of a type. Note that this implicitly
2047     /// executes in "reveal all" mode.
2048     #[inline]
2049     pub fn layout_of(
2050         self,
2051         param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
2052     ) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
2053         let cx = LayoutCx { tcx: self.at(self.span), param_env: param_env_and_ty.param_env };
2054         cx.layout_of(param_env_and_ty.value)
2055     }
2056 }
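//
// A hedged usage sketch (caller code assumed, not from this file):
//
//     let layout = tcx.layout_of(ty::ParamEnv::reveal_all().and(some_ty))?;
//     let bytes = layout.size.bytes();
//
// i.e. callers obtain layouts without constructing a `LayoutCx` by hand.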
2057
2058 impl<'tcx, C> TyAndLayoutMethods<'tcx, C> for Ty<'tcx>
2059 where
2060     C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout: MaybeResult<TyAndLayout<'tcx>>>
2061         + HasTyCtxt<'tcx>
2062         + HasParamEnv<'tcx>,
2063 {
2064     fn for_variant(
2065         this: TyAndLayout<'tcx>,
2066         cx: &C,
2067         variant_index: VariantIdx,
2068     ) -> TyAndLayout<'tcx> {
2069         let layout = match this.variants {
2070             Variants::Single { index }
2071                 // If all variants but one are uninhabited, the variant layout is the enum layout.
2072                 if index == variant_index &&
2073                 // Don't confuse variants of uninhabited enums with the enum itself.
2074                 // For more details see https://github.com/rust-lang/rust/issues/69763.
2075                 this.fields != FieldsShape::Primitive =>
2076             {
2077                 this.layout
2078             }
2079
2080             Variants::Single { index } => {
2081                 // Deny calling for_variant more than once for non-Single enums.
2082                 if let Ok(original_layout) = cx.layout_of(this.ty).to_result() {
2083                     assert_eq!(original_layout.variants, Variants::Single { index });
2084                 }
2085
2086                 let fields = match this.ty.kind() {
2087                     ty::Adt(def, _) if def.variants.is_empty() =>
2088                         bug!("for_variant called on zero-variant enum"),
2089                     ty::Adt(def, _) => def.variants[variant_index].fields.len(),
2090                     _ => bug!(),
2091                 };
2092                 let tcx = cx.tcx();
2093                 tcx.intern_layout(Layout {
2094                     variants: Variants::Single { index: variant_index },
2095                     fields: match NonZeroUsize::new(fields) {
2096                         Some(fields) => FieldsShape::Union(fields),
2097                         None => FieldsShape::Arbitrary { offsets: vec![], memory_index: vec![] },
2098                     },
2099                     abi: Abi::Uninhabited,
2100                     largest_niche: None,
2101                     align: tcx.data_layout.i8_align,
2102                     size: Size::ZERO,
2103                 })
2104             }
2105
2106             Variants::Multiple { ref variants, .. } => &variants[variant_index],
2107         };
2108
2109         assert_eq!(layout.variants, Variants::Single { index: variant_index });
2110
2111         TyAndLayout { ty: this.ty, layout }
2112     }
2113
2114     fn field(this: TyAndLayout<'tcx>, cx: &C, i: usize) -> C::TyAndLayout {
2115         enum TyMaybeWithLayout<C: LayoutOf> {
2116             Ty(C::Ty),
2117             TyAndLayout(C::TyAndLayout),
2118         }
2119
2120         fn ty_and_layout_kind<
2121             C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout: MaybeResult<TyAndLayout<'tcx>>>
2122                 + HasTyCtxt<'tcx>
2123                 + HasParamEnv<'tcx>,
2124         >(
2125             this: TyAndLayout<'tcx>,
2126             cx: &C,
2127             i: usize,
2128             ty: C::Ty,
2129         ) -> TyMaybeWithLayout<C> {
2130             let tcx = cx.tcx();
2131             let tag_layout = |tag: &Scalar| -> C::TyAndLayout {
2132                 let layout = Layout::scalar(cx, tag.clone());
2133                 MaybeResult::from(Ok(TyAndLayout {
2134                     layout: tcx.intern_layout(layout),
2135                     ty: tag.value.to_ty(tcx),
2136                 }))
2137             };
2138
2139             match *ty.kind() {
2140                 ty::Bool
2141                 | ty::Char
2142                 | ty::Int(_)
2143                 | ty::Uint(_)
2144                 | ty::Float(_)
2145                 | ty::FnPtr(_)
2146                 | ty::Never
2147                 | ty::FnDef(..)
2148                 | ty::GeneratorWitness(..)
2149                 | ty::Foreign(..)
2150                 | ty::Dynamic(..) => bug!("TyAndLayout::field_type({:?}): not applicable", this),
2151
2152                 // Potentially-fat pointers.
2153                 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
2154                     assert!(i < this.fields.count());
2155
2156                     // Reuse the fat `*T` type as its own thin pointer data field.
2157                     // This provides information about, e.g., DST struct pointees
2158                     // (which may have no non-DST form), and will work as long
2159                     // as the `Abi` or `FieldsShape` is checked by users.
2160                     if i == 0 {
2161                         let nil = tcx.mk_unit();
2162                         let ptr_ty = if ty.is_unsafe_ptr() {
2163                             tcx.mk_mut_ptr(nil)
2164                         } else {
2165                             tcx.mk_mut_ref(tcx.lifetimes.re_static, nil)
2166                         };
2167                         return TyMaybeWithLayout::TyAndLayout(MaybeResult::from(
2168                             cx.layout_of(ptr_ty).to_result().map(|mut ptr_layout| {
2169                                 ptr_layout.ty = ty;
2170                                 ptr_layout
2171                             }),
2172                         ));
2173                     }
2174
2175                     match tcx.struct_tail_erasing_lifetimes(pointee, cx.param_env()).kind() {
2176                         ty::Slice(_) | ty::Str => TyMaybeWithLayout::Ty(tcx.types.usize),
2177                         ty::Dynamic(_, _) => {
2178                             TyMaybeWithLayout::Ty(tcx.mk_imm_ref(
2179                                 tcx.lifetimes.re_static,
2180                                 tcx.mk_array(tcx.types.usize, 3),
2181                             ))
2182                             /* FIXME: use actual fn pointers
2183                             Warning: naively computing the number of entries in the
2184                             vtable by counting the methods on the trait + methods on
2185                             all parent traits does not work, because some methods can
2186                             be not object safe and thus excluded from the vtable.
2187                             Increase this counter if you tried to implement this but
2188                             failed to do it without duplicating a lot of code from
2189                             other places in the compiler: 2
2190                             tcx.mk_tup(&[
2191                                 tcx.mk_array(tcx.types.usize, 3),
2192                                 tcx.mk_array(Option<fn()>),
2193                             ])
2194                             */
2195                         }
2196                         _ => bug!("TyAndLayout::field_type({:?}): not applicable", this),
2197                     }
2198                 }
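
                // (Illustrative: for `&[u8]`, field 0 is the thin data pointer
                // and field 1 is the `usize` length; for `&dyn Trait`, field 1
                // is the vtable pointer, approximated above as a reference to
                // `[usize; 3]`.)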
2199
2200                 // Arrays and slices.
2201                 ty::Array(element, _) | ty::Slice(element) => TyMaybeWithLayout::Ty(element),
2202                 ty::Str => TyMaybeWithLayout::Ty(tcx.types.u8),
2203
2204                 // Tuples, generators and closures.
2205                 ty::Closure(_, ref substs) => {
2206                     ty_and_layout_kind(this, cx, i, substs.as_closure().tupled_upvars_ty())
2207                 }
2208
2209                 ty::Generator(def_id, ref substs, _) => match this.variants {
2210                     Variants::Single { index } => TyMaybeWithLayout::Ty(
2211                         substs
2212                             .as_generator()
2213                             .state_tys(def_id, tcx)
2214                             .nth(index.as_usize())
2215                             .unwrap()
2216                             .nth(i)
2217                             .unwrap(),
2218                     ),
2219                     Variants::Multiple { ref tag, tag_field, .. } => {
2220                         if i == tag_field {
2221                             return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
2222                         }
2223                         TyMaybeWithLayout::Ty(substs.as_generator().prefix_tys().nth(i).unwrap())
2224                     }
2225                 },
2226
2227                 ty::Tuple(tys) => TyMaybeWithLayout::Ty(tys[i].expect_ty()),
2228
2229                 // ADTs.
2230                 ty::Adt(def, substs) => {
2231                     match this.variants {
2232                         Variants::Single { index } => {
2233                             TyMaybeWithLayout::Ty(def.variants[index].fields[i].ty(tcx, substs))
2234                         }
2235
2236                         // Discriminant field for enums (where applicable).
2237                         Variants::Multiple { ref tag, .. } => {
2238                             assert_eq!(i, 0);
2239                             return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
2240                         }
2241                     }
2242                 }
2243
2244                 ty::Projection(_)
2245                 | ty::Bound(..)
2246                 | ty::Placeholder(..)
2247                 | ty::Opaque(..)
2248                 | ty::Param(_)
2249                 | ty::Infer(_)
2250                 | ty::Error(_) => bug!("TyAndLayout::field_type: unexpected type `{}`", this.ty),
2251             }
2252         }
2253
2254         cx.layout_of(match ty_and_layout_kind(this, cx, i, this.ty) {
2255             TyMaybeWithLayout::Ty(result) => result,
2256             TyMaybeWithLayout::TyAndLayout(result) => return result,
2257         })
2258     }
2259
2260     fn pointee_info_at(this: TyAndLayout<'tcx>, cx: &C, offset: Size) -> Option<PointeeInfo> {
2261         let addr_space_of_ty = |ty: Ty<'tcx>| {
2262             if ty.is_fn() { cx.data_layout().instruction_address_space } else { AddressSpace::DATA }
2263         };
2264
2265         let pointee_info = match *this.ty.kind() {
2266             ty::RawPtr(mt) if offset.bytes() == 0 => {
2267                 cx.layout_of(mt.ty).to_result().ok().map(|layout| PointeeInfo {
2268                     size: layout.size,
2269                     align: layout.align.abi,
2270                     safe: None,
2271                     address_space: addr_space_of_ty(mt.ty),
2272                 })
2273             }
2274             ty::FnPtr(fn_sig) if offset.bytes() == 0 => {
2275                 cx.layout_of(cx.tcx().mk_fn_ptr(fn_sig)).to_result().ok().map(|layout| {
2276                     PointeeInfo {
2277                         size: layout.size,
2278                         align: layout.align.abi,
2279                         safe: None,
2280                         address_space: cx.data_layout().instruction_address_space,
2281                     }
2282                 })
2283             }
2284             ty::Ref(_, ty, mt) if offset.bytes() == 0 => {
2285                 let address_space = addr_space_of_ty(ty);
2286                 let tcx = cx.tcx();
2287                 let is_freeze = ty.is_freeze(tcx.at(DUMMY_SP), cx.param_env());
2288                 let kind = match mt {
2289                     hir::Mutability::Not => {
2290                         if is_freeze {
2291                             PointerKind::Frozen
2292                         } else {
2293                             PointerKind::Shared
2294                         }
2295                     }
2296                     hir::Mutability::Mut => {
2297                         // Previously we would only emit noalias annotations for LLVM >= 6 or in
2298                         // panic=abort mode. That was deemed right, as prior versions had many bugs
2299                         // in conjunction with unwinding, but later versions didn’t seem to have
2300                         // said issues. See issue #31681.
2301                         //
2302                         // Alas, later on we encountered a case where noalias would generate wrong
2303                         // code altogether even with recent versions of LLVM in *safe* code with no
2304                         // unwinding involved. See #54462.
2305                         //
2306                         // For now, do not enable mutable_noalias by default at all, while the
2307                         // issue is being figured out.
2308                         if tcx.sess.opts.debugging_opts.mutable_noalias {
2309                             PointerKind::UniqueBorrowed
2310                         } else {
2311                             PointerKind::Shared
2312                         }
2313                     }
2314                 };
2315
2316                 cx.layout_of(ty).to_result().ok().map(|layout| PointeeInfo {
2317                     size: layout.size,
2318                     align: layout.align.abi,
2319                     safe: Some(kind),
2320                     address_space,
2321                 })
2322             }
2323
2324             _ => {
2325                 let mut data_variant = match this.variants {
2326                     // Within the discriminant field, only the niche itself is
2327                     // always initialized, so we only check for a pointer at its
2328                     // offset.
2329                     //
2330                     // If the niche is a pointer, it's either valid (according
2331                     // to its type), or null (which the niche field's scalar
2332                     // validity range encodes).  This allows using
2333                     // `dereferenceable_or_null` for e.g., `Option<&T>`, and
2334                     // this will continue to work as long as we don't start
2335                     // using more niches than just null (e.g., the first page of
2336                     // the address space, or unaligned pointers).
2337                     Variants::Multiple {
2338                         tag_encoding: TagEncoding::Niche { dataful_variant, .. },
2339                         tag_field,
2340                         ..
2341                     } if this.fields.offset(tag_field) == offset => {
2342                         Some(this.for_variant(cx, dataful_variant))
2343                     }
2344                     _ => Some(this),
2345                 };
2346
2347                 if let Some(variant) = data_variant {
2348                     // We're not interested in any unions.
2349                     if let FieldsShape::Union(_) = variant.fields {
2350                         data_variant = None;
2351                     }
2352                 }
2353
2354                 let mut result = None;
2355
2356                 if let Some(variant) = data_variant {
2357                     let ptr_end = offset + Pointer.size(cx);
2358                     for i in 0..variant.fields.count() {
2359                         let field_start = variant.fields.offset(i);
2360                         if field_start <= offset {
2361                             let field = variant.field(cx, i);
2362                             result = field.to_result().ok().and_then(|field| {
2363                                 if ptr_end <= field_start + field.size {
2364                                     // We found the right field: the pointer we
2365                                     // are describing lies within it, so recurse
2366                                     // into the field's layout at the new offset.
2367                                     field.pointee_info_at(cx, offset - field_start)
2368                                 } else {
2369                                     None
2370                                 }
2371                             });
2372                             if result.is_some() {
2373                                 break;
2374                             }
2375                         }
2376                     }
2377                 }
2378
2379                 // FIXME(eddyb) This should be for `ptr::Unique<T>`, not `Box<T>`.
2380                 if let Some(ref mut pointee) = result {
2381                     if let ty::Adt(def, _) = this.ty.kind() {
2382                         if def.is_box() && offset.bytes() == 0 {
2383                             pointee.safe = Some(PointerKind::UniqueOwned);
2384                         }
2385                     }
2386                 }
2387
2388                 result
2389             }
2390         };
2391
2392         debug!(
2393             "pointee_info_at (offset={:?}, type kind: {:?}) => {:?}",
2394             offset,
2395             this.ty.kind(),
2396             pointee_info
2397         );
2398
2399         pointee_info
2400     }
2401 }
2402
2403 impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for LayoutError<'tcx> {
2404     fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
2405         use crate::ty::layout::LayoutError::*;
2406         mem::discriminant(self).hash_stable(hcx, hasher);
2407
2408         match *self {
2409             Unknown(t) | SizeOverflow(t) => t.hash_stable(hcx, hasher),
2410         }
2411     }
2412 }
2413
2414 impl<'tcx> ty::Instance<'tcx> {
2415     // NOTE(eddyb) this is private to avoid using it from outside of
2416     // `FnAbi::of_instance` - any other uses are either too high-level
2417     // for `Instance` (e.g. typeck would use `Ty::fn_sig` instead),
2418     // or should go through `FnAbi` instead, to avoid losing any
2419     // adjustments `FnAbi::of_instance` might be performing.
2420     fn fn_sig_for_fn_abi(&self, tcx: TyCtxt<'tcx>) -> ty::PolyFnSig<'tcx> {
2421         // FIXME(davidtwco,eddyb): A `ParamEnv` should be passed through to this function.
2422         let ty = self.ty(tcx, ty::ParamEnv::reveal_all());
2423         match *ty.kind() {
2424             ty::FnDef(..) => {
2425                 // HACK(davidtwco,eddyb): This is a workaround for polymorphization considering
2426                 // parameters unused if they show up in the signature, but not in the `mir::Body`
2427                 // (i.e. due to being inside a projection that got normalized, see
2428                 // `src/test/ui/polymorphization/normalized_sig_types.rs`), and codegen not keeping
2429                 // track of a polymorphization `ParamEnv` to allow normalizing later.
2430                 let mut sig = match *ty.kind() {
2431                     ty::FnDef(def_id, substs) => tcx
2432                         .normalize_erasing_regions(tcx.param_env(def_id), tcx.fn_sig(def_id))
2433                         .subst(tcx, substs),
2434                     _ => unreachable!(),
2435                 };
2436
2437                 if let ty::InstanceDef::VtableShim(..) = self.def {
2438                     // Modify `fn(self, ...)` to `fn(self: *mut Self, ...)`.
2439                     sig = sig.map_bound(|mut sig| {
2440                         let mut inputs_and_output = sig.inputs_and_output.to_vec();
2441                         inputs_and_output[0] = tcx.mk_mut_ptr(inputs_and_output[0]);
2442                         sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);
2443                         sig
2444                     });
2445                 }
2446                 sig
2447             }
2448             ty::Closure(def_id, substs) => {
2449                 let sig = substs.as_closure().sig();
2450
2451                 let env_ty = tcx.closure_env_ty(def_id, substs).unwrap();
2452                 sig.map_bound(|sig| {
2453                     tcx.mk_fn_sig(
2454                         iter::once(env_ty.skip_binder()).chain(sig.inputs().iter().cloned()),
2455                         sig.output(),
2456                         sig.c_variadic,
2457                         sig.unsafety,
2458                         sig.abi,
2459                     )
2460                 })
2461             }
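                 // NOTE(editor): schematically (editor's sketch): for an `Fn`
                 // closure `|x: u32| x`, `env_ty` is `&{closure}` and the result
                 // is `extern "rust-call" fn(&{closure}, (u32,)) -> u32`, i.e.
                 // the environment is prepended to the closure's own inputs.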
2462             ty::Generator(_, substs, _) => {
2463                 let sig = substs.as_generator().poly_sig();
2464
2465                 let br = ty::BoundRegion { kind: ty::BrEnv };
2466                 let env_region = ty::ReLateBound(ty::INNERMOST, br);
2467                 let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty);
2468
2469                 let pin_did = tcx.require_lang_item(LangItem::Pin, None);
2470                 let pin_adt_ref = tcx.adt_def(pin_did);
2471                 let pin_substs = tcx.intern_substs(&[env_ty.into()]);
2472                 let env_ty = tcx.mk_adt(pin_adt_ref, pin_substs);
2473
2474                 sig.map_bound(|sig| {
2475                     let state_did = tcx.require_lang_item(LangItem::GeneratorState, None);
2476                     let state_adt_ref = tcx.adt_def(state_did);
2477                     let state_substs =
2478                         tcx.intern_substs(&[sig.yield_ty.into(), sig.return_ty.into()]);
2479                     let ret_ty = tcx.mk_adt(state_adt_ref, state_substs);
2480
2481                     tcx.mk_fn_sig(
2482                         [env_ty, sig.resume_ty].iter(),
2483                         &ret_ty,
2484                         false,
2485                         hir::Unsafety::Normal,
2486                         rustc_target::spec::abi::Abi::Rust,
2487                     )
2488                 })
2489             }
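                 // NOTE(editor): the signature assembled above is, schematically,
                 // `fn(Pin<&mut G>, Resume) -> GeneratorState<Yield, Return>`,
                 // which is how codegen invokes a resumable generator.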
2490             _ => bug!("unexpected type {:?} in Instance::fn_sig", ty),
2491         }
2492     }
2493 }
2494
2495 pub trait FnAbiExt<'tcx, C>
2496 where
2497     C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout = TyAndLayout<'tcx>>
2498         + HasDataLayout
2499         + HasTargetSpec
2500         + HasTyCtxt<'tcx>
2501         + HasParamEnv<'tcx>,
2502 {
2503     /// Compute a `FnAbi` suitable for indirect calls, i.e. to `fn` pointers.
2504     ///
2505     /// NB: this doesn't handle virtual calls - those should use `FnAbi::of_instance`
2506     /// instead, where the instance is a `InstanceDef::Virtual`.
2507     fn of_fn_ptr(cx: &C, sig: ty::PolyFnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self;
2508
2509     /// Compute a `FnAbi` suitable for declaring/defining an `fn` instance, and for
2510     /// direct calls to an `fn`.
2511     ///
2512     /// NB: that includes virtual calls, which are represented by "direct calls"
2513     /// to a `InstanceDef::Virtual` instance (of `<dyn Trait as Trait>::fn`).
2514     fn of_instance(cx: &C, instance: ty::Instance<'tcx>, extra_args: &[Ty<'tcx>]) -> Self;
2515
2516     fn new_internal(
2517         cx: &C,
2518         sig: ty::PolyFnSig<'tcx>,
2519         extra_args: &[Ty<'tcx>],
2520         caller_location: Option<Ty<'tcx>>,
2521         codegen_fn_attr_flags: CodegenFnAttrFlags,
2522         mk_arg_type: impl Fn(Ty<'tcx>, Option<usize>) -> ArgAbi<'tcx, Ty<'tcx>>,
2523     ) -> Self;
2524     fn adjust_for_abi(&mut self, cx: &C, abi: SpecAbi);
2525 }
2526
2527 fn fn_can_unwind(
2528     panic_strategy: PanicStrategy,
2529     codegen_fn_attr_flags: CodegenFnAttrFlags,
2530     call_conv: Conv,
2531 ) -> bool {
2532     if panic_strategy != PanicStrategy::Unwind {
2533         // In panic=abort mode we assume nothing can unwind anywhere, so
2534         // optimize based on this!
2535         false
2536     } else if codegen_fn_attr_flags.contains(CodegenFnAttrFlags::UNWIND) {
2537         // If a specific #[unwind] attribute is present, use that.
2538         true
2539     } else if codegen_fn_attr_flags.contains(CodegenFnAttrFlags::RUSTC_ALLOCATOR_NOUNWIND) {
2540         // Special attribute for allocator functions, which can't unwind.
2541         false
2542     } else {
2543         if call_conv == Conv::Rust {
2544             // Any Rust method (or `extern "Rust" fn` or `extern
2545             // "rust-call" fn`) is explicitly allowed to unwind
2546             // (unless it has no-unwind attribute, handled above).
2547             true
2548         } else {
2549             // Anything else is either:
2550             //
2551             //  1. A foreign item using a non-Rust ABI (like `extern "C" { fn foo(); }`), or
2552             //
2553             //  2. A Rust item using a non-Rust ABI (like `extern "C" fn foo() { ... }`).
2554             //
2555             // Foreign items (case 1) are assumed to not unwind; it is
2556             // UB otherwise. (At least for now; see also
2557             // rust-lang/rust#63909 and Rust RFC 2753.)
2558             //
2559             // Items defined in Rust with non-Rust ABIs (case 2) are also
2560             // not supposed to unwind. Whether this should be enforced
2561             // (versus stating it is UB) and *how* it would be enforced
2562             // is currently under discussion; see rust-lang/rust#58794.
2563             //
2564             // In either case, we mark the item as explicitly nounwind.
2565             false
2566         }
2567     }
2568 }
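     // NOTE(editor): illustrative reading of the rules above (editor's
     // summary): under `-Cpanic=unwind` with no relevant attributes, a
     // plain `fn`, `extern "Rust" fn`, or `extern "rust-call" fn` may
     // unwind (`Conv::Rust`), while `extern "C" fn foo() {}` and a
     // foreign `extern "C" { fn bar(); }` are both treated as nounwind.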
2569
2570 impl<'tcx, C> FnAbiExt<'tcx, C> for call::FnAbi<'tcx, Ty<'tcx>>
2571 where
2572     C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout = TyAndLayout<'tcx>>
2573         + HasDataLayout
2574         + HasTargetSpec
2575         + HasTyCtxt<'tcx>
2576         + HasParamEnv<'tcx>,
2577 {
2578     fn of_fn_ptr(cx: &C, sig: ty::PolyFnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
2579         // Assume that fn pointers may always unwind
2580         let codegen_fn_attr_flags = CodegenFnAttrFlags::UNWIND;
2581
2582         call::FnAbi::new_internal(cx, sig, extra_args, None, codegen_fn_attr_flags, |ty, _| {
2583             ArgAbi::new(cx.layout_of(ty))
2584         })
2585     }
2586
2587     fn of_instance(cx: &C, instance: ty::Instance<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
2588         let sig = instance.fn_sig_for_fn_abi(cx.tcx());
2589
2590         let caller_location = if instance.def.requires_caller_location(cx.tcx()) {
2591             Some(cx.tcx().caller_location_ty())
2592         } else {
2593             None
2594         };
2595
2596         let attrs = cx.tcx().codegen_fn_attrs(instance.def_id()).flags;
2597
2598         call::FnAbi::new_internal(cx, sig, extra_args, caller_location, attrs, |ty, arg_idx| {
2599             let mut layout = cx.layout_of(ty);
2600             // Don't pass the vtable; it's not an argument of the virtual fn.
2601             // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
2602             // or `&/&mut dyn Trait` because this is special-cased elsewhere in codegen
2603             if let (ty::InstanceDef::Virtual(..), Some(0)) = (&instance.def, arg_idx) {
2604                 let fat_pointer_ty = if layout.is_unsized() {
2605                     // unsized `self` is passed as a pointer to `self`
2606                     // FIXME (mikeyhew) change this to use &own if it is ever added to the language
2607                     cx.tcx().mk_mut_ptr(layout.ty)
2608                 } else {
2609                     match layout.abi {
2610                         Abi::ScalarPair(..) => (),
2611                         _ => bug!("receiver type has unsupported layout: {:?}", layout),
2612                     }
2613
2614                     // In the case of Rc<Self>, we need to explicitly pass a *mut RcBox<Self>
2615                     // with a Scalar (not ScalarPair) ABI. This is a hack that is understood
2616                     // elsewhere in the compiler as a method on a `dyn Trait`.
2617                     // To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes until we
2618                     // get a built-in pointer type
2619                     let mut fat_pointer_layout = layout;
2620                     'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr()
2621                         && !fat_pointer_layout.ty.is_region_ptr()
2622                     {
2623                         for i in 0..fat_pointer_layout.fields.count() {
2624                             let field_layout = fat_pointer_layout.field(cx, i);
2625
2626                             if !field_layout.is_zst() {
2627                                 fat_pointer_layout = field_layout;
2628                                 continue 'descend_newtypes;
2629                             }
2630                         }
2631
2632                         bug!("receiver has no non-zero-sized fields {:?}", fat_pointer_layout);
2633                     }
2634
2635                     fat_pointer_layout.ty
2636                 };
2637
2638                 // we now have a type like `*mut RcBox<dyn Trait>`
2639                 // change its layout to that of `*mut ()`, a thin pointer, but keep the same type
2640                 // this is understood as a special case elsewhere in the compiler
2641                 let unit_pointer_ty = cx.tcx().mk_mut_ptr(cx.tcx().mk_unit());
2642                 layout = cx.layout_of(unit_pointer_ty);
2643                 layout.ty = fat_pointer_ty;
2644             }
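                 // NOTE(editor): net effect, sketched: for a receiver like
                 // `self: Rc<Self>` on a `dyn Trait` method, newtypes are peeled
                 // down to the inner raw pointer to `RcBox<dyn Trait>`; the arg
                 // keeps that fat-pointer *type* but uses the thin `*mut ()`
                 // *layout*, as the comments above describe.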
2645             ArgAbi::new(layout)
2646         })
2647     }
2648
2649     fn new_internal(
2650         cx: &C,
2651         sig: ty::PolyFnSig<'tcx>,
2652         extra_args: &[Ty<'tcx>],
2653         caller_location: Option<Ty<'tcx>>,
2654         codegen_fn_attr_flags: CodegenFnAttrFlags,
2655         mk_arg_type: impl Fn(Ty<'tcx>, Option<usize>) -> ArgAbi<'tcx, Ty<'tcx>>,
2656     ) -> Self {
2657         debug!("FnAbi::new_internal({:?}, {:?})", sig, extra_args);
2658
2659         let sig = cx.tcx().normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), sig);
2660
2661         use rustc_target::spec::abi::Abi::*;
2662         let conv = match cx.tcx().sess.target.adjust_abi(sig.abi) {
2663             RustIntrinsic | PlatformIntrinsic | Rust | RustCall => Conv::Rust,
2664
2665             // It's the ABI's job to select this, not ours.
2666             System => bug!("system abi should be selected elsewhere"),
2667             EfiApi => bug!("eficall abi should be selected elsewhere"),
2668
2669             Stdcall => Conv::X86Stdcall,
2670             Fastcall => Conv::X86Fastcall,
2671             Vectorcall => Conv::X86VectorCall,
2672             Thiscall => Conv::X86ThisCall,
2673             C => Conv::C,
2674             Unadjusted => Conv::C,
2675             Win64 => Conv::X86_64Win64,
2676             SysV64 => Conv::X86_64SysV,
2677             Aapcs => Conv::ArmAapcs,
2678             PtxKernel => Conv::PtxKernel,
2679             Msp430Interrupt => Conv::Msp430Intr,
2680             X86Interrupt => Conv::X86Intr,
2681             AmdGpuKernel => Conv::AmdGpuKernel,
2682             AvrInterrupt => Conv::AvrInterrupt,
2683             AvrNonBlockingInterrupt => Conv::AvrNonBlockingInterrupt,
2684
2685             // These API constants ought to be more specific...
2686             Cdecl => Conv::C,
2687         };
2688
2689         let mut inputs = sig.inputs();
2690         let extra_args = if sig.abi == RustCall {
2691             assert!(!sig.c_variadic && extra_args.is_empty());
2692
2693             if let Some(input) = sig.inputs().last() {
2694                 if let ty::Tuple(tupled_arguments) = input.kind() {
2695                     inputs = &sig.inputs()[0..sig.inputs().len() - 1];
2696                     tupled_arguments.iter().map(|k| k.expect_ty()).collect()
2697                 } else {
2698                     bug!(
2699                         "argument to function with \"rust-call\" ABI \
2700                             is not a tuple"
2701                     );
2702                 }
2703             } else {
2704                 bug!(
2705                     "argument to function with \"rust-call\" ABI \
2706                         is not a tuple"
2707                 );
2708             }
2709         } else {
2710             assert!(sig.c_variadic || extra_args.is_empty());
2711             extra_args.to_vec()
2712         };
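             // NOTE(editor): e.g. (sketch) a "rust-call" signature
             // `fn(&F, (A, B))` yields `inputs = [&F]` while the tupled tail
             // is flattened into `extra_args = [A, B]`, so the ABI sees three
             // separate arguments.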
2713
2714         let target = &cx.tcx().sess.target;
2715         let target_env_gnu_like = matches!(&target.env[..], "gnu" | "musl");
2716         let win_x64_gnu = target.os == "windows" && target.arch == "x86_64" && target.env == "gnu";
2717         let linux_s390x_gnu_like =
2718             target.os == "linux" && target.arch == "s390x" && target_env_gnu_like;
2719         let linux_sparc64_gnu_like =
2720             target.os == "linux" && target.arch == "sparc64" && target_env_gnu_like;
2721         let linux_powerpc_gnu_like =
2722             target.os == "linux" && target.arch == "powerpc" && target_env_gnu_like;
2723         let rust_abi = matches!(sig.abi, RustIntrinsic | PlatformIntrinsic | Rust | RustCall);
2724
2725         // Handle safe Rust thin and fat pointers.
2726         let adjust_for_rust_scalar = |attrs: &mut ArgAttributes,
2727                                       scalar: &Scalar,
2728                                       layout: TyAndLayout<'tcx>,
2729                                       offset: Size,
2730                                       is_return: bool| {
2731             // Booleans are always an i1 that needs to be zero-extended.
2732             if scalar.is_bool() {
2733                 attrs.ext(ArgExtension::Zext);
2734                 return;
2735             }
2736
2737             // Only pointer types handled below.
2738             if scalar.value != Pointer {
2739                 return;
2740             }
2741
2742             // A non-wrapping valid range that starts above 0 excludes null.
2743             if scalar.valid_range.start() < scalar.valid_range.end()
2744                 && *scalar.valid_range.start() > 0 {
2745                 attrs.set(ArgAttribute::NonNull);
2746             }
2747
2748             if let Some(pointee) = layout.pointee_info_at(cx, offset) {
2749                 if let Some(kind) = pointee.safe {
2750                     attrs.pointee_align = Some(pointee.align);
2751
2752                     // `Box` pointers (`UniqueOwned`) are not necessarily dereferenceable
2753                     // for the entire duration of the function as they can be deallocated
2754                     // at any time. Set their valid size to 0.
2755                     attrs.pointee_size = match kind {
2756                         PointerKind::UniqueOwned => Size::ZERO,
2757                         _ => pointee.size,
2758                     };
2759
2760                     // `Box` pointer parameters never alias because ownership
2761                     // is transferred. `&mut` pointer parameters never alias
2762                     // other parameters or mutable global data.
2763                     //
2764                     // `&T` where `T` contains no `UnsafeCell<U>` is immutable,
2765                     // and can be marked as both `readonly` and `noalias`, as
2766                     // LLVM's definition of `noalias` is based solely on memory
2767                     // dependencies rather than pointer equality
2768                     let no_alias = match kind {
2769                         PointerKind::Shared => false,
2770                         PointerKind::UniqueOwned => true,
2771                         PointerKind::Frozen | PointerKind::UniqueBorrowed => !is_return,
2772                     };
2773                     if no_alias {
2774                         attrs.set(ArgAttribute::NoAlias);
2775                     }
2776
2777                     if kind == PointerKind::Frozen && !is_return {
2778                         attrs.set(ArgAttribute::ReadOnly);
2779                     }
2780                 }
2781             }
2782         };
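             // NOTE(editor): a sketch of the closure above on common types:
             // `x: &u32` (frozen pointee) gets `noalias`, `readonly`, and
             // `nonnull` with pointee size 4; `x: &mut u32` gets `nonnull`
             // (plus `noalias` only under `-Zmutable-noalias`); `x: Box<u32>`
             // gets `noalias` but pointee size 0, since the callee may free it.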
2783
2784         let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| {
2785             let is_return = arg_idx.is_none();
2786             let mut arg = mk_arg_type(ty, arg_idx);
2787             if arg.layout.is_zst() {
2788                 // For some forsaken reason, x86_64-pc-windows-gnu
2789                 // doesn't ignore zero-sized struct arguments.
2790                 // The same is true for {s390x,sparc64,powerpc}-unknown-linux-{gnu,musl}.
2791                 if is_return
2792                     || rust_abi
2793                     || (!win_x64_gnu
2794                         && !linux_s390x_gnu_like
2795                         && !linux_sparc64_gnu_like
2796                         && !linux_powerpc_gnu_like)
2797                 {
2798                     arg.mode = PassMode::Ignore;
2799                 }
2800             }
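                 // NOTE(editor): e.g. (sketch) a zero-sized `struct Empty;`
                 // argument becomes `PassMode::Ignore` for any Rust-ABI
                 // function, but is kept for an `extern "C" fn` on
                 // x86_64-pc-windows-gnu and the gnu-like Linux targets
                 // listed above.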
2801
2802             // FIXME(eddyb) other ABIs don't have logic for scalar pairs.
2803             if !is_return && rust_abi {
2804                 if let Abi::ScalarPair(ref a, ref b) = arg.layout.abi {
2805                     let mut a_attrs = ArgAttributes::new();
2806                     let mut b_attrs = ArgAttributes::new();
2807                     adjust_for_rust_scalar(&mut a_attrs, a, arg.layout, Size::ZERO, false);
2808                     adjust_for_rust_scalar(
2809                         &mut b_attrs,
2810                         b,
2811                         arg.layout,
2812                         a.value.size(cx).align_to(b.value.align(cx).abi),
2813                         false,
2814                     );
2815                     arg.mode = PassMode::Pair(a_attrs, b_attrs);
2816                     return arg;
2817                 }
2818             }
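                 // NOTE(editor): e.g. (sketch) for `&[u8]` (pointer + length),
                 // the second scalar sits at `size_of(ptr)` rounded up to the
                 // length's alignment, which is the offset computed above.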
2819
2820             if let Abi::Scalar(ref scalar) = arg.layout.abi {
2821                 if let PassMode::Direct(ref mut attrs) = arg.mode {
2822                     adjust_for_rust_scalar(attrs, scalar, arg.layout, Size::ZERO, is_return);
2823                 }
2824             }
2825
2826             arg
2827         };
2828
2829         let mut fn_abi = FnAbi {
2830             ret: arg_of(sig.output(), None),
2831             args: inputs
2832                 .iter()
2833                 .cloned()
2834                 .chain(extra_args)
2835                 .chain(caller_location)
2836                 .enumerate()
2837                 .map(|(i, ty)| arg_of(ty, Some(i)))
2838                 .collect(),
2839             c_variadic: sig.c_variadic,
2840             fixed_count: inputs.len(),
2841             conv,
2842             can_unwind: fn_can_unwind(cx.tcx().sess.panic_strategy(), codegen_fn_attr_flags, conv),
2843         };
2844         fn_abi.adjust_for_abi(cx, sig.abi);
2845         debug!("FnAbi::new_internal = {:?}", fn_abi);
2846         fn_abi
2847     }
2848
2849     fn adjust_for_abi(&mut self, cx: &C, abi: SpecAbi) {
2850         if abi == SpecAbi::Unadjusted {
2851             return;
2852         }
2853
2854         if abi == SpecAbi::Rust
2855             || abi == SpecAbi::RustCall
2856             || abi == SpecAbi::RustIntrinsic
2857             || abi == SpecAbi::PlatformIntrinsic
2858         {
2859             let fixup = |arg: &mut ArgAbi<'tcx, Ty<'tcx>>| {
2860                 if arg.is_ignore() {
2861                     return;
2862                 }
2863
2864                 match arg.layout.abi {
2865                     Abi::Aggregate { .. } => {}
2866
2867                     // This is a fun case! The gist of what this is doing is
2868                     // that we want callers and callees to always agree on the
2869                     // ABI of how they pass SIMD arguments. If we were to *not*
2870                     // make these arguments indirect then they'd be immediates
2871                     // in LLVM, which means that they'd use whatever the
2872                     // appropriate ABI is for the callee and the caller. That
2873                     // means, for example, if the caller doesn't have AVX
2874                     // enabled but the callee does, then passing an AVX argument
2875                     // across this boundary would cause corrupt data to show up.
2876                     //
2877                     // This problem is fixed by unconditionally passing SIMD
2878                     // arguments through memory between callers and callees
2879                     // which should get them all to agree on ABI regardless of
2880                     // target feature sets. Some more information about this
2881                     // issue can be found in #44367.
2882                     //
2883                     // Note that the platform intrinsic ABI is exempt here as
2884                     // that's how we connect up to LLVM and it's unstable
2885                     // anyway; we control all calls to it in libstd.
2886                     Abi::Vector { .. }
2887                         if abi != SpecAbi::PlatformIntrinsic
2888                             && cx.tcx().sess.target.simd_types_indirect =>
2889                     {
2890                         arg.make_indirect();
2891                         return;
2892                     }
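                         // NOTE(editor): e.g. (sketch) `fn f(v: __m256)` compiled
                         // with AVX but called from non-AVX code would otherwise
                         // disagree about whether `v` travels in a YMM register;
                         // routing it through memory sidesteps the mismatch.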
2893
2894                     _ => return,
2895                 }
2896
2897                 // Pass and return structures up to 2 pointers in size by value, matching `ScalarPair`.
2898                 // LLVM will usually pass these in 2 registers, which is more efficient than by-ref.
2899                 let max_by_val_size = Pointer.size(cx) * 2;
2900                 let size = arg.layout.size;
2901
2902                 if arg.layout.is_unsized() || size > max_by_val_size {
2903                     arg.make_indirect();
2904                 } else {
2905                     // We want to pass small aggregates as immediates, but using
2906                     // a LLVM aggregate type for this leads to bad optimizations,
2907                     // so we pick an appropriately sized integer type instead.
2908                     arg.cast_to(Reg { kind: RegKind::Integer, size });
2909                 }
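                     // NOTE(editor): e.g. (sketch) a 12-byte `struct S(u32, u32, u32)`
                     // has `Abi::Aggregate`, fits in two pointers, and is therefore
                     // cast to a single 96-bit integer immediate here rather than
                     // passed as an LLVM aggregate or through memory.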
2910             };
2911             fixup(&mut self.ret);
2912             for arg in &mut self.args {
2913                 fixup(arg);
2914             }
2915             return;
2916         }
2917
2918         if let Err(msg) = self.adjust_for_cabi(cx, abi) {
2919             cx.tcx().sess.fatal(&msg);
2920         }
2921     }
2922 }