// compiler/rustc_middle/src/ty/layout.rs
use crate::ich::StableHashingContext;
use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use crate::mir::{GeneratorLayout, GeneratorSavedLocal};
use crate::ty::subst::Subst;
use crate::ty::{self, subst::SubstsRef, ReprOptions, Ty, TyCtxt, TypeFoldable};

use rustc_ast as ast;
use rustc_attr as attr;
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_hir as hir;
use rustc_hir::lang_items::LangItem;
use rustc_index::bit_set::BitSet;
use rustc_index::vec::{Idx, IndexVec};
use rustc_session::{DataTypeKind, FieldInfo, SizeKind, VariantInfo};
use rustc_span::symbol::{Ident, Symbol};
use rustc_span::DUMMY_SP;
use rustc_target::abi::call::{
    ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, Conv, FnAbi, PassMode, Reg, RegKind,
};
use rustc_target::abi::*;
use rustc_target::spec::{abi::Abi as SpecAbi, HasTargetSpec, PanicStrategy};

use std::cmp;
use std::fmt;
use std::iter;
use std::mem;
use std::num::NonZeroUsize;
use std::ops::Bound;

pub trait IntegerExt {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx>;
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
    fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer;
    fn from_uint_ty<C: HasDataLayout>(cx: &C, uty: ty::UintTy) -> Integer;
    fn repr_discr<'tcx>(
        tcx: TyCtxt<'tcx>,
        ty: Ty<'tcx>,
        repr: &ReprOptions,
        min: i128,
        max: i128,
    ) -> (Integer, bool);
}

impl IntegerExt for Integer {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx> {
        match (*self, signed) {
            (I8, false) => tcx.types.u8,
            (I16, false) => tcx.types.u16,
            (I32, false) => tcx.types.u32,
            (I64, false) => tcx.types.u64,
            (I128, false) => tcx.types.u128,
            (I8, true) => tcx.types.i8,
            (I16, true) => tcx.types.i16,
            (I32, true) => tcx.types.i32,
            (I64, true) => tcx.types.i64,
            (I128, true) => tcx.types.i128,
        }
    }

    /// Gets the Integer type from an attr::IntType.
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
        let dl = cx.data_layout();

        match ity {
            attr::SignedInt(ast::IntTy::I8) | attr::UnsignedInt(ast::UintTy::U8) => I8,
            attr::SignedInt(ast::IntTy::I16) | attr::UnsignedInt(ast::UintTy::U16) => I16,
            attr::SignedInt(ast::IntTy::I32) | attr::UnsignedInt(ast::UintTy::U32) => I32,
            attr::SignedInt(ast::IntTy::I64) | attr::UnsignedInt(ast::UintTy::U64) => I64,
            attr::SignedInt(ast::IntTy::I128) | attr::UnsignedInt(ast::UintTy::U128) => I128,
            attr::SignedInt(ast::IntTy::Isize) | attr::UnsignedInt(ast::UintTy::Usize) => {
                dl.ptr_sized_integer()
            }
        }
    }

    fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer {
        match ity {
            ty::IntTy::I8 => I8,
            ty::IntTy::I16 => I16,
            ty::IntTy::I32 => I32,
            ty::IntTy::I64 => I64,
            ty::IntTy::I128 => I128,
            ty::IntTy::Isize => cx.data_layout().ptr_sized_integer(),
        }
    }
    fn from_uint_ty<C: HasDataLayout>(cx: &C, ity: ty::UintTy) -> Integer {
        match ity {
            ty::UintTy::U8 => I8,
            ty::UintTy::U16 => I16,
            ty::UintTy::U32 => I32,
            ty::UintTy::U64 => I64,
            ty::UintTy::U128 => I128,
            ty::UintTy::Usize => cx.data_layout().ptr_sized_integer(),
        }
    }

    /// Finds the appropriate Integer type and signedness for the given
    /// signed discriminant range and `#[repr]` attribute.
    /// N.B.: `u128` values above `i128::MAX` will be treated as signed, but
    /// that shouldn't affect anything, other than maybe debuginfo.
    fn repr_discr<'tcx>(
        tcx: TyCtxt<'tcx>,
        ty: Ty<'tcx>,
        repr: &ReprOptions,
        min: i128,
        max: i128,
    ) -> (Integer, bool) {
        // Theoretically, negative values could be larger in unsigned representation
        // than the unsigned representation of the signed minimum. However, if there
        // are any negative values, the only valid unsigned representation is u128
        // which can fit all i128 values, so the result remains unaffected.
        let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
        let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));

        let mut min_from_extern = None;
        let min_default = I8;

        if let Some(ity) = repr.int {
            let discr = Integer::from_attr(&tcx, ity);
            let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
            if discr < fit {
                bug!(
                    "Integer::repr_discr: `#[repr]` hint too small for \
                      discriminant range of enum `{}`",
                    ty
                )
            }
            return (discr, ity.is_signed());
        }

        if repr.c() {
            match &tcx.sess.target.arch[..] {
                // WARNING: the ARM EABI has two variants; the one corresponding
                // to `at_least == I32` appears to be used on Linux and NetBSD,
                // but some systems may use the variant corresponding to no
                // lower bound. However, we don't run on those yet...?
                "arm" => min_from_extern = Some(I32),
                _ => min_from_extern = Some(I32),
            }
        }

        let at_least = min_from_extern.unwrap_or(min_default);

        // If there are no negative values, we can use the unsigned fit.
        if min >= 0 {
            (cmp::max(unsigned_fit, at_least), false)
        } else {
            (cmp::max(signed_fit, at_least), true)
        }
    }
}
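
// e.g., for `enum E { A = -1, B = 100 }` with no `#[repr]` hint, both bounds fit
// in `I8`, so `repr_discr` returns `(I8, true)`; adding `#[repr(C)]` would raise
// the minimum to `I32` on the architectures handled above. (Worked example,
// derived from the code; not part of the upstream file.)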

pub trait PrimitiveExt {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
    fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
}

impl PrimitiveExt for Primitive {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
        match *self {
            Int(i, signed) => i.to_ty(tcx, signed),
            F32 => tcx.types.f32,
            F64 => tcx.types.f64,
            Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
        }
    }

    /// Returns an *integer* type matching this primitive.
    /// Useful in particular when dealing with enum discriminants.
    fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
        match *self {
            Int(i, signed) => i.to_ty(tcx, signed),
            Pointer => tcx.types.usize,
            F32 | F64 => bug!("floats do not have an int type"),
        }
    }
}

/// The first half of a fat pointer.
///
/// - For a trait object, this is the address of the box.
/// - For a slice, this is the base address.
pub const FAT_PTR_ADDR: usize = 0;

/// The second half of a fat pointer.
///
/// - For a trait object, this is the address of the vtable.
/// - For a slice, this is the length.
pub const FAT_PTR_EXTRA: usize = 1;
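
// e.g., for `&str`, field `FAT_PTR_ADDR` is the data pointer and field
// `FAT_PTR_EXTRA` the `usize` length; for `&dyn Trait`, `FAT_PTR_EXTRA` is the
// vtable pointer instead.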

#[derive(Copy, Clone, Debug, TyEncodable, TyDecodable)]
pub enum LayoutError<'tcx> {
    Unknown(Ty<'tcx>),
    SizeOverflow(Ty<'tcx>),
}

impl<'tcx> fmt::Display for LayoutError<'tcx> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match *self {
            LayoutError::Unknown(ty) => write!(f, "the type `{}` has an unknown layout", ty),
            LayoutError::SizeOverflow(ty) => {
                write!(f, "values of the type `{}` are too big for the current architecture", ty)
            }
        }
    }
}

fn layout_raw<'tcx>(
    tcx: TyCtxt<'tcx>,
    query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
) -> Result<&'tcx Layout, LayoutError<'tcx>> {
    ty::tls::with_related_context(tcx, move |icx| {
        let (param_env, ty) = query.into_parts();

        if !tcx.sess.recursion_limit().value_within_limit(icx.layout_depth) {
            tcx.sess.fatal(&format!("overflow representing the type `{}`", ty));
        }

        // Update the ImplicitCtxt to increase the layout_depth
        let icx = ty::tls::ImplicitCtxt { layout_depth: icx.layout_depth + 1, ..icx.clone() };

        ty::tls::enter_context(&icx, |_| {
            let cx = LayoutCx { tcx, param_env };
            let layout = cx.layout_raw_uncached(ty);
            // Type-level uninhabitedness should always imply ABI uninhabitedness.
            if let Ok(layout) = layout {
                if ty.conservative_is_privately_uninhabited(tcx) {
                    assert!(layout.abi.is_uninhabited());
                }
            }
            layout
        })
    })
}

pub fn provide(providers: &mut ty::query::Providers) {
    *providers = ty::query::Providers { layout_raw, ..*providers };
}

pub struct LayoutCx<'tcx, C> {
    pub tcx: C,
    pub param_env: ty::ParamEnv<'tcx>,
}

#[derive(Copy, Clone, Debug)]
enum StructKind {
    /// A tuple, closure, or univariant which cannot be coerced to unsized.
    AlwaysSized,
    /// A univariant, the last field of which may be coerced to unsized.
    MaybeUnsized,
    /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
    Prefixed(Size, Align),
}
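
// e.g., a tuple struct like `struct S(u8, u32)` is `AlwaysSized`; a struct with
// a slice tail, like `struct T { len: usize, data: [u8] }`, is `MaybeUnsized`;
// enum variants laid out after their tag use `Prefixed` with the tag's size and
// alignment.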

// Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
// This is used to go between `memory_index` (source field order to memory order)
// and `inverse_memory_index` (memory order to source field order).
// See also `FieldsShape::Arbitrary::memory_index` for more details.
// FIXME(eddyb) build a better abstraction for permutations, if possible.
fn invert_mapping(map: &[u32]) -> Vec<u32> {
    let mut inverse = vec![0; map.len()];
    for i in 0..map.len() {
        inverse[map[i] as usize] = i as u32;
    }
    inverse
}

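// A minimal sketch of the inversion property of `invert_mapping` above; the
// permutation used here is arbitrary and purely illustrative.
#[cfg(test)]
mod invert_mapping_tests {
    use super::invert_mapping;

    #[test]
    fn inverts_permutations() {
        // Source field 0 lives at memory slot 2, field 1 at slot 0, field 2 at slot 1.
        let memory_index = [2u32, 0, 1];
        let inverse_memory_index = invert_mapping(&memory_index);
        assert_eq!(inverse_memory_index, [1, 2, 0]);
        // Inverting twice recovers the original permutation.
        assert_eq!(invert_mapping(&inverse_memory_index), memory_index);
    }
}
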
impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
    fn scalar_pair(&self, a: Scalar, b: Scalar) -> Layout {
        let dl = self.data_layout();
        let b_align = b.value.align(dl);
        let align = a.value.align(dl).max(b_align).max(dl.aggregate_align);
        let b_offset = a.value.size(dl).align_to(b_align.abi);
        let size = (b_offset + b.value.size(dl)).align_to(align.abi);

        // HACK(nox): We iter on `b` and then `a` because `max_by_key`
        // returns the last maximum.
        let largest_niche = Niche::from_scalar(dl, b_offset, b.clone())
            .into_iter()
            .chain(Niche::from_scalar(dl, Size::ZERO, a.clone()))
            .max_by_key(|niche| niche.available(dl));

        Layout {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Arbitrary {
                offsets: vec![Size::ZERO, b_offset],
                memory_index: vec![0, 1],
            },
            abi: Abi::ScalarPair(a, b),
            largest_niche,
            align,
            size,
        }
    }
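
    // e.g., pairing a `u32` scalar (size 4, align 4) with a `u16` scalar
    // (size 2, align 2) gives `b_offset = 4`, overall `align = 4`, and
    // `size = (4 + 2).align_to(4) = 8` bytes. (Worked example, assuming a
    // typical data layout where the aggregate alignment does not exceed 4.)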

    fn univariant_uninterned(
        &self,
        ty: Ty<'tcx>,
        fields: &[TyAndLayout<'_>],
        repr: &ReprOptions,
        kind: StructKind,
    ) -> Result<Layout, LayoutError<'tcx>> {
        let dl = self.data_layout();
        let pack = repr.pack;
        if pack.is_some() && repr.align.is_some() {
            bug!("struct cannot be packed and aligned");
        }

        let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };

        let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();

        let optimize = !repr.inhibit_struct_field_reordering_opt();
        if optimize {
            let end =
                if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
            let optimizing = &mut inverse_memory_index[..end];
            let field_align = |f: &TyAndLayout<'_>| {
                if let Some(pack) = pack { f.align.abi.min(pack) } else { f.align.abi }
            };
            match kind {
                StructKind::AlwaysSized | StructKind::MaybeUnsized => {
                    optimizing.sort_by_key(|&x| {
                        // Place ZSTs first to avoid "interesting offsets",
                        // especially with only one or two non-ZST fields.
                        let f = &fields[x as usize];
                        (!f.is_zst(), cmp::Reverse(field_align(f)))
                    });
                }
                StructKind::Prefixed(..) => {
                    // Sort in ascending alignment so that the layout stays optimal
                    // regardless of the prefix.
                    optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
                }
            }
        }

        // inverse_memory_index holds field indices by increasing memory offset.
        // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
        // We now write field offsets to the corresponding offset slot;
        // field 5 with offset 0 puts 0 in offsets[5].
        // At the bottom of this function, we invert `inverse_memory_index` to
        // produce `memory_index` (see `invert_mapping`).
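        //
        // e.g., for `struct S { a: u8, b: u32, c: () }` with reordering enabled,
        // the sort above places the ZST `c` first and then orders by descending
        // alignment, so `inverse_memory_index` becomes `[2, 1, 0]` and the struct
        // packs into 8 bytes (4 for `b`, 1 for `a`, padded out to align 4).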

        let mut sized = true;
        let mut offsets = vec![Size::ZERO; fields.len()];
        let mut offset = Size::ZERO;
        let mut largest_niche = None;
        let mut largest_niche_available = 0;

        if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
            let prefix_align =
                if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
            align = align.max(AbiAndPrefAlign::new(prefix_align));
            offset = prefix_size.align_to(prefix_align);
        }

        for &i in &inverse_memory_index {
            let field = fields[i as usize];
            if !sized {
                bug!("univariant: field #{} of `{}` comes after unsized field", offsets.len(), ty);
            }

            if field.is_unsized() {
                sized = false;
            }

            // Invariant: offset < dl.obj_size_bound() <= 1<<61
            let field_align = if let Some(pack) = pack {
                field.align.min(AbiAndPrefAlign::new(pack))
            } else {
                field.align
            };
            offset = offset.align_to(field_align.abi);
            align = align.max(field_align);

            debug!("univariant offset: {:?} field: {:#?}", offset, field);
            offsets[i as usize] = offset;

            if !repr.hide_niche() {
                if let Some(mut niche) = field.largest_niche.clone() {
                    let available = niche.available(dl);
                    if available > largest_niche_available {
                        largest_niche_available = available;
                        niche.offset += offset;
                        largest_niche = Some(niche);
                    }
                }
            }

            offset = offset.checked_add(field.size, dl).ok_or(LayoutError::SizeOverflow(ty))?;
        }

        if let Some(repr_align) = repr.align {
            align = align.max(AbiAndPrefAlign::new(repr_align));
        }

        debug!("univariant min_size: {:?}", offset);
        let min_size = offset;

        // As stated above, inverse_memory_index holds field indices by increasing offset.
        // This makes it an already-sorted view of the offsets vec.
        // To invert it, consider:
        // if field 5 has the lowest offset, inverse_memory_index[0] is 5,
        // so memory_index[5] should be 0.
        // Note: if we didn't optimize, the mapping is the identity and is already right.

        let memory_index =
            if optimize { invert_mapping(&inverse_memory_index) } else { inverse_memory_index };

        let size = min_size.align_to(align.abi);
        let mut abi = Abi::Aggregate { sized };

        // Unpack newtype ABIs and find scalar pairs.
        if sized && size.bytes() > 0 {
            // All other fields must be ZSTs.
            let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());

            match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
                // We have exactly one non-ZST field.
                (Some((i, field)), None, None) => {
                    // Field fills the struct and it has a scalar or scalar pair ABI.
                    if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size
                    {
                        match field.abi {
                            // For plain scalars, or vectors of them, we can't unpack
                            // newtypes for `#[repr(C)]`, as that affects C ABIs.
                            Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
                                abi = field.abi.clone();
                            }
                            // But scalar pairs are Rust-specific and get
                            // treated as aggregates by C ABIs anyway.
                            Abi::ScalarPair(..) => {
                                abi = field.abi.clone();
                            }
                            _ => {}
                        }
                    }
                }

                // Two non-ZST fields, and they're both scalars.
                (
                    Some((i, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(ref a), .. }, .. })),
                    Some((j, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(ref b), .. }, .. })),
                    None,
                ) => {
                    // Order by the memory placement, not source order.
                    let ((i, a), (j, b)) =
                        if offsets[i] < offsets[j] { ((i, a), (j, b)) } else { ((j, b), (i, a)) };
                    let pair = self.scalar_pair(a.clone(), b.clone());
                    let pair_offsets = match pair.fields {
                        FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
                            assert_eq!(memory_index, &[0, 1]);
                            offsets
                        }
                        _ => bug!(),
                    };
                    if offsets[i] == pair_offsets[0]
                        && offsets[j] == pair_offsets[1]
                        && align == pair.align
                        && size == pair.size
                    {
                        // We can use `ScalarPair` only when it matches our
                        // already computed layout (including `#[repr(C)]`).
                        abi = pair.abi;
                    }
                }

                _ => {}
            }
        }
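
        // e.g., a newtype like `struct Wrapper(u64);` inherits `Abi::Scalar` from
        // its single non-ZST field, and a two-scalar struct whose computed offsets
        // match `scalar_pair`'s gets `Abi::ScalarPair` instead of staying an
        // `Aggregate`.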

        if sized && fields.iter().any(|f| f.abi.is_uninhabited()) {
            abi = Abi::Uninhabited;
        }

        Ok(Layout {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Arbitrary { offsets, memory_index },
            abi,
            largest_niche,
            align,
            size,
        })
    }

    fn layout_raw_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx Layout, LayoutError<'tcx>> {
        let tcx = self.tcx;
        let param_env = self.param_env;
        let dl = self.data_layout();
        let scalar_unit = |value: Primitive| {
            let bits = value.size(dl).bits();
            assert!(bits <= 128);
            Scalar { value, valid_range: 0..=(!0 >> (128 - bits)) }
        };
        let scalar = |value: Primitive| tcx.intern_layout(Layout::scalar(self, scalar_unit(value)));

        let univariant = |fields: &[TyAndLayout<'_>], repr: &ReprOptions, kind| {
            Ok(tcx.intern_layout(self.univariant_uninterned(ty, fields, repr, kind)?))
        };
        debug_assert!(!ty.has_infer_types_or_consts());

        Ok(match *ty.kind() {
            // Basic scalars.
            ty::Bool => tcx.intern_layout(Layout::scalar(
                self,
                Scalar { value: Int(I8, false), valid_range: 0..=1 },
            )),
            ty::Char => tcx.intern_layout(Layout::scalar(
                self,
                Scalar { value: Int(I32, false), valid_range: 0..=0x10FFFF },
            )),
            ty::Int(ity) => scalar(Int(Integer::from_int_ty(dl, ity), true)),
            ty::Uint(ity) => scalar(Int(Integer::from_uint_ty(dl, ity), false)),
            ty::Float(fty) => scalar(match fty {
                ty::FloatTy::F32 => F32,
                ty::FloatTy::F64 => F64,
            }),
            ty::FnPtr(_) => {
                let mut ptr = scalar_unit(Pointer);
                ptr.valid_range = 1..=*ptr.valid_range.end();
                tcx.intern_layout(Layout::scalar(self, ptr))
            }

            // The never type.
            ty::Never => tcx.intern_layout(Layout {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldsShape::Primitive,
                abi: Abi::Uninhabited,
                largest_niche: None,
                align: dl.i8_align,
                size: Size::ZERO,
            }),

            // Potentially-wide pointers.
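            // e.g., `&u8` stays a single scalar; `&[u8]` becomes the pair
            // `(data pointer, usize length)` and `&dyn Trait` the pair
            // `(data pointer, vtable pointer)`, both built via `scalar_pair` below.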
            ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                let mut data_ptr = scalar_unit(Pointer);
                if !ty.is_unsafe_ptr() {
                    data_ptr.valid_range = 1..=*data_ptr.valid_range.end();
                }

                let pointee = tcx.normalize_erasing_regions(param_env, pointee);
                if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
                    return Ok(tcx.intern_layout(Layout::scalar(self, data_ptr)));
                }

                let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
                let metadata = match unsized_part.kind() {
                    ty::Foreign(..) => {
                        return Ok(tcx.intern_layout(Layout::scalar(self, data_ptr)));
                    }
                    ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)),
                    ty::Dynamic(..) => {
                        let mut vtable = scalar_unit(Pointer);
                        vtable.valid_range = 1..=*vtable.valid_range.end();
                        vtable
                    }
                    _ => return Err(LayoutError::Unknown(unsized_part)),
                };

                // Effectively a (ptr, meta) tuple.
                tcx.intern_layout(self.scalar_pair(data_ptr, metadata))
            }

            // Arrays and slices.
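            // e.g., `[u16; 3]` gets `stride: 2, count: 3`, size 6 and align 2;
            // bare `[T]` and `str` reuse the same shape with `count: 0` and size 0,
            // since only the pointer to them carries the length.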
            ty::Array(element, mut count) => {
                if count.has_projections() {
                    count = tcx.normalize_erasing_regions(param_env, count);
                    if count.has_projections() {
                        return Err(LayoutError::Unknown(ty));
                    }
                }

                let count = count.try_eval_usize(tcx, param_env).ok_or(LayoutError::Unknown(ty))?;
                let element = self.layout_of(element)?;
                let size =
                    element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;

                let abi = if count != 0 && ty.conservative_is_privately_uninhabited(tcx) {
                    Abi::Uninhabited
                } else {
                    Abi::Aggregate { sized: true }
                };

                let largest_niche = if count != 0 { element.largest_niche.clone() } else { None };

                tcx.intern_layout(Layout {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldsShape::Array { stride: element.size, count },
                    abi,
                    largest_niche,
                    align: element.align,
                    size,
                })
            }
            ty::Slice(element) => {
                let element = self.layout_of(element)?;
                tcx.intern_layout(Layout {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldsShape::Array { stride: element.size, count: 0 },
                    abi: Abi::Aggregate { sized: false },
                    largest_niche: None,
                    align: element.align,
                    size: Size::ZERO,
                })
            }
            ty::Str => tcx.intern_layout(Layout {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
                abi: Abi::Aggregate { sized: false },
                largest_niche: None,
                align: dl.i8_align,
                size: Size::ZERO,
            }),

            // Odd unit types.
            ty::FnDef(..) => univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?,
            ty::Dynamic(..) | ty::Foreign(..) => {
                let mut unit = self.univariant_uninterned(
                    ty,
                    &[],
                    &ReprOptions::default(),
                    StructKind::AlwaysSized,
                )?;
                match unit.abi {
                    Abi::Aggregate { ref mut sized } => *sized = false,
                    _ => bug!(),
                }
                tcx.intern_layout(unit)
            }

            ty::Generator(def_id, substs, _) => self.generator_layout(ty, def_id, substs)?,

            ty::Closure(_, ref substs) => {
                let tys = substs.as_closure().upvar_tys();
                univariant(
                    &tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    StructKind::AlwaysSized,
                )?
            }

            ty::Tuple(tys) => {
                let kind =
                    if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };

                univariant(
                    &tys.iter()
                        .map(|k| self.layout_of(k.expect_ty()))
                        .collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    kind,
                )?
            }

            // SIMD vector types.
            ty::Adt(def, substs) if def.repr.simd() => {
                // Supported SIMD vectors are homogeneous ADTs with at least one field:
                //
                // * #[repr(simd)] struct S(T, T, T, T);
                // * #[repr(simd)] struct S { x: T, y: T, z: T, w: T }
                // * #[repr(simd)] struct S([T; 4])
                //
                // where T is a primitive scalar (integer/float/pointer).

                // SIMD vectors with zero fields are not supported.
                // (should be caught by typeck)
                if def.non_enum_variant().fields.is_empty() {
                    tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
                }

                // Type of the first ADT field:
                let f0_ty = def.non_enum_variant().fields[0].ty(tcx, substs);

                // Heterogeneous SIMD vectors are not supported:
                // (should be caught by typeck)
                for fi in &def.non_enum_variant().fields {
                    if fi.ty(tcx, substs) != f0_ty {
                        tcx.sess.fatal(&format!("monomorphising heterogeneous SIMD type `{}`", ty));
                    }
                }

                // The element type and number of elements of the SIMD vector
                // are obtained from:
                //
                // * the element type and length of the single array field, if
                // the first field is of array type, or
                // * the homogeneous field type and the number of fields.
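                //
                // e.g., `#[repr(simd)] struct F32x4([f32; 4]);` yields `e_ty = f32`,
                // `e_len = 4`, `is_array = true` (a 16-byte vector), while
                // `#[repr(simd)] struct U64x2(u64, u64);` yields `e_ty = u64`, `e_len = 2`.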
                let (e_ty, e_len, is_array) = if let ty::Array(e_ty, _) = f0_ty.kind() {
                    // First ADT field is an array:

                    // SIMD vectors with multiple array fields are not supported:
                    // (should be caught by typeck)
                    if def.non_enum_variant().fields.len() != 1 {
                        tcx.sess.fatal(&format!(
                            "monomorphising SIMD type `{}` with more than one array field",
                            ty
                        ));
                    }

                    // Extract the number of elements from the layout of the array field:
                    let len = if let Ok(TyAndLayout {
                        layout: Layout { fields: FieldsShape::Array { count, .. }, .. },
                        ..
                    }) = self.layout_of(f0_ty)
                    {
                        count
                    } else {
                        return Err(LayoutError::Unknown(ty));
                    };

                    (*e_ty, *len, true)
                } else {
                    // First ADT field is not an array:
                    (f0_ty, def.non_enum_variant().fields.len() as _, false)
                };

                // SIMD vectors of zero length are not supported.
                //
                // Can't be caught in typeck if the array length is generic.
                if e_len == 0 {
                    tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
                }

                // Compute the ABI of the element type:
                let e_ly = self.layout_of(e_ty)?;
                let e_abi = if let Abi::Scalar(ref scalar) = e_ly.abi {
                    scalar.clone()
                } else {
                    // This error isn't caught in typeck, e.g., if
                    // the element type of the vector is generic.
                    tcx.sess.fatal(&format!(
                        "monomorphising SIMD type `{}` with a non-primitive-scalar \
                        (integer/float/pointer) element type `{}`",
                        ty, e_ty
                    ))
                };

                // Compute the size and alignment of the vector:
                let size = e_ly.size.checked_mul(e_len, dl).ok_or(LayoutError::SizeOverflow(ty))?;
                let align = dl.vector_align(size);
                let size = size.align_to(align.abi);

                // Compute the placement of the vector fields:
                let fields = if is_array {
                    FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] }
                } else {
                    FieldsShape::Array { stride: e_ly.size, count: e_len }
                };

                tcx.intern_layout(Layout {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields,
                    abi: Abi::Vector { element: e_abi, count: e_len },
                    largest_niche: e_ly.largest_niche.clone(),
                    size,
                    align,
                })
            }

            // ADTs.
            ty::Adt(def, substs) => {
                // Cache the field layouts.
                let variants = def
                    .variants
                    .iter()
                    .map(|v| {
                        v.fields
                            .iter()
                            .map(|field| self.layout_of(field.ty(tcx, substs)))
                            .collect::<Result<Vec<_>, _>>()
                    })
                    .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                if def.is_union() {
                    if def.repr.pack.is_some() && def.repr.align.is_some() {
                        bug!("union cannot be packed and aligned");
                    }

                    let mut align =
                        if def.repr.pack.is_some() { dl.i8_align } else { dl.aggregate_align };

                    if let Some(repr_align) = def.repr.align {
                        align = align.max(AbiAndPrefAlign::new(repr_align));
                    }

                    let optimize = !def.repr.inhibit_union_abi_opt();
                    let mut size = Size::ZERO;
                    let mut abi = Abi::Aggregate { sized: true };
                    let index = VariantIdx::new(0);
                    for field in &variants[index] {
                        assert!(!field.is_unsized());
                        align = align.max(field.align);

                        // If all non-ZST fields have the same ABI, forward this ABI
                        if optimize && !field.is_zst() {
                            // Normalize scalar_unit to the maximal valid range
                            let field_abi = match &field.abi {
                                Abi::Scalar(x) => Abi::Scalar(scalar_unit(x.value)),
                                Abi::ScalarPair(x, y) => {
                                    Abi::ScalarPair(scalar_unit(x.value), scalar_unit(y.value))
                                }
                                Abi::Vector { element: x, count } => {
                                    Abi::Vector { element: scalar_unit(x.value), count: *count }
                                }
                                Abi::Uninhabited | Abi::Aggregate { .. } => {
                                    Abi::Aggregate { sized: true }
                                }
                            };

                            if size == Size::ZERO {
                                // first non ZST: initialize 'abi'
                                abi = field_abi;
                            } else if abi != field_abi {
                                // different fields have different ABI: reset to Aggregate
                                abi = Abi::Aggregate { sized: true };
                            }
                        }

                        size = cmp::max(size, field.size);
                    }

                    if let Some(pack) = def.repr.pack {
                        align = align.min(AbiAndPrefAlign::new(pack));
                    }

                    return Ok(tcx.intern_layout(Layout {
                        variants: Variants::Single { index },
                        fields: FieldsShape::Union(
                            NonZeroUsize::new(variants[index].len())
                                .ok_or(LayoutError::Unknown(ty))?,
                        ),
                        abi,
                        largest_niche: None,
                        align,
                        size: size.align_to(align.abi),
                    }));
                }
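
                // e.g., `union U { a: u32, b: f32 }` mixes `Int` and `F32` scalar
                // ABIs, so `abi` falls back to `Abi::Aggregate { sized: true }`,
                // while `union V { a: u32, b: u32 }` keeps a full-range `Abi::Scalar`.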

                // A variant is absent if it's uninhabited and only has ZST fields.
                // Present uninhabited variants only require space for their fields,
                // but *not* an encoding of the discriminant (e.g., a tag value).
                // See issue #49298 for more details on the need to leave space
                // for non-ZST uninhabited data (mostly partial initialization).
                let absent = |fields: &[TyAndLayout<'_>]| {
                    let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
                    let is_zst = fields.iter().all(|f| f.is_zst());
                    uninhabited && is_zst
                };
                let (present_first, present_second) = {
                    let mut present_variants = variants
                        .iter_enumerated()
                        .filter_map(|(i, v)| if absent(v) { None } else { Some(i) });
                    (present_variants.next(), present_variants.next())
                };
                let present_first = match present_first {
                    Some(present_first) => present_first,
                    // Uninhabited because it has no variants, or only absent ones.
                    None if def.is_enum() => return tcx.layout_raw(param_env.and(tcx.types.never)),
                    // If it's a struct, still compute a layout so that we can still compute the
                    // field offsets.
                    None => VariantIdx::new(0),
                };

                let is_struct = !def.is_enum() ||
                    // Only one variant is present.
                    (present_second.is_none() &&
                    // Representation optimizations are allowed.
                    !def.repr.inhibit_enum_layout_opt());
                if is_struct {
                    // Struct, or univariant enum equivalent to a struct.
                    // (Typechecking will reject discriminant-sizing attrs.)

                    let v = present_first;
                    let kind = if def.is_enum() || variants[v].is_empty() {
                        StructKind::AlwaysSized
                    } else {
                        let param_env = tcx.param_env(def.did);
                        let last_field = def.variants[v].fields.last().unwrap();
                        let always_sized =
                            tcx.type_of(last_field.did).is_sized(tcx.at(DUMMY_SP), param_env);
                        if !always_sized {
                            StructKind::MaybeUnsized
                        } else {
                            StructKind::AlwaysSized
                        }
                    };

                    let mut st = self.univariant_uninterned(ty, &variants[v], &def.repr, kind)?;
                    st.variants = Variants::Single { index: v };
                    let (start, end) = self.tcx.layout_scalar_valid_range(def.did);
                    match st.abi {
                        Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
                            // the asserts ensure that we are not using the
                            // `#[rustc_layout_scalar_valid_range(n)]`
                            // attribute to widen the range of anything as that would probably
                            // result in UB somewhere
                            // FIXME(eddyb) the asserts are probably not needed,
                            // as larger validity ranges would result in missed
                            // optimizations, *not* wrongly assuming the inner
                            // value is valid. e.g. unions enlarge validity ranges,
                            // because the values may be uninitialized.
                            if let Bound::Included(start) = start {
                                // FIXME(eddyb) this might be incorrect - it doesn't
                                // account for wrap-around (end < start) ranges.
                                assert!(*scalar.valid_range.start() <= start);
                                scalar.valid_range = start..=*scalar.valid_range.end();
                            }
                            if let Bound::Included(end) = end {
                                // FIXME(eddyb) this might be incorrect - it doesn't
                                // account for wrap-around (end < start) ranges.
                                assert!(*scalar.valid_range.end() >= end);
                                scalar.valid_range = *scalar.valid_range.start()..=end;
                            }

                            // Update `largest_niche` if we have introduced a larger niche.
                            let niche = if def.repr.hide_niche() {
                                None
                            } else {
                                Niche::from_scalar(dl, Size::ZERO, scalar.clone())
                            };
                            if let Some(niche) = niche {
                                match &st.largest_niche {
                                    Some(largest_niche) => {
                                        // Replace the existing niche even if they're equal,
                                        // because this one is at a lower offset.
                                        if largest_niche.available(dl) <= niche.available(dl) {
                                            st.largest_niche = Some(niche);
                                        }
                                    }
                                    None => st.largest_niche = Some(niche),
                                }
                            }
                        }
                        _ => assert!(
                            start == Bound::Unbounded && end == Bound::Unbounded,
                            "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
                            def,
                            st,
                        ),
                    }

                    return Ok(tcx.intern_layout(st));
                }
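
                // e.g., `NonZeroU32` carries `#[rustc_layout_scalar_valid_range_start(1)]`,
                // which narrows its scalar to `1..=u32::MAX` here; that niche is what
                // lets `Option<NonZeroU32>` stay 4 bytes.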

                // At this point, we have handled all unions and
                // structs. (We have also handled univariant enums
                // that allow representation optimization.)
                assert!(def.is_enum());

                // The current code for niche-filling relies on variant indices
                // instead of actual discriminants, so dataful enums with
                // explicit discriminants (RFC #2363) would misbehave.
                let no_explicit_discriminants = def
                    .variants
                    .iter_enumerated()
                    .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i.as_u32()));

                let mut niche_filling_layout = None;

                // Niche-filling enum optimization.
                if !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants {
                    let mut dataful_variant = None;
                    let mut niche_variants = VariantIdx::MAX..=VariantIdx::new(0);

                    // Find one non-ZST variant.
                    'variants: for (v, fields) in variants.iter_enumerated() {
                        if absent(fields) {
                            continue 'variants;
                        }
                        for f in fields {
                            if !f.is_zst() {
                                if dataful_variant.is_none() {
                                    dataful_variant = Some(v);
                                    continue 'variants;
                                } else {
                                    dataful_variant = None;
                                    break 'variants;
                                }
                            }
                        }
                        niche_variants = *niche_variants.start().min(&v)..=v;
                    }

                    if niche_variants.start() > niche_variants.end() {
                        dataful_variant = None;
                    }

                    if let Some(i) = dataful_variant {
                        let count = (niche_variants.end().as_u32()
                            - niche_variants.start().as_u32()
                            + 1) as u128;

                        // Find the field with the largest niche
                        let niche_candidate = variants[i]
                            .iter()
                            .enumerate()
                            .filter_map(|(j, &field)| Some((j, field.largest_niche.as_ref()?)))
                            .max_by_key(|(_, niche)| niche.available(dl));

                        if let Some((field_index, niche, (niche_start, niche_scalar))) =
                            niche_candidate.and_then(|(field_index, niche)| {
                                Some((field_index, niche, niche.reserve(self, count)?))
                            })
                        {
                            let mut align = dl.aggregate_align;
                            let st = variants
                                .iter_enumerated()
                                .map(|(j, v)| {
                                    let mut st = self.univariant_uninterned(
                                        ty,
                                        v,
                                        &def.repr,
                                        StructKind::AlwaysSized,
                                    )?;
                                    st.variants = Variants::Single { index: j };

                                    align = align.max(st.align);

                                    Ok(st)
                                })
                                .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                            let offset = st[i].fields.offset(field_index) + niche.offset;
                            let size = st[i].size;

                            let abi = if st.iter().all(|v| v.abi.is_uninhabited()) {
                                Abi::Uninhabited
                            } else {
                                match st[i].abi {
                                    Abi::Scalar(_) => Abi::Scalar(niche_scalar.clone()),
                                    Abi::ScalarPair(ref first, ref second) => {
                                        // We need to use scalar_unit to reset the
                                        // valid range to the maximal one for that
                                        // primitive, because only the niche is
                                        // guaranteed to be initialised, not the
                                        // other primitive.
                                        if offset.bytes() == 0 {
                                            Abi::ScalarPair(
                                                niche_scalar.clone(),
                                                scalar_unit(second.value),
                                            )
                                        } else {
                                            Abi::ScalarPair(
                                                scalar_unit(first.value),
                                                niche_scalar.clone(),
                                            )
                                        }
                                    }
                                    _ => Abi::Aggregate { sized: true },
                                }
                            };

                            let largest_niche =
                                Niche::from_scalar(dl, offset, niche_scalar.clone());

                            niche_filling_layout = Some(Layout {
                                variants: Variants::Multiple {
                                    tag: niche_scalar,
                                    tag_encoding: TagEncoding::Niche {
                                        dataful_variant: i,
                                        niche_variants,
                                        niche_start,
                                    },
                                    tag_field: 0,
                                    variants: st,
                                },
                                fields: FieldsShape::Arbitrary {
                                    offsets: vec![offset],
                                    memory_index: vec![0],
                                },
                                abi,
                                largest_niche,
                                size,
                                align,
                            });
                        }
                    }
                }
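
                // e.g., for `Option<&T>`, `Some` is the single dataful variant and
                // the reference's excluded all-zero bit pattern encodes `None`, so
                // no separate tag is needed and the enum stays pointer-sized.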

                let (mut min, mut max) = (i128::MAX, i128::MIN);
                let discr_type = def.repr.discr_type();
                let bits = Integer::from_attr(self, discr_type).size().bits();
                for (i, discr) in def.discriminants(tcx) {
                    if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
                        continue;
                    }
                    let mut x = discr.val as i128;
                    if discr_type.is_signed() {
                        // sign extend the raw representation to be an i128
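                        // e.g., for a `#[repr(i8)]` enum, bits = 8, so a raw value of
                        // 0xFF becomes `(0xFF << 120) >> 120 == -1` as an `i128`.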
                        x = (x << (128 - bits)) >> (128 - bits);
                    }
                    if x < min {
                        min = x;
                    }
                    if x > max {
                        max = x;
                    }
                }
                // We might have no inhabited variants, so pretend there's at least one.
                if (min, max) == (i128::MAX, i128::MIN) {
                    min = 0;
                    max = 0;
                }
                assert!(min <= max, "discriminant range is {}...{}", min, max);
                let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max);

                let mut align = dl.aggregate_align;
                let mut size = Size::ZERO;

                // We're interested in the smallest alignment, so start large.
                let mut start_align = Align::from_bytes(256).unwrap();
                assert_eq!(Integer::for_align(dl, start_align), None);

                // repr(C) on an enum tells us to make a (tag, union) layout,
                // so we need to grow the prefix alignment to be at least
                // the alignment of the union. (This value is used both for
                // determining the alignment of the overall enum, and for
                // determining the alignment of the payload after the tag.)
                let mut prefix_align = min_ity.align(dl).abi;
                if def.repr.c() {
                    for fields in &variants {
                        for field in fields {
                            prefix_align = prefix_align.max(field.align.abi);
                        }
                    }
                }

                // Create the set of structs that represent each variant.
                let mut layout_variants = variants
                    .iter_enumerated()
                    .map(|(i, field_layouts)| {
                        let mut st = self.univariant_uninterned(
                            ty,
                            &field_layouts,
                            &def.repr,
                            StructKind::Prefixed(min_ity.size(), prefix_align),
                        )?;
                        st.variants = Variants::Single { index: i };
                        // Find the first field we can't move later
                        // to make room for a larger discriminant.
                        for field in
                            st.fields.index_by_increasing_offset().map(|j| field_layouts[j])
                        {
                            if !field.is_zst() || field.align.abi.bytes() != 1 {
                                start_align = start_align.min(field.align.abi);
                                break;
                            }
                        }
                        size = cmp::max(size, st.size);
                        align = align.max(st.align);
                        Ok(st)
                    })
                    .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                // Align the maximum variant size to the largest alignment.
                size = size.align_to(align.abi);

                if size.bytes() >= dl.obj_size_bound() {
                    return Err(LayoutError::SizeOverflow(ty));
                }

                let typeck_ity = Integer::from_attr(dl, def.repr.discr_type());
                if typeck_ity < min_ity {
                    // It is a bug if Layout decided on a greater discriminant size than typeck for
                    // some reason at this point (based on values discriminant can take on). Mostly
                    // because this discriminant will be loaded, and then stored into variable of
                    // type calculated by typeck. Consider such case (a bug): typeck decided on
                    // byte-sized discriminant, but layout thinks we need a 16-bit to store all
                    // discriminant values. That would be a bug, because then, in codegen, in order
                    // to store this 16-bit discriminant into 8-bit sized temporary some of the
                    // space necessary to represent would have to be discarded (or layout is wrong
                    // on thinking it needs 16 bits)
                    bug!(
                        "layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
                        min_ity,
                        typeck_ity
                    );
1180                     // However, it is fine to make the discriminant type as large as we like
1181                     // (as an optimisation) after this point; we'll just truncate the value we load in codegen.
1182                 }
1183
1184                 // Check to see if we should use a different type for the
1185                 // discriminant. We can safely use a type with the same size
1186                 // as the alignment of the first field of each variant.
1187                 // We increase the size of the discriminant to avoid LLVM copying
1188                 // padding when it doesn't need to; copying that padding normally
1189                 // causes unaligned loads/stores and excessive memcpy/memset
1190                 // operations. By using a bigger integer size, LLVM can be sure
1191                 // about its contents and won't be so conservative.
1192
1193                 // Use the initial field alignment
1194                 let mut ity = if def.repr.c() || def.repr.int.is_some() {
1195                     min_ity
1196                 } else {
1197                     Integer::for_align(dl, start_align).unwrap_or(min_ity)
1198                 };
1199
1200                 // If the alignment is not larger than the chosen discriminant size,
1201                 // don't use the alignment as the final size.
1202                 if ity <= min_ity {
1203                     ity = min_ity;
1204                 } else {
1205                     // Patch up the variants' first few fields.
1206                     let old_ity_size = min_ity.size();
1207                     let new_ity_size = ity.size();
1208                     for variant in &mut layout_variants {
1209                         match variant.fields {
1210                             FieldsShape::Arbitrary { ref mut offsets, .. } => {
1211                                 for i in offsets {
1212                                     if *i <= old_ity_size {
1213                                         assert_eq!(*i, old_ity_size);
1214                                         *i = new_ity_size;
1215                                     }
1216                                 }
1217                                 // We might be making the struct larger.
1218                                 if variant.size <= old_ity_size {
1219                                     variant.size = new_ity_size;
1220                                 }
1221                             }
1222                             _ => bug!(),
1223                         }
1224                     }
1225                 }
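                // Illustrative sketch (assuming a 64-bit target): for
                // `enum E { A, B(u64) }`, the two discriminant values fit in
                // `I8` (`min_ity`), but `start_align` is the 8-byte alignment
                // of `u64`, so `ity` becomes `I64`. `B`'s field already sits
                // at offset 8, so no offsets move here; the wider tag simply
                // occupies bytes that would otherwise be padding.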
1226
1227                 let tag_mask = !0u128 >> (128 - ity.size().bits());
1228                 let tag = Scalar {
1229                     value: Int(ity, signed),
1230                     valid_range: (min as u128 & tag_mask)..=(max as u128 & tag_mask),
1231                 };
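                // Worked example: for `ity == I8`, `tag_mask` is
                // `!0u128 >> 120`, i.e. `0xff`; an enum whose discriminants
                // are 0 and 1 then gets `valid_range: 0..=1`, leaving 2..=255
                // as a niche that enclosing types may use.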
1232                 let mut abi = Abi::Aggregate { sized: true };
1233                 if tag.value.size(dl) == size {
1234                     abi = Abi::Scalar(tag.clone());
1235                 } else {
1236                     // Try to use a ScalarPair for all tagged enums.
1237                     let mut common_prim = None;
1238                     for (field_layouts, layout_variant) in variants.iter().zip(&layout_variants) {
1239                         let offsets = match layout_variant.fields {
1240                             FieldsShape::Arbitrary { ref offsets, .. } => offsets,
1241                             _ => bug!(),
1242                         };
1243                         let mut fields =
1244                             field_layouts.iter().zip(offsets).filter(|p| !p.0.is_zst());
1245                         let (field, offset) = match (fields.next(), fields.next()) {
1246                             (None, None) => continue,
1247                             (Some(pair), None) => pair,
1248                             _ => {
1249                                 common_prim = None;
1250                                 break;
1251                             }
1252                         };
1253                         let prim = match field.abi {
1254                             Abi::Scalar(ref scalar) => scalar.value,
1255                             _ => {
1256                                 common_prim = None;
1257                                 break;
1258                             }
1259                         };
1260                         if let Some(pair) = common_prim {
1261                             // This is pretty conservative. We could go fancier
1262                             // by conflating things like i32 and u32, or even
1263                             // realising that (u8, u8) could just cohabit with
1264                             // u16 or even u32.
1265                             if pair != (prim, offset) {
1266                                 common_prim = None;
1267                                 break;
1268                             }
1269                         } else {
1270                             common_prim = Some((prim, offset));
1271                         }
1272                     }
1273                     if let Some((prim, offset)) = common_prim {
1274                         let pair = self.scalar_pair(tag.clone(), scalar_unit(prim));
1275                         let pair_offsets = match pair.fields {
1276                             FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
1277                                 assert_eq!(memory_index, &[0, 1]);
1278                                 offsets
1279                             }
1280                             _ => bug!(),
1281                         };
1282                         if pair_offsets[0] == Size::ZERO
1283                             && pair_offsets[1] == *offset
1284                             && align == pair.align
1285                             && size == pair.size
1286                         {
1287                             // We can use `ScalarPair` only when it matches our
1288                             // already computed layout (including `#[repr(C)]`).
1289                             abi = pair.abi;
1290                         }
1291                     }
1292                 }
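                // Illustrative sketch: in `enum E { A(u32), B(u32) }` every
                // variant has exactly one non-ZST field, of the same primitive
                // at the same offset, so `common_prim` survives the loop and
                // the enum becomes a `ScalarPair` of the tag and a `u32`. With
                // `enum E { A(u32), B(f32) }` the primitives differ, the loop
                // bails out, and the ABI stays `Aggregate`.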
1293
1294                 if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
1295                     abi = Abi::Uninhabited;
1296                 }
1297
1298                 let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag.clone());
1299
1300                 let tagged_layout = Layout {
1301                     variants: Variants::Multiple {
1302                         tag,
1303                         tag_encoding: TagEncoding::Direct,
1304                         tag_field: 0,
1305                         variants: layout_variants,
1306                     },
1307                     fields: FieldsShape::Arbitrary {
1308                         offsets: vec![Size::ZERO],
1309                         memory_index: vec![0],
1310                     },
1311                     largest_niche,
1312                     abi,
1313                     align,
1314                     size,
1315                 };
1316
1317                 let best_layout = match (tagged_layout, niche_filling_layout) {
1318                     (tagged_layout, Some(niche_filling_layout)) => {
1319                         // Pick the smaller layout; otherwise,
1320                         // pick the layout with the larger niche; otherwise,
1321                         // pick tagged as it has simpler codegen.
1322                         cmp::min_by_key(tagged_layout, niche_filling_layout, |layout| {
1323                             let niche_size =
1324                                 layout.largest_niche.as_ref().map_or(0, |n| n.available(dl));
1325                             (layout.size, cmp::Reverse(niche_size))
1326                         })
1327                     }
1328                     (tagged_layout, None) => tagged_layout,
1329                 };
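                // `cmp::min_by_key` returns its first argument when the keys
                // compare equal, so on a tie in both size and niche the tagged
                // layout wins, as promised above.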
1330
1331                 tcx.intern_layout(best_layout)
1332             }
1333
1334             // Types with no meaningful known layout.
1335             ty::Projection(_) | ty::Opaque(..) => {
1336                 let normalized = tcx.normalize_erasing_regions(param_env, ty);
1337                 if ty == normalized {
1338                     return Err(LayoutError::Unknown(ty));
1339                 }
1340                 tcx.layout_raw(param_env.and(normalized))?
1341             }
1342
1343             ty::Placeholder(..) | ty::GeneratorWitness(..) | ty::Infer(_) => {
1344                 bug!("Layout::compute: unexpected type `{}`", ty)
1345             }
1346
1347             ty::Bound(..) | ty::Param(_) | ty::Error(_) => {
1348                 return Err(LayoutError::Unknown(ty));
1349             }
1350         })
1351     }
1352 }
1353
1354 /// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
1355 #[derive(Clone, Debug, PartialEq)]
1356 enum SavedLocalEligibility {
1357     Unassigned,
1358     Assigned(VariantIdx),
1359     // FIXME: Use newtype_index so we aren't wasting bytes
1360     Ineligible(Option<u32>),
1361 }
1362
1363 // When laying out generators, we divide our saved local fields into two
1364 // categories: overlap-eligible and overlap-ineligible.
1365 //
1366 // Those fields which are ineligible for overlap go in a "prefix" at the
1367 // beginning of the layout, and always have space reserved for them.
1368 //
1369 // Overlap-eligible fields are only assigned to one variant, so we lay
1370 // those fields out for each variant and put them right after the
1371 // prefix.
1372 //
1373 // Finally, in the layout details, we point to the fields from the
1374 // variants they are assigned to. It is possible for some fields to be
1375 // included in multiple variants. No field ever "moves around" in the
1376 // layout; its offset is always the same.
1377 //
1378 // Also included in the layout are the upvars and the discriminant.
1379 // These are included as fields on the "outer" layout; they are not part
1380 // of any variant.
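//
// An illustrative sketch (hypothetical source, not tied to real MIR): in
//
//     |_| { let a = ...; yield; let b = ...; yield; use(a, b) }
//
// `a` is saved at both suspension points, so it is ineligible for overlap
// and is promoted to the prefix, while `b` is saved only at the second
// suspension point, stays eligible, and is laid out inside that variant,
// where its bytes may overlap locals used only by other variants.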
1381 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
1382     /// Compute the eligibility and assignment of each local.
1383     fn generator_saved_local_eligibility(
1384         &self,
1385         info: &GeneratorLayout<'tcx>,
1386     ) -> (BitSet<GeneratorSavedLocal>, IndexVec<GeneratorSavedLocal, SavedLocalEligibility>) {
1387         use SavedLocalEligibility::*;
1388
1389         let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> =
1390             IndexVec::from_elem_n(Unassigned, info.field_tys.len());
1391
1392         // The saved locals not eligible for overlap. These will get
1393         // "promoted" to the prefix of our generator.
1394         let mut ineligible_locals = BitSet::new_empty(info.field_tys.len());
1395
1396         // Figure out which of our saved locals are fields in only
1397         // one variant. The rest are deemed ineligible for overlap.
1398         for (variant_index, fields) in info.variant_fields.iter_enumerated() {
1399             for local in fields {
1400                 match assignments[*local] {
1401                     Unassigned => {
1402                         assignments[*local] = Assigned(variant_index);
1403                     }
1404                     Assigned(idx) => {
1405                         // We've already seen this local at another suspension
1406                         // point, so it is no longer a candidate.
1407                         trace!(
1408                             "removing local {:?} in >1 variant ({:?}, {:?})",
1409                             local,
1410                             variant_index,
1411                             idx
1412                         );
1413                         ineligible_locals.insert(*local);
1414                         assignments[*local] = Ineligible(None);
1415                     }
1416                     Ineligible(_) => {}
1417                 }
1418             }
1419         }
1420
1421         // Next, check every pair of eligible locals to see if they
1422         // conflict.
1423         for local_a in info.storage_conflicts.rows() {
1424             let conflicts_a = info.storage_conflicts.count(local_a);
1425             if ineligible_locals.contains(local_a) {
1426                 continue;
1427             }
1428
1429             for local_b in info.storage_conflicts.iter(local_a) {
1430                 // local_a and local_b are storage live at the same time, therefore they
1431                 // cannot overlap in the generator layout. The only way to guarantee
1432                 // this is if they are in the same variant, or one is ineligible
1433                 // (which means it is stored in every variant).
1434                 if ineligible_locals.contains(local_b)
1435                     || assignments[local_a] == assignments[local_b]
1436                 {
1437                     continue;
1438                 }
1439
1440                 // If they conflict, we will choose one to make ineligible.
1441                 // This is not always optimal; it's just a greedy heuristic that
1442                 // seems to produce good results most of the time.
1443                 let conflicts_b = info.storage_conflicts.count(local_b);
1444                 let (remove, other) =
1445                     if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
1446                 ineligible_locals.insert(remove);
1447                 assignments[remove] = Ineligible(None);
1448                 trace!("removing local {:?} due to conflict with {:?}", remove, other);
1449             }
1450         }
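        // E.g., if `a` conflicts with both `b` and `c` while `b` and `c` do
        // not conflict with each other, evicting `a` (two conflicts) rather
        // than `b` or `c` (one each) leaves two locals that can still overlap
        // if they are assigned to different variants.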
1451
1452         // Count the number of variants in use. If only one is in use, then it is
1453         // impossible to overlap any locals in our layout. In this case it's
1454         // always better to make the remaining locals ineligible, so we can
1455         // lay them out with the other locals in the prefix and eliminate
1456         // unnecessary padding bytes.
1457         {
1458             let mut used_variants = BitSet::new_empty(info.variant_fields.len());
1459             for assignment in &assignments {
1460                 if let Assigned(idx) = assignment {
1461                     used_variants.insert(*idx);
1462                 }
1463             }
1464             if used_variants.count() < 2 {
1465                 for assignment in assignments.iter_mut() {
1466                     *assignment = Ineligible(None);
1467                 }
1468                 ineligible_locals.insert_all();
1469             }
1470         }
1471
1472         // Write down the order of our locals that will be promoted to the prefix.
1473         {
1474             for (idx, local) in ineligible_locals.iter().enumerate() {
1475                 assignments[local] = Ineligible(Some(idx as u32));
1476             }
1477         }
1478         debug!("generator saved local assignments: {:?}", assignments);
1479
1480         (ineligible_locals, assignments)
1481     }
1482
1483     /// Compute the full generator layout.
1484     fn generator_layout(
1485         &self,
1486         ty: Ty<'tcx>,
1487         def_id: hir::def_id::DefId,
1488         substs: SubstsRef<'tcx>,
1489     ) -> Result<&'tcx Layout, LayoutError<'tcx>> {
1490         use SavedLocalEligibility::*;
1491         let tcx = self.tcx;
1492         let subst_field = |ty: Ty<'tcx>| ty.subst(tcx, substs);
1493
1494         let info = match tcx.generator_layout(def_id) {
1495             None => return Err(LayoutError::Unknown(ty)),
1496             Some(info) => info,
1497         };
1498         let (ineligible_locals, assignments) = self.generator_saved_local_eligibility(&info);
1499
1500         // Build a prefix layout, including "promoting" all ineligible
1501         // locals as part of the prefix. We compute the layout of all of
1502         // these fields at once to get optimal packing.
1503         let tag_index = substs.as_generator().prefix_tys().count();
1504
1505         // `info.variant_fields` already accounts for the reserved variants, so no need to add them.
1506         let max_discr = (info.variant_fields.len() - 1) as u128;
1507         let discr_int = Integer::fit_unsigned(max_discr);
1508         let discr_int_ty = discr_int.to_ty(tcx, false);
1509         let tag = Scalar { value: Primitive::Int(discr_int, false), valid_range: 0..=max_discr };
1510         let tag_layout = self.tcx.intern_layout(Layout::scalar(self, tag.clone()));
1511         let tag_layout = TyAndLayout { ty: discr_int_ty, layout: tag_layout };
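        // E.g., a generator with one suspension point has the three reserved
        // variants (unresumed, returned, panicked) plus one suspend variant,
        // so `max_discr == 3`, `discr_int` is `I8`, and the tag's
        // `valid_range` is `0..=3`.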
1512
1513         let promoted_layouts = ineligible_locals
1514             .iter()
1515             .map(|local| subst_field(info.field_tys[local]))
1516             .map(|ty| tcx.mk_maybe_uninit(ty))
1517             .map(|ty| self.layout_of(ty));
1518         let prefix_layouts = substs
1519             .as_generator()
1520             .prefix_tys()
1521             .map(|ty| self.layout_of(ty))
1522             .chain(iter::once(Ok(tag_layout)))
1523             .chain(promoted_layouts)
1524             .collect::<Result<Vec<_>, _>>()?;
1525         let prefix = self.univariant_uninterned(
1526             ty,
1527             &prefix_layouts,
1528             &ReprOptions::default(),
1529             StructKind::AlwaysSized,
1530         )?;
1531
1532         let (prefix_size, prefix_align) = (prefix.size, prefix.align);
1533
1534         // Split the prefix layout into the "outer" fields (upvars and
1535         // discriminant) and the "promoted" fields. Promoted fields will
1536         // get included in each variant that requested them in
1537         // GeneratorLayout.
1538         debug!("prefix = {:#?}", prefix);
1539         let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
1540             FieldsShape::Arbitrary { mut offsets, memory_index } => {
1541                 let mut inverse_memory_index = invert_mapping(&memory_index);
1542
1543                 // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
1544                 // "outer" and "promoted" fields respectively.
1545                 let b_start = (tag_index + 1) as u32;
1546                 let offsets_b = offsets.split_off(b_start as usize);
1547                 let offsets_a = offsets;
1548
1549                 // Disentangle the "a" and "b" components of `inverse_memory_index`
1550                 // by preserving the order but keeping only one disjoint "half" each.
1551                 // FIXME(eddyb) build a better abstraction for permutations, if possible.
1552                 let inverse_memory_index_b: Vec<_> =
1553                     inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
1554                 inverse_memory_index.retain(|&i| i < b_start);
1555                 let inverse_memory_index_a = inverse_memory_index;
1556
1557                 // Since `inverse_memory_index_{a,b}` each only refer to their
1558                 // respective fields, they can be safely inverted
1559                 let memory_index_a = invert_mapping(&inverse_memory_index_a);
1560                 let memory_index_b = invert_mapping(&inverse_memory_index_b);
1561
1562                 let outer_fields =
1563                     FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
1564                 (outer_fields, offsets_b, memory_index_b)
1565             }
1566             _ => bug!(),
1567         };
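        // Worked example of the split above: with one upvar plus the tag
        // (`b_start == 2`) and two promoted fields, a prefix `memory_index`
        // of `[2, 0, 3, 1]` has inverse `[1, 3, 0, 2]`; retaining `< 2`
        // gives `inverse_memory_index_a == [1, 0]`, and `checked_sub(2)`
        // gives `inverse_memory_index_b == [1, 0]`, which invert to
        // `memory_index_a == [1, 0]` and `memory_index_b == [1, 0]`.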
1568
1569         let mut size = prefix.size;
1570         let mut align = prefix.align;
1571         let variants = info
1572             .variant_fields
1573             .iter_enumerated()
1574             .map(|(index, variant_fields)| {
1575                 // Only include overlap-eligible fields when we compute our variant layout.
1576                 let variant_only_tys = variant_fields
1577                     .iter()
1578                     .filter(|local| match assignments[**local] {
1579                         Unassigned => bug!(),
1580                         Assigned(v) if v == index => true,
1581                         Assigned(_) => bug!("assignment does not match variant"),
1582                         Ineligible(_) => false,
1583                     })
1584                     .map(|local| subst_field(info.field_tys[*local]));
1585
1586                 let mut variant = self.univariant_uninterned(
1587                     ty,
1588                     &variant_only_tys
1589                         .map(|ty| self.layout_of(ty))
1590                         .collect::<Result<Vec<_>, _>>()?,
1591                     &ReprOptions::default(),
1592                     StructKind::Prefixed(prefix_size, prefix_align.abi),
1593                 )?;
1594                 variant.variants = Variants::Single { index };
1595
1596                 let (offsets, memory_index) = match variant.fields {
1597                     FieldsShape::Arbitrary { offsets, memory_index } => (offsets, memory_index),
1598                     _ => bug!(),
1599                 };
1600
1601                 // Now, stitch the promoted and variant-only fields back together in
1602                 // the order they are mentioned by our GeneratorLayout.
1603                 // Because we only use some subset (that can differ between variants)
1604                 // of the promoted fields, we can't just pick those elements of the
1605                 // `promoted_memory_index` (as we'd end up with gaps).
1606                 // So instead, we build an "inverse memory_index", as if all of the
1607                 // promoted fields were being used, but leave the elements not in the
1608                 // subset as `INVALID_FIELD_IDX`, which we can filter out later to
1609                 // obtain a valid (bijective) mapping.
1610                 const INVALID_FIELD_IDX: u32 = !0;
1611                 let mut combined_inverse_memory_index =
1612                     vec![INVALID_FIELD_IDX; promoted_memory_index.len() + memory_index.len()];
1613                 let mut offsets_and_memory_index = offsets.into_iter().zip(memory_index);
1614                 let combined_offsets = variant_fields
1615                     .iter()
1616                     .enumerate()
1617                     .map(|(i, local)| {
1618                         let (offset, memory_index) = match assignments[*local] {
1619                             Unassigned => bug!(),
1620                             Assigned(_) => {
1621                                 let (offset, memory_index) =
1622                                     offsets_and_memory_index.next().unwrap();
1623                                 (offset, promoted_memory_index.len() as u32 + memory_index)
1624                             }
1625                             Ineligible(field_idx) => {
1626                                 let field_idx = field_idx.unwrap() as usize;
1627                                 (promoted_offsets[field_idx], promoted_memory_index[field_idx])
1628                             }
1629                         };
1630                         combined_inverse_memory_index[memory_index as usize] = i as u32;
1631                         offset
1632                     })
1633                     .collect();
1634
1635                 // Remove the unused slots and invert the mapping to obtain the
1636                 // combined `memory_index` (also see previous comment).
1637                 combined_inverse_memory_index.retain(|&i| i != INVALID_FIELD_IDX);
1638                 let combined_memory_index = invert_mapping(&combined_inverse_memory_index);
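                // E.g., with two promoted fields, of which this variant uses
                // only promoted field 1 (at promoted memory position 0), plus
                // one eligible field, the combined inverse goes from
                // `[INVALID, INVALID, INVALID]` to `[0, INVALID, 1]`, compacts
                // to `[0, 1]`, and inverts to a bijective `memory_index`.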
1639
1640                 variant.fields = FieldsShape::Arbitrary {
1641                     offsets: combined_offsets,
1642                     memory_index: combined_memory_index,
1643                 };
1644
1645                 size = size.max(variant.size);
1646                 align = align.max(variant.align);
1647                 Ok(variant)
1648             })
1649             .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1650
1651         size = size.align_to(align.abi);
1652
1653         let abi = if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi.is_uninhabited())
1654         {
1655             Abi::Uninhabited
1656         } else {
1657             Abi::Aggregate { sized: true }
1658         };
1659
1660         let layout = tcx.intern_layout(Layout {
1661             variants: Variants::Multiple {
1662                 tag,
1663                 tag_encoding: TagEncoding::Direct,
1664                 tag_field: tag_index,
1665                 variants,
1666             },
1667             fields: outer_fields,
1668             abi,
1669             largest_niche: prefix.largest_niche,
1670             size,
1671             align,
1672         });
1673         debug!("generator layout ({:?}): {:#?}", ty, layout);
1674         Ok(layout)
1675     }
1676
1677     /// This is invoked by the `layout_raw` query to record the final
1678     /// layout of each type.
1679     #[inline(always)]
1680     fn record_layout_for_printing(&self, layout: TyAndLayout<'tcx>) {
1681         // If we are running with `-Zprint-type-sizes`, maybe record layouts
1682         // for dumping later.
1683         if self.tcx.sess.opts.debugging_opts.print_type_sizes {
1684             self.record_layout_for_printing_outlined(layout)
1685         }
1686     }
1687
1688     fn record_layout_for_printing_outlined(&self, layout: TyAndLayout<'tcx>) {
1689         // Ignore layouts computed with non-empty param environments, and
1690         // ignore non-monomorphic layouts, as the user only wants to see
1691         // the types that result from the final codegen session.
1692         if layout.ty.has_param_types_or_consts() || !self.param_env.caller_bounds().is_empty() {
1693             return;
1694         }
1695
1696         // (delay format until we actually need it)
1697         let record = |kind, packed, opt_discr_size, variants| {
1698             let type_desc = format!("{:?}", layout.ty);
1699             self.tcx.sess.code_stats.record_type_size(
1700                 kind,
1701                 type_desc,
1702                 layout.align.abi,
1703                 layout.size,
1704                 packed,
1705                 opt_discr_size,
1706                 variants,
1707             );
1708         };
1709
1710         let adt_def = match *layout.ty.kind() {
1711             ty::Adt(ref adt_def, _) => {
1712                 debug!("print-type-size t: `{:?}` process adt", layout.ty);
1713                 adt_def
1714             }
1715
1716             ty::Closure(..) => {
1717                 debug!("print-type-size t: `{:?}` record closure", layout.ty);
1718                 record(DataTypeKind::Closure, false, None, vec![]);
1719                 return;
1720             }
1721
1722             _ => {
1723                 debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
1724                 return;
1725             }
1726         };
1727
1728         let adt_kind = adt_def.adt_kind();
1729         let adt_packed = adt_def.repr.pack.is_some();
1730
1731         let build_variant_info = |n: Option<Ident>, flds: &[Symbol], layout: TyAndLayout<'tcx>| {
1732             let mut min_size = Size::ZERO;
1733             let field_info: Vec<_> = flds
1734                 .iter()
1735                 .enumerate()
1736                 .map(|(i, &name)| match layout.field(self, i) {
1737                     Err(err) => {
1738                         bug!("no layout found for field {}: `{:?}`", name, err);
1739                     }
1740                     Ok(field_layout) => {
1741                         let offset = layout.fields.offset(i);
1742                         let field_end = offset + field_layout.size;
1743                         if min_size < field_end {
1744                             min_size = field_end;
1745                         }
1746                         FieldInfo {
1747                             name: name.to_string(),
1748                             offset: offset.bytes(),
1749                             size: field_layout.size.bytes(),
1750                             align: field_layout.align.abi.bytes(),
1751                         }
1752                     }
1753                 })
1754                 .collect();
1755
1756             VariantInfo {
1757                 name: n.map(|n| n.to_string()),
1758                 kind: if layout.is_unsized() { SizeKind::Min } else { SizeKind::Exact },
1759                 align: layout.align.abi.bytes(),
1760                 size: if min_size.bytes() == 0 { layout.size.bytes() } else { min_size.bytes() },
1761                 fields: field_info,
1762             }
1763         };
1764
1765         match layout.variants {
1766             Variants::Single { index } => {
1767                 if !adt_def.variants.is_empty() {
1768                     let variant_def = &adt_def.variants[index];
1769                     debug!("print-type-size `{:#?}` variant {}", layout, variant_def.ident);
1770                     let fields: Vec<_> = variant_def.fields.iter().map(|f| f.ident.name).collect();
1771                     record(
1772                         adt_kind.into(),
1773                         adt_packed,
1774                         None,
1775                         vec![build_variant_info(Some(variant_def.ident), &fields, layout)],
1776                     );
1777                 } else {
1778                 // (This case arises for *empty* enums, so give it
1779                     // zero variants.)
1780                     record(adt_kind.into(), adt_packed, None, vec![]);
1781                 }
1782             }
1783
1784             Variants::Multiple { ref tag, ref tag_encoding, .. } => {
1785                 debug!(
1786                     "print-type-size `{:#?}` adt general variants def {}",
1787                     layout.ty,
1788                     adt_def.variants.len()
1789                 );
1790                 let variant_infos: Vec<_> = adt_def
1791                     .variants
1792                     .iter_enumerated()
1793                     .map(|(i, variant_def)| {
1794                         let fields: Vec<_> =
1795                             variant_def.fields.iter().map(|f| f.ident.name).collect();
1796                         build_variant_info(
1797                             Some(variant_def.ident),
1798                             &fields,
1799                             layout.for_variant(self, i),
1800                         )
1801                     })
1802                     .collect();
1803                 record(
1804                     adt_kind.into(),
1805                     adt_packed,
1806                     match tag_encoding {
1807                         TagEncoding::Direct => Some(tag.value.size(self)),
1808                         _ => None,
1809                     },
1810                     variant_infos,
1811                 );
1812             }
1813         }
1814     }
1815 }
1816
1817 /// Type size "skeleton", i.e., the only information determining a type's size.
1818 /// While this is conservative (aside from constant sizes, only pointers,
1819 /// newtypes thereof, and null-pointer-optimized enums are allowed), it is
1820 /// enough to statically check common use cases of transmute.
1821 #[derive(Copy, Clone, Debug)]
1822 pub enum SizeSkeleton<'tcx> {
1823     /// Any statically computable Layout.
1824     Known(Size),
1825
1826     /// A potentially-fat pointer.
1827     Pointer {
1828         /// If true, this pointer is never null.
1829         non_zero: bool,
1830         /// The type which determines the unsized metadata, if any,
1831         /// of this pointer. Either a type parameter or a projection
1832         /// depending on one, with regions erased.
1833         tail: Ty<'tcx>,
1834     },
1835 }
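// An illustrative use (hedged; this mirrors how transmute size checks consume
// skeletons): for a type parameter `T`, both `&T` and `Option<&T>` fail
// `layout_of`, yet both compute to `SizeSkeleton::Pointer { tail: T, .. }`,
// so `same_size` can accept `transmute::<&T, Option<&T>>` without knowing `T`.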
1836
1837 impl<'tcx> SizeSkeleton<'tcx> {
1838     pub fn compute(
1839         ty: Ty<'tcx>,
1840         tcx: TyCtxt<'tcx>,
1841         param_env: ty::ParamEnv<'tcx>,
1842     ) -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
1843         debug_assert!(!ty.has_infer_types_or_consts());
1844
1845         // First try computing a static layout.
1846         let err = match tcx.layout_of(param_env.and(ty)) {
1847             Ok(layout) => {
1848                 return Ok(SizeSkeleton::Known(layout.size));
1849             }
1850             Err(err) => err,
1851         };
1852
1853         match *ty.kind() {
1854             ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
1855                 let non_zero = !ty.is_unsafe_ptr();
1856                 let tail = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
1857                 match tail.kind() {
1858                     ty::Param(_) | ty::Projection(_) => {
1859                         debug_assert!(tail.has_param_types_or_consts());
1860                         Ok(SizeSkeleton::Pointer { non_zero, tail: tcx.erase_regions(tail) })
1861                     }
1862                     _ => bug!(
1863                         "SizeSkeleton::compute({}): layout errored ({}), yet \
1864                               tail `{}` is not a type parameter or a projection",
1865                         ty,
1866                         err,
1867                         tail
1868                     ),
1869                 }
1870             }
1871
1872             ty::Adt(def, substs) => {
1873                 // Only newtypes and enums w/ nullable pointer optimization.
1874                 if def.is_union() || def.variants.is_empty() || def.variants.len() > 2 {
1875                     return Err(err);
1876                 }
1877
1878                 // Get a zero-sized variant or a pointer newtype.
1879                 let zero_or_ptr_variant = |i| {
1880                     let i = VariantIdx::new(i);
1881                     let fields = def.variants[i]
1882                         .fields
1883                         .iter()
1884                         .map(|field| SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env));
1885                     let mut ptr = None;
1886                     for field in fields {
1887                         let field = field?;
1888                         match field {
1889                             SizeSkeleton::Known(size) => {
1890                                 if size.bytes() > 0 {
1891                                     return Err(err);
1892                                 }
1893                             }
1894                             SizeSkeleton::Pointer { .. } => {
1895                                 if ptr.is_some() {
1896                                     return Err(err);
1897                                 }
1898                                 ptr = Some(field);
1899                             }
1900                         }
1901                     }
1902                     Ok(ptr)
1903                 };
1904
1905                 let v0 = zero_or_ptr_variant(0)?;
1906                 // Newtype.
1907                 if def.variants.len() == 1 {
1908                     if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
1909                         return Ok(SizeSkeleton::Pointer {
1910                             non_zero: non_zero
1911                                 || match tcx.layout_scalar_valid_range(def.did) {
1912                                     (Bound::Included(start), Bound::Unbounded) => start > 0,
1913                                     (Bound::Included(start), Bound::Included(end)) => {
1914                                         0 < start && start < end
1915                                     }
1916                                     _ => false,
1917                                 },
1918                             tail,
1919                         });
1920                     } else {
1921                         return Err(err);
1922                     }
1923                 }
1924
1925                 let v1 = zero_or_ptr_variant(1)?;
1926                 // Nullable pointer enum optimization.
1927                 match (v0, v1) {
1928                     (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None)
1929                     | (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
1930                         Ok(SizeSkeleton::Pointer { non_zero: false, tail })
1931                     }
1932                     _ => Err(err),
1933                 }
1934             }
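            // E.g., for a type parameter `T`, `NonNull<T>` takes the newtype
            // path above: its raw-pointer field alone gives `non_zero: false`,
            // but `layout_scalar_valid_range` starting at 1 upgrades that to
            // `non_zero: true`. `Option<NonNull<T>>` then lands here, yielding
            // `non_zero: false` because `None` occupies the null value.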
1935
1936             ty::Projection(_) | ty::Opaque(..) => {
1937                 let normalized = tcx.normalize_erasing_regions(param_env, ty);
1938                 if ty == normalized {
1939                     Err(err)
1940                 } else {
1941                     SizeSkeleton::compute(normalized, tcx, param_env)
1942                 }
1943             }
1944
1945             _ => Err(err),
1946         }
1947     }
1948
1949     pub fn same_size(self, other: SizeSkeleton<'_>) -> bool {
1950         match (self, other) {
1951             (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
1952             (SizeSkeleton::Pointer { tail: a, .. }, SizeSkeleton::Pointer { tail: b, .. }) => {
1953                 a == b
1954             }
1955             _ => false,
1956         }
1957     }
1958 }
1959
1960 pub trait HasTyCtxt<'tcx>: HasDataLayout {
1961     fn tcx(&self) -> TyCtxt<'tcx>;
1962 }
1963
1964 pub trait HasParamEnv<'tcx> {
1965     fn param_env(&self) -> ty::ParamEnv<'tcx>;
1966 }
1967
1968 impl<'tcx> HasDataLayout for TyCtxt<'tcx> {
1969     fn data_layout(&self) -> &TargetDataLayout {
1970         &self.data_layout
1971     }
1972 }
1973
1974 impl<'tcx> HasTyCtxt<'tcx> for TyCtxt<'tcx> {
1975     fn tcx(&self) -> TyCtxt<'tcx> {
1976         *self
1977     }
1978 }
1979
1980 impl<'tcx, C> HasParamEnv<'tcx> for LayoutCx<'tcx, C> {
1981     fn param_env(&self) -> ty::ParamEnv<'tcx> {
1982         self.param_env
1983     }
1984 }
1985
1986 impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
1987     fn data_layout(&self) -> &TargetDataLayout {
1988         self.tcx.data_layout()
1989     }
1990 }
1991
1992 impl<'tcx, T: HasTyCtxt<'tcx>> HasTyCtxt<'tcx> for LayoutCx<'tcx, T> {
1993     fn tcx(&self) -> TyCtxt<'tcx> {
1994         self.tcx.tcx()
1995     }
1996 }
1997
1998 pub type TyAndLayout<'tcx> = rustc_target::abi::TyAndLayout<'tcx, Ty<'tcx>>;
1999
2000 impl<'tcx> LayoutOf for LayoutCx<'tcx, TyCtxt<'tcx>> {
2001     type Ty = Ty<'tcx>;
2002     type TyAndLayout = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
2003
2004     /// Computes the layout of a type. Note that this implicitly
2005     /// executes in "reveal all" mode.
2006     fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout {
2007         let param_env = self.param_env.with_reveal_all_normalized(self.tcx);
2008         let ty = self.tcx.normalize_erasing_regions(param_env, ty);
2009         let layout = self.tcx.layout_raw(param_env.and(ty))?;
2010         let layout = TyAndLayout { ty, layout };
2011
2012         // N.B., this recording is normally disabled; when enabled, it
2013         // can however trigger recursive invocations of `layout_of`.
2014         // Therefore, we execute it *after* the main query has
2015         // completed, to avoid problems around recursive structures
2016         // and the like. (Admittedly, I wasn't able to reproduce a problem
2017         // here, but it seems like the right thing to do. -nmatsakis)
2018         self.record_layout_for_printing(layout);
2019
2020         Ok(layout)
2021     }
2022 }
2023
2024 impl LayoutOf for LayoutCx<'tcx, ty::query::TyCtxtAt<'tcx>> {
2025     type Ty = Ty<'tcx>;
2026     type TyAndLayout = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
2027
2028     /// Computes the layout of a type. Note that this implicitly
2029     /// executes in "reveal all" mode.
2030     fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout {
2031         let param_env = self.param_env.with_reveal_all_normalized(*self.tcx);
2032         let ty = self.tcx.normalize_erasing_regions(param_env, ty);
2033         let layout = self.tcx.layout_raw(param_env.and(ty))?;
2034         let layout = TyAndLayout { ty, layout };
2035
2036         // N.B., this recording is normally disabled; when enabled, it
2037         // can however trigger recursive invocations of `layout_of`.
2038         // Therefore, we execute it *after* the main query has
2039         // completed, to avoid problems around recursive structures
2040         // and the like. (Admittedly, I wasn't able to reproduce a problem
2041         // here, but it seems like the right thing to do. -nmatsakis)
2042         let cx = LayoutCx { tcx: *self.tcx, param_env: self.param_env };
2043         cx.record_layout_for_printing(layout);
2044
2045         Ok(layout)
2046     }
2047 }
2048
2049 // Helper (inherent) `layout_of` methods to avoid pushing `LayoutCx` to users.
2050 impl TyCtxt<'tcx> {
2051     /// Computes the layout of a type. Note that this implicitly
2052     /// executes in "reveal all" mode.
2053     #[inline]
2054     pub fn layout_of(
2055         self,
2056         param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
2057     ) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
2058         let cx = LayoutCx { tcx: self, param_env: param_env_and_ty.param_env };
2059         cx.layout_of(param_env_and_ty.value)
2060     }
2061 }
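// A minimal (hypothetical) call site for the helper above, assuming some
// `tcx: TyCtxt<'tcx>` and a fully monomorphic `ty: Ty<'tcx>` are in scope:
//
//     let size = tcx.layout_of(ty::ParamEnv::reveal_all().and(ty))?.size;
//
// `ParamEnv::and` builds the `ParamEnvAnd` pair, so callers never construct
// it by hand.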
2062
2063 impl ty::query::TyCtxtAt<'tcx> {
2064     /// Computes the layout of a type. Note that this implicitly
2065     /// executes in "reveal all" mode.
2066     #[inline]
2067     pub fn layout_of(
2068         self,
2069         param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
2070     ) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
2071         let cx = LayoutCx { tcx: self.at(self.span), param_env: param_env_and_ty.param_env };
2072         cx.layout_of(param_env_and_ty.value)
2073     }
2074 }
2075
2076 impl<'tcx, C> TyAndLayoutMethods<'tcx, C> for Ty<'tcx>
2077 where
2078     C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout: MaybeResult<TyAndLayout<'tcx>>>
2079         + HasTyCtxt<'tcx>
2080         + HasParamEnv<'tcx>,
2081 {
2082     fn for_variant(
2083         this: TyAndLayout<'tcx>,
2084         cx: &C,
2085         variant_index: VariantIdx,
2086     ) -> TyAndLayout<'tcx> {
2087         let layout = match this.variants {
2088             Variants::Single { index }
2089                 // If all variants but one are uninhabited, the variant layout is the enum layout.
2090                 if index == variant_index &&
2091                 // Don't confuse variants of uninhabited enums with the enum itself.
2092                 // For more details see https://github.com/rust-lang/rust/issues/69763.
2093                 this.fields != FieldsShape::Primitive =>
2094             {
2095                 this.layout
2096             }
2097
2098             Variants::Single { index } => {
2099                 // Deny calling for_variant more than once for non-Single enums.
2100                 if let Ok(original_layout) = cx.layout_of(this.ty).to_result() {
2101                     assert_eq!(original_layout.variants, Variants::Single { index });
2102                 }
2103
2104                 let fields = match this.ty.kind() {
2105                     ty::Adt(def, _) if def.variants.is_empty() =>
2106                         bug!("for_variant called on zero-variant enum"),
2107                     ty::Adt(def, _) => def.variants[variant_index].fields.len(),
2108                     _ => bug!(),
2109                 };
2110                 let tcx = cx.tcx();
2111                 tcx.intern_layout(Layout {
2112                     variants: Variants::Single { index: variant_index },
2113                     fields: match NonZeroUsize::new(fields) {
2114                         Some(fields) => FieldsShape::Union(fields),
2115                         None => FieldsShape::Arbitrary { offsets: vec![], memory_index: vec![] },
2116                     },
2117                     abi: Abi::Uninhabited,
2118                     largest_niche: None,
2119                     align: tcx.data_layout.i8_align,
2120                     size: Size::ZERO,
2121                 })
2122             }
2123
2124             Variants::Multiple { ref variants, .. } => &variants[variant_index],
2125         };
2126
2127         assert_eq!(layout.variants, Variants::Single { index: variant_index });
2128
2129         TyAndLayout { ty: this.ty, layout }
2130     }
2131
2132     fn field(this: TyAndLayout<'tcx>, cx: &C, i: usize) -> C::TyAndLayout {
2133         enum TyMaybeWithLayout<C: LayoutOf> {
2134             Ty(C::Ty),
2135             TyAndLayout(C::TyAndLayout),
2136         }
2137
2138         fn ty_and_layout_kind<
2139             C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout: MaybeResult<TyAndLayout<'tcx>>>
2140                 + HasTyCtxt<'tcx>
2141                 + HasParamEnv<'tcx>,
2142         >(
2143             this: TyAndLayout<'tcx>,
2144             cx: &C,
2145             i: usize,
2146             ty: C::Ty,
2147         ) -> TyMaybeWithLayout<C> {
2148             let tcx = cx.tcx();
2149             let tag_layout = |tag: &Scalar| -> C::TyAndLayout {
2150                 let layout = Layout::scalar(cx, tag.clone());
2151                 MaybeResult::from(Ok(TyAndLayout {
2152                     layout: tcx.intern_layout(layout),
2153                     ty: tag.value.to_ty(tcx),
2154                 }))
2155             };
2156
2157             match *ty.kind() {
2158                 ty::Bool
2159                 | ty::Char
2160                 | ty::Int(_)
2161                 | ty::Uint(_)
2162                 | ty::Float(_)
2163                 | ty::FnPtr(_)
2164                 | ty::Never
2165                 | ty::FnDef(..)
2166                 | ty::GeneratorWitness(..)
2167                 | ty::Foreign(..)
2168                 | ty::Dynamic(..) => bug!("TyAndLayout::field_type({:?}): not applicable", this),
2169
2170                 // Potentially-fat pointers.
2171                 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
2172                     assert!(i < this.fields.count());
2173
2174                     // Reuse the fat `*T` type as its own thin pointer data field.
2175                     // This provides information about, e.g., DST struct pointees
2176                     // (which may have no non-DST form), and will work as long
2177                     // as the `Abi` or `FieldsShape` is checked by users.
2178                     if i == 0 {
2179                         let nil = tcx.mk_unit();
2180                         let ptr_ty = if ty.is_unsafe_ptr() {
2181                             tcx.mk_mut_ptr(nil)
2182                         } else {
2183                             tcx.mk_mut_ref(tcx.lifetimes.re_static, nil)
2184                         };
2185                         return TyMaybeWithLayout::TyAndLayout(MaybeResult::from(
2186                             cx.layout_of(ptr_ty).to_result().map(|mut ptr_layout| {
2187                                 ptr_layout.ty = ty;
2188                                 ptr_layout
2189                             }),
2190                         ));
2191                     }
2192
2193                     match tcx.struct_tail_erasing_lifetimes(pointee, cx.param_env()).kind() {
2194                         ty::Slice(_) | ty::Str => TyMaybeWithLayout::Ty(tcx.types.usize),
2195                         ty::Dynamic(_, _) => {
2196                             TyMaybeWithLayout::Ty(tcx.mk_imm_ref(
2197                                 tcx.lifetimes.re_static,
2198                                 tcx.mk_array(tcx.types.usize, 3),
2199                             ))
2200                             /* FIXME: use actual fn pointers
2201                             Warning: naively computing the number of entries in the
2202                             vtable by counting the methods on the trait + methods on
2203                             all parent traits does not work, because some methods can
2204                             be not object safe and thus excluded from the vtable.
2205                             Increase this counter if you tried to implement this but
2206                             failed to do it without duplicating a lot of code from
2207                             other places in the compiler: 2
2208                             tcx.mk_tup(&[
2209                                 tcx.mk_array(tcx.types.usize, 3),
2210                                 tcx.mk_array(Option<fn()>),
2211                             ])
2212                             */
2213                         }
2214                         _ => bug!("TyAndLayout::field_type({:?}): not applicable", this),
2215                     }
2216                 }
2217
2218                 // Arrays and slices.
2219                 ty::Array(element, _) | ty::Slice(element) => TyMaybeWithLayout::Ty(element),
2220                 ty::Str => TyMaybeWithLayout::Ty(tcx.types.u8),
2221
2222                 // Tuples, generators and closures.
2223                 ty::Closure(_, ref substs) => {
2224                     ty_and_layout_kind(this, cx, i, substs.as_closure().tupled_upvars_ty())
2225                 }
2226
2227                 ty::Generator(def_id, ref substs, _) => match this.variants {
2228                     Variants::Single { index } => TyMaybeWithLayout::Ty(
2229                         substs
2230                             .as_generator()
2231                             .state_tys(def_id, tcx)
2232                             .nth(index.as_usize())
2233                             .unwrap()
2234                             .nth(i)
2235                             .unwrap(),
2236                     ),
2237                     Variants::Multiple { ref tag, tag_field, .. } => {
2238                         if i == tag_field {
2239                             return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
2240                         }
2241                         TyMaybeWithLayout::Ty(substs.as_generator().prefix_tys().nth(i).unwrap())
2242                     }
2243                 },
2244
2245                 ty::Tuple(tys) => TyMaybeWithLayout::Ty(tys[i].expect_ty()),
2246
2247                 // ADTs.
2248                 ty::Adt(def, substs) => {
2249                     match this.variants {
2250                         Variants::Single { index } => {
2251                             TyMaybeWithLayout::Ty(def.variants[index].fields[i].ty(tcx, substs))
2252                         }
2253
2254                         // Discriminant field for enums (where applicable).
2255                         Variants::Multiple { ref tag, .. } => {
2256                             assert_eq!(i, 0);
2257                             return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
2258                         }
2259                     }
2260                 }
2261
2262                 ty::Projection(_)
2263                 | ty::Bound(..)
2264                 | ty::Placeholder(..)
2265                 | ty::Opaque(..)
2266                 | ty::Param(_)
2267                 | ty::Infer(_)
2268                 | ty::Error(_) => bug!("TyAndLayout::field_type: unexpected type `{}`", this.ty),
2269             }
2270         }
2271
2272         cx.layout_of(match ty_and_layout_kind(this, cx, i, this.ty) {
2273             TyMaybeWithLayout::Ty(result) => result,
2274             TyMaybeWithLayout::TyAndLayout(result) => return result,
2275         })
2276     }
2277
2278     fn pointee_info_at(this: TyAndLayout<'tcx>, cx: &C, offset: Size) -> Option<PointeeInfo> {
2279         let addr_space_of_ty = |ty: Ty<'tcx>| {
2280             if ty.is_fn() { cx.data_layout().instruction_address_space } else { AddressSpace::DATA }
2281         };
2282
2283         let pointee_info = match *this.ty.kind() {
2284             ty::RawPtr(mt) if offset.bytes() == 0 => {
2285                 cx.layout_of(mt.ty).to_result().ok().map(|layout| PointeeInfo {
2286                     size: layout.size,
2287                     align: layout.align.abi,
2288                     safe: None,
2289                     address_space: addr_space_of_ty(mt.ty),
2290                 })
2291             }
2292             ty::FnPtr(fn_sig) if offset.bytes() == 0 => {
2293                 cx.layout_of(cx.tcx().mk_fn_ptr(fn_sig)).to_result().ok().map(|layout| {
2294                     PointeeInfo {
2295                         size: layout.size,
2296                         align: layout.align.abi,
2297                         safe: None,
2298                         address_space: cx.data_layout().instruction_address_space,
2299                     }
2300                 })
2301             }
2302             ty::Ref(_, ty, mt) if offset.bytes() == 0 => {
2303                 let address_space = addr_space_of_ty(ty);
2304                 let tcx = cx.tcx();
2305                 let is_freeze = ty.is_freeze(tcx.at(DUMMY_SP), cx.param_env());
2306                 let kind = match mt {
2307                     hir::Mutability::Not => {
2308                         if is_freeze {
2309                             PointerKind::Frozen
2310                         } else {
2311                             PointerKind::Shared
2312                         }
2313                     }
2314                     hir::Mutability::Mut => {
2315                         // Previously we would only emit noalias annotations for LLVM >= 6 or in
2316                         // panic=abort mode. That was deemed right, as prior versions had many bugs
2317                 // in conjunction with unwinding, but later versions didn't seem to have
2318                         // said issues. See issue #31681.
2319                         //
2320                         // Alas, later on we encountered a case where noalias would generate wrong
2321                         // code altogether even with recent versions of LLVM in *safe* code with no
2322                         // unwinding involved. See #54462.
2323                         //
2324                         // For now, do not enable mutable_noalias by default at all, while the
2325                         // issue is being figured out.
2326                         if tcx.sess.opts.debugging_opts.mutable_noalias {
2327                             PointerKind::UniqueBorrowed
2328                         } else {
2329                             PointerKind::Shared
2330                         }
2331                     }
2332                 };
2333
2334                 cx.layout_of(ty).to_result().ok().map(|layout| PointeeInfo {
2335                     size: layout.size,
2336                     align: layout.align.abi,
2337                     safe: Some(kind),
2338                     address_space,
2339                 })
2340             }
2341
2342             _ => {
2343                 let mut data_variant = match this.variants {
2344                     // Within the discriminant field, only the niche itself is
2345                     // always initialized, so we only check for a pointer at its
2346                     // offset.
2347                     //
2348                     // If the niche is a pointer, it's either valid (according
2349                     // to its type), or null (which the niche field's scalar
2350                     // validity range encodes).  This allows using
2351                     // `dereferenceable_or_null` for e.g., `Option<&T>`, and
2352                     // this will continue to work as long as we don't start
2353                     // using more niches than just null (e.g., the first page of
2354                     // the address space, or unaligned pointers).
2355                     Variants::Multiple {
2356                         tag_encoding: TagEncoding::Niche { dataful_variant, .. },
2357                         tag_field,
2358                         ..
2359                     } if this.fields.offset(tag_field) == offset => {
2360                         Some(this.for_variant(cx, dataful_variant))
2361                     }
2362                     _ => Some(this),
2363                 };
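                     // For example (a sketch): in `Option<&T>`, `None` is the
                     // null niche of the pointer, so at the tag offset we descend
                     // into the dataful `Some` variant and report the `&T`
                     // pointee info, enabling `dereferenceable_or_null`.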
2364
2365                 if let Some(variant) = data_variant {
2366                     // We're not interested in any unions.
2367                     if let FieldsShape::Union(_) = variant.fields {
2368                         data_variant = None;
2369                     }
2370                 }
2371
2372                 let mut result = None;
2373
2374                 if let Some(variant) = data_variant {
2375                     let ptr_end = offset + Pointer.size(cx);
2376                     for i in 0..variant.fields.count() {
2377                         let field_start = variant.fields.offset(i);
2378                         if field_start <= offset {
2379                             let field = variant.field(cx, i);
2380                             result = field.to_result().ok().and_then(|field| {
2381                                 if ptr_end <= field_start + field.size {
2382                                     // We found the right field; recurse into it,
2383                                     // looking for pointee info at the offset
2384                                     // relative to the field's own start.
2385                                     field.pointee_info_at(cx, offset - field_start)
2386                                 } else {
2387                                     None
2388                                 }
2389                             });
2390                             if result.is_some() {
2391                                 break;
2392                             }
2393                         }
2394                     }
2395                 }
2396
2397                 // FIXME(eddyb) This should be for `ptr::Unique<T>`, not `Box<T>`.
2398                 if let Some(ref mut pointee) = result {
2399                     if let ty::Adt(def, _) = this.ty.kind() {
2400                         if def.is_box() && offset.bytes() == 0 {
2401                             pointee.safe = Some(PointerKind::UniqueOwned);
2402                         }
2403                     }
2404                 }
2405
2406                 result
2407             }
2408         };
2409
2410         debug!(
2411             "pointee_info_at (offset={:?}, type kind: {:?}) => {:?}",
2412             offset,
2413             this.ty.kind(),
2414             pointee_info
2415         );
2416
2417         pointee_info
2418     }
2419 }
2420
2421 impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for LayoutError<'tcx> {
2422     fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
2423         use crate::ty::layout::LayoutError::*;
2424         mem::discriminant(self).hash_stable(hcx, hasher);
2425
2426         match *self {
2427             Unknown(t) | SizeOverflow(t) => t.hash_stable(hcx, hasher),
2428         }
2429     }
2430 }
2431
2432 impl<'tcx> ty::Instance<'tcx> {
2433     // NOTE(eddyb) this is private to avoid using it from outside of
2434     // `FnAbi::of_instance` - any other uses are either too high-level
2435     // for `Instance` (e.g. typeck would use `Ty::fn_sig` instead),
2436     // or should go through `FnAbi` instead, to avoid losing any
2437     // adjustments `FnAbi::of_instance` might be performing.
2438     fn fn_sig_for_fn_abi(&self, tcx: TyCtxt<'tcx>) -> ty::PolyFnSig<'tcx> {
2439         // FIXME(davidtwco,eddyb): A `ParamEnv` should be passed through to this function.
2440         let ty = self.ty(tcx, ty::ParamEnv::reveal_all());
2441         match *ty.kind() {
2442             ty::FnDef(..) => {
2443                 // HACK(davidtwco,eddyb): This is a workaround for polymorphization considering
2444                 // parameters unused if they show up in the signature, but not in the `mir::Body`
2445                 // (i.e. due to being inside a projection that got normalized, see
2446                 // `src/test/ui/polymorphization/normalized_sig_types.rs`), and codegen not keeping
2447                 // track of a polymorphization `ParamEnv` to allow normalizing later.
2448                 let mut sig = match *ty.kind() {
2449                     ty::FnDef(def_id, substs) => tcx
2450                         .normalize_erasing_regions(tcx.param_env(def_id), tcx.fn_sig(def_id))
2451                         .subst(tcx, substs),
2452                     _ => unreachable!(),
2453                 };
2454
2455                 if let ty::InstanceDef::VtableShim(..) = self.def {
2456                     // Modify `fn(self, ...)` to `fn(self: *mut Self, ...)`.
2457                     sig = sig.map_bound(|mut sig| {
2458                         let mut inputs_and_output = sig.inputs_and_output.to_vec();
2459                         inputs_and_output[0] = tcx.mk_mut_ptr(inputs_and_output[0]);
2460                         sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);
2461                         sig
2462                     });
2463                 }
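                     // For example (a sketch): a by-value trait method
                     // `fn consume(self)` called through a vtable goes via a
                     // shim, so its signature here becomes `fn(*mut Self)`.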
2464                 sig
2465             }
2466             ty::Closure(def_id, substs) => {
2467                 let sig = substs.as_closure().sig();
2468
2469                 let env_ty = tcx.closure_env_ty(def_id, substs).unwrap();
2470                 sig.map_bound(|sig| {
2471                     tcx.mk_fn_sig(
2472                         iter::once(env_ty.skip_binder()).chain(sig.inputs().iter().cloned()),
2473                         sig.output(),
2474                         sig.c_variadic,
2475                         sig.unsafety,
2476                         sig.abi,
2477                     )
2478                 })
2479             }
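             // For example (a sketch): a by-ref closure `|x: u8| -> u16` has
             // sig `extern "rust-call" fn((u8,)) -> u16`; prepending the
             // environment gives `extern "rust-call" fn(&Env, (u8,)) -> u16`.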
2480             ty::Generator(_, substs, _) => {
2481                 let sig = substs.as_generator().poly_sig();
2482
2483                 let br = ty::BoundRegion { kind: ty::BrEnv };
2484                 let env_region = ty::ReLateBound(ty::INNERMOST, br);
2485                 let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty);
2486
2487                 let pin_did = tcx.require_lang_item(LangItem::Pin, None);
2488                 let pin_adt_ref = tcx.adt_def(pin_did);
2489                 let pin_substs = tcx.intern_substs(&[env_ty.into()]);
2490                 let env_ty = tcx.mk_adt(pin_adt_ref, pin_substs);
2491
2492                 sig.map_bound(|sig| {
2493                     let state_did = tcx.require_lang_item(LangItem::GeneratorState, None);
2494                     let state_adt_ref = tcx.adt_def(state_did);
2495                     let state_substs =
2496                         tcx.intern_substs(&[sig.yield_ty.into(), sig.return_ty.into()]);
2497                     let ret_ty = tcx.mk_adt(state_adt_ref, state_substs);
2498
2499                     tcx.mk_fn_sig(
2500                         [env_ty, sig.resume_ty].iter(),
2501                         &ret_ty,
2502                         false,
2503                         hir::Unsafety::Normal,
2504                         rustc_target::spec::abi::Abi::Rust,
2505                     )
2506                 })
2507             }
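             // For example (a sketch): a generator yielding `u32` with return
             // type `String` and resume type `R` is given the signature
             // `fn(Pin<&mut Self>, R) -> GeneratorState<u32, String>`.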
2508             _ => bug!("unexpected type {:?} in Instance::fn_sig", ty),
2509         }
2510     }
2511 }
2512
2513 pub trait FnAbiExt<'tcx, C>
2514 where
2515     C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout = TyAndLayout<'tcx>>
2516         + HasDataLayout
2517         + HasTargetSpec
2518         + HasTyCtxt<'tcx>
2519         + HasParamEnv<'tcx>,
2520 {
2521     /// Compute a `FnAbi` suitable for indirect calls, i.e. to `fn` pointers.
2522     ///
2523     /// NB: this doesn't handle virtual calls - those should use `FnAbi::of_instance`
2524     /// instead, where the instance is an `InstanceDef::Virtual`.
2525     fn of_fn_ptr(cx: &C, sig: ty::PolyFnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self;
2526
2527     /// Compute a `FnAbi` suitable for declaring/defining an `fn` instance, and for
2528     /// direct calls to an `fn`.
2529     ///
2530     /// NB: that includes virtual calls, which are represented by "direct calls"
2531     /// to an `InstanceDef::Virtual` instance (of `<dyn Trait as Trait>::fn`).
2532     fn of_instance(cx: &C, instance: ty::Instance<'tcx>, extra_args: &[Ty<'tcx>]) -> Self;
2533
2534     fn new_internal(
2535         cx: &C,
2536         sig: ty::PolyFnSig<'tcx>,
2537         extra_args: &[Ty<'tcx>],
2538         caller_location: Option<Ty<'tcx>>,
2539         codegen_fn_attr_flags: CodegenFnAttrFlags,
2540         make_self_ptr_thin: bool,
2541     ) -> Self;
2542     fn adjust_for_abi(&mut self, cx: &C, abi: SpecAbi);
2543 }
2544
2545 fn fn_can_unwind(
2546     panic_strategy: PanicStrategy,
2547     codegen_fn_attr_flags: CodegenFnAttrFlags,
2548     call_conv: Conv,
2549 ) -> bool {
2550     if panic_strategy != PanicStrategy::Unwind {
2551         // In panic=abort mode we assume nothing can unwind anywhere, so
2552         // optimize based on this!
2553         false
2554     } else if codegen_fn_attr_flags.contains(CodegenFnAttrFlags::UNWIND) {
2555         // If a specific #[unwind] attribute is present, use that.
2556         true
2557     } else if codegen_fn_attr_flags.contains(CodegenFnAttrFlags::RUSTC_ALLOCATOR_NOUNWIND) {
2558         // Special attribute for allocator functions, which can't unwind.
2559         false
2560     } else {
2561         if call_conv == Conv::Rust {
2562             // Any Rust method (or `extern "Rust" fn` or `extern
2563             // "rust-call" fn`) is explicitly allowed to unwind
2564             // (unless it has a no-unwind attribute, handled above).
2565             true
2566         } else {
2567             // Anything else is either:
2568             //
2569             //  1. A foreign item using a non-Rust ABI (like `extern "C" { fn foo(); }`), or
2570             //
2571             //  2. A Rust item using a non-Rust ABI (like `extern "C" fn foo() { ... }`).
2572             //
2573             // Foreign items (case 1) are assumed to not unwind; it is
2574             // UB otherwise. (At least for now; see also
2575             // rust-lang/rust#63909 and Rust RFC 2753.)
2576             //
2577             // Items defined in Rust with non-Rust ABIs (case 2) are also
2578             // not supposed to unwind. Whether this should be enforced
2579             // (versus stating it is UB) and *how* it would be enforced
2580             // is currently under discussion; see rust-lang/rust#58794.
2581             //
2582             // In either case, we mark the item as explicitly nounwind.
2583             false
2584         }
2585     }
2586 }
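// A sketch (not in the original source) of how the cases above combine,
// with `flags = CodegenFnAttrFlags::empty()`:
//
//     fn_can_unwind(PanicStrategy::Abort,  flags, Conv::Rust) == false
//     fn_can_unwind(PanicStrategy::Unwind, flags, Conv::Rust) == true
//     fn_can_unwind(PanicStrategy::Unwind, flags, Conv::C)    == false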
2587
2588 impl<'tcx, C> FnAbiExt<'tcx, C> for call::FnAbi<'tcx, Ty<'tcx>>
2589 where
2590     C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout = TyAndLayout<'tcx>>
2591         + HasDataLayout
2592         + HasTargetSpec
2593         + HasTyCtxt<'tcx>
2594         + HasParamEnv<'tcx>,
2595 {
2596     fn of_fn_ptr(cx: &C, sig: ty::PolyFnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
2597         // Assume that fn pointers may always unwind
2598         let codegen_fn_attr_flags = CodegenFnAttrFlags::UNWIND;
2599
2600         call::FnAbi::new_internal(cx, sig, extra_args, None, codegen_fn_attr_flags, false)
2601     }
2602
2603     fn of_instance(cx: &C, instance: ty::Instance<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
2604         let sig = instance.fn_sig_for_fn_abi(cx.tcx());
2605
2606         let caller_location = if instance.def.requires_caller_location(cx.tcx()) {
2607             Some(cx.tcx().caller_location_ty())
2608         } else {
2609             None
2610         };
2611
2612         let attrs = cx.tcx().codegen_fn_attrs(instance.def_id()).flags;
2613
2614         call::FnAbi::new_internal(
2615             cx,
2616             sig,
2617             extra_args,
2618             caller_location,
2619             attrs,
2620             matches!(instance.def, ty::InstanceDef::Virtual(..)),
2621         )
2622     }
2623
2624     fn new_internal(
2625         cx: &C,
2626         sig: ty::PolyFnSig<'tcx>,
2627         extra_args: &[Ty<'tcx>],
2628         caller_location: Option<Ty<'tcx>>,
2629         codegen_fn_attr_flags: CodegenFnAttrFlags,
2630         force_thin_self_ptr: bool,
2631     ) -> Self {
2632         debug!("FnAbi::new_internal({:?}, {:?})", sig, extra_args);
2633
2634         let sig = cx.tcx().normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), sig);
2635
2636         use rustc_target::spec::abi::Abi::*;
2637         let conv = match cx.tcx().sess.target.adjust_abi(sig.abi) {
2638             RustIntrinsic | PlatformIntrinsic | Rust | RustCall => Conv::Rust,
2639
2640             // It's the ABI's job to select this, not ours.
2641             System => bug!("system abi should be selected elsewhere"),
2642             EfiApi => bug!("eficall abi should be selected elsewhere"),
2643
2644             Stdcall => Conv::X86Stdcall,
2645             Fastcall => Conv::X86Fastcall,
2646             Vectorcall => Conv::X86VectorCall,
2647             Thiscall => Conv::X86ThisCall,
2648             C => Conv::C,
2649             Unadjusted => Conv::C,
2650             Win64 => Conv::X86_64Win64,
2651             SysV64 => Conv::X86_64SysV,
2652             Aapcs => Conv::ArmAapcs,
2653             PtxKernel => Conv::PtxKernel,
2654             Msp430Interrupt => Conv::Msp430Intr,
2655             X86Interrupt => Conv::X86Intr,
2656             AmdGpuKernel => Conv::AmdGpuKernel,
2657             AvrInterrupt => Conv::AvrInterrupt,
2658             AvrNonBlockingInterrupt => Conv::AvrNonBlockingInterrupt,
2659
2660             // These API constants ought to be more specific...
2661             Cdecl => Conv::C,
2662         };
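         // For example (a sketch): `extern "stdcall" fn()` maps to
         // `Conv::X86Stdcall` above, while every Rust-internal ABI
         // (`Rust`, `RustCall`, the intrinsic ABIs) collapses to `Conv::Rust`.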
2663
2664         let mut inputs = sig.inputs();
2665         let extra_args = if sig.abi == RustCall {
2666             assert!(!sig.c_variadic && extra_args.is_empty());
2667
2668             if let Some(input) = sig.inputs().last() {
2669                 if let ty::Tuple(tupled_arguments) = input.kind() {
2670                     inputs = &sig.inputs()[0..sig.inputs().len() - 1];
2671                     tupled_arguments.iter().map(|k| k.expect_ty()).collect()
2672                 } else {
2673                     bug!(
2674                         "argument to function with \"rust-call\" ABI \
2675                             is not a tuple"
2676                     );
2677                 }
2678             } else {
2679                 bug!(
2680                     "function with \"rust-call\" ABI \
2681                         has no arguments to untuple"
2682                 );
2683             }
2684         } else {
2685             assert!(sig.c_variadic || extra_args.is_empty());
2686             extra_args.to_vec()
2687         };
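         // For example (a sketch): calling a `FnMut(u8, u16)` closure uses the
         // "rust-call" ABI with signature `fn(&mut Self, (u8, u16))`; the
         // trailing tuple was untupled above, so the final argument list is
         // `[&mut Self, u8, u16]`.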
2688
2689         let target = &cx.tcx().sess.target;
2690         let target_env_gnu_like = matches!(&target.env[..], "gnu" | "musl");
2691         let win_x64_gnu = target.os == "windows" && target.arch == "x86_64" && target.env == "gnu";
2692         let linux_s390x_gnu_like =
2693             target.os == "linux" && target.arch == "s390x" && target_env_gnu_like;
2694         let linux_sparc64_gnu_like =
2695             target.os == "linux" && target.arch == "sparc64" && target_env_gnu_like;
2696         let linux_powerpc_gnu_like =
2697             target.os == "linux" && target.arch == "powerpc" && target_env_gnu_like;
2698         let rust_abi = matches!(sig.abi, RustIntrinsic | PlatformIntrinsic | Rust | RustCall);
2699
2700         // Handle safe Rust thin and fat pointers.
2701         let adjust_for_rust_scalar = |attrs: &mut ArgAttributes,
2702                                       scalar: &Scalar,
2703                                       layout: TyAndLayout<'tcx>,
2704                                       offset: Size,
2705                                       is_return: bool| {
2706             // Booleans are always an i1 that needs to be zero-extended.
2707             if scalar.is_bool() {
2708                 attrs.ext(ArgExtension::Zext);
2709                 return;
2710             }
2711
2712             // Only pointer types handled below.
2713             if scalar.value != Pointer {
2714                 return;
2715             }
2716
2717             if scalar.valid_range.start() < scalar.valid_range.end() {
2718                 if *scalar.valid_range.start() > 0 {
2719                     attrs.set(ArgAttribute::NonNull);
2720                 }
2721             }
2722
2723             if let Some(pointee) = layout.pointee_info_at(cx, offset) {
2724                 if let Some(kind) = pointee.safe {
2725                     attrs.pointee_align = Some(pointee.align);
2726
2727                     // `Box` (`UniqueOwned`) pointers are not necessarily dereferenceable
2728                     // for the entire duration of the function as they can be deallocated
2729                     // at any time. Set their valid size to 0.
2730                     attrs.pointee_size = match kind {
2731                         PointerKind::UniqueOwned => Size::ZERO,
2732                         _ => pointee.size,
2733                     };
2734
2735                     // `Box` pointer parameters never alias because ownership is transferred.
2736                     // `&mut` pointer parameters never alias other parameters
2737                     // or mutable global data.
2738                     //
2739                     // `&T` where `T` contains no `UnsafeCell<U>` is immutable,
2740                     // and can be marked as both `readonly` and `noalias`, as
2741                     // LLVM's definition of `noalias` is based solely on memory
2742                     // dependencies rather than pointer equality.
2743                     let no_alias = match kind {
2744                         PointerKind::Shared => false,
2745                         PointerKind::UniqueOwned => true,
2746                         PointerKind::Frozen | PointerKind::UniqueBorrowed => !is_return,
2747                     };
2748                     if no_alias {
2749                         attrs.set(ArgAttribute::NoAlias);
2750                     }
2751
2752                     if kind == PointerKind::Frozen && !is_return {
2753                         attrs.set(ArgAttribute::ReadOnly);
2754                     }
2755                 }
2756             }
2757         };
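         // An illustrative sketch (not from the original source) of what this
         // produces: an argument `&u64` (Frozen, non-return) ends up
         // `noalias readonly nonnull align(8)` with pointee size 8, which
         // codegen turns into `dereferenceable(8)`; a `Box<u64>` argument keeps
         // `noalias nonnull` but its pointee size is forced to 0.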
2758
2759         let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| {
2760             let is_return = arg_idx.is_none();
2761
2762             let layout = cx.layout_of(ty);
2763             let layout = if force_thin_self_ptr && arg_idx == Some(0) {
2764                 // Don't pass the vtable, it's not an argument of the virtual fn.
2765                 // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
2766                 // or `&/&mut dyn Trait` because this is special-cased elsewhere in codegen
2767                 make_thin_self_ptr(cx, layout)
2768             } else {
2769                 layout
2770             };
2771
2772             let mut arg = ArgAbi::new(cx, layout, |layout, scalar, offset| {
2773                 let mut attrs = ArgAttributes::new();
2774                 adjust_for_rust_scalar(&mut attrs, scalar, *layout, offset, is_return);
2775                 attrs
2776             });
2777
2778             if arg.layout.is_zst() {
2779                 // For some forsaken reason, x86_64-pc-windows-gnu
2780                 // doesn't ignore zero-sized struct arguments.
2781                 // The same is true for {s390x,sparc64,powerpc}-unknown-linux-{gnu,musl}.
2782                 if is_return
2783                     || rust_abi
2784                     || (!win_x64_gnu
2785                         && !linux_s390x_gnu_like
2786                         && !linux_sparc64_gnu_like
2787                         && !linux_powerpc_gnu_like)
2788                 {
2789                     arg.mode = PassMode::Ignore;
2790                 }
2791             }
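             // For example (a sketch): a `()` argument is `PassMode::Ignore`
             // for any Rust-ABI function, but stays materialized for an
             // `extern "C" fn` on x86_64-pc-windows-gnu.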
2792
2793             arg
2794         };
2795
2796         let mut fn_abi = FnAbi {
2797             ret: arg_of(sig.output(), None),
2798             args: inputs
2799                 .iter()
2800                 .cloned()
2801                 .chain(extra_args)
2802                 .chain(caller_location)
2803                 .enumerate()
2804                 .map(|(i, ty)| arg_of(ty, Some(i)))
2805                 .collect(),
2806             c_variadic: sig.c_variadic,
2807             fixed_count: inputs.len(),
2808             conv,
2809             can_unwind: fn_can_unwind(cx.tcx().sess.panic_strategy(), codegen_fn_attr_flags, conv),
2810         };
2811         fn_abi.adjust_for_abi(cx, sig.abi);
2812         debug!("FnAbi::new_internal = {:?}", fn_abi);
2813         fn_abi
2814     }
2815
2816     fn adjust_for_abi(&mut self, cx: &C, abi: SpecAbi) {
2817         if abi == SpecAbi::Unadjusted {
2818             return;
2819         }
2820
2821         if abi == SpecAbi::Rust
2822             || abi == SpecAbi::RustCall
2823             || abi == SpecAbi::RustIntrinsic
2824             || abi == SpecAbi::PlatformIntrinsic
2825         {
2826             let fixup = |arg: &mut ArgAbi<'tcx, Ty<'tcx>>| {
2827                 if arg.is_ignore() {
2828                     return;
2829                 }
2830
2831                 match arg.layout.abi {
2832                     Abi::Aggregate { .. } => {}
2833
2834                     // This is a fun case! The gist of what this is doing is
2835                     // that we want callers and callees to always agree on the
2836                     // ABI of how they pass SIMD arguments. If we were to *not*
2837                     // make these arguments indirect then they'd be immediates
2838                     // in LLVM, which means that they'd use whatever the
2839                     // appropriate ABI is for the callee and the caller. That
2840                     // means, for example, if the caller doesn't have AVX
2841                     // enabled but the callee does, then passing an AVX argument
2842                     // across this boundary would cause corrupt data to show up.
2843                     //
2844                     // This problem is fixed by unconditionally passing SIMD
2845                     // arguments through memory between callers and callees
2846                     // which should get them all to agree on ABI regardless of
2847                     // target feature sets. Some more information about this
2848                     // issue can be found in #44367.
2849                     //
2850                     // Note that the platform intrinsic ABI is exempt here as
2851                     // that's how we connect up to LLVM and it's unstable
2852                     // anyway, we control all calls to it in libstd.
2853                     Abi::Vector { .. }
2854                         if abi != SpecAbi::PlatformIntrinsic
2855                             && cx.tcx().sess.target.simd_types_indirect =>
2856                     {
2857                         arg.make_indirect();
2858                         return;
2859                     }
2860
2861                     _ => return,
2862                 }
2863
2864                 // Pass and return structures up to 2 pointers in size by value, matching `ScalarPair`.
2865                 // LLVM will usually pass these in 2 registers, which is more efficient than by-ref.
2866                 let max_by_val_size = Pointer.size(cx) * 2;
2867                 let size = arg.layout.size;
2868
2869                 if arg.layout.is_unsized() || size > max_by_val_size {
2870                     arg.make_indirect();
2871                 } else {
2872                     // We want to pass small aggregates as immediates, but using
2873                     // an LLVM aggregate type for this leads to bad optimizations,
2874                     // so we pick an appropriately sized integer type instead.
2875                     arg.cast_to(Reg { kind: RegKind::Integer, size });
2876                 }
2877             };
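             // For example (a sketch, 64-bit target): a 12-byte
             // `struct S(u32, u32, u32)` is cast by `fixup` to a 96-bit
             // integer register class, while a 24-byte struct (over 2 pointers
             // in size) is made indirect.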
2878             fixup(&mut self.ret);
2879             for arg in &mut self.args {
2880                 fixup(arg);
2881             }
2882             return;
2883         }
2884
2885         if let Err(msg) = self.adjust_for_cabi(cx, abi) {
2886             cx.tcx().sess.fatal(&msg);
2887         }
2888     }
2889 }
2890
2891 fn make_thin_self_ptr<'tcx, C>(cx: &C, mut layout: TyAndLayout<'tcx>) -> TyAndLayout<'tcx>
2892 where
2893     C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout = TyAndLayout<'tcx>>
2894         + HasTyCtxt<'tcx>
2895         + HasParamEnv<'tcx>,
2896 {
2897     let fat_pointer_ty = if layout.is_unsized() {
2898         // unsized `self` is passed as a pointer to `self`
2899         // FIXME (mikeyhew) change this to use &own if it is ever added to the language
2900         cx.tcx().mk_mut_ptr(layout.ty)
2901     } else {
2902         match layout.abi {
2903             Abi::ScalarPair(..) => (),
2904             _ => bug!("receiver type has unsupported layout: {:?}", layout),
2905         }
2906
2907         // In the case of Rc<Self>, we need to explicitly pass a *mut RcBox<Self>
2908         // with a Scalar (not ScalarPair) ABI. This is a hack that is understood
2909         // elsewhere in the compiler as a method on a `dyn Trait`.
2910         // To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes until we
2911         // get a built-in pointer type.
2912         let mut fat_pointer_layout = layout;
2913         'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr()
2914             && !fat_pointer_layout.ty.is_region_ptr()
2915         {
2916             for i in 0..fat_pointer_layout.fields.count() {
2917                 let field_layout = fat_pointer_layout.field(cx, i);
2918
2919                 if !field_layout.is_zst() {
2920                     fat_pointer_layout = field_layout;
2921                     continue 'descend_newtypes;
2922                 }
2923             }
2924
2925             bug!("receiver has no non-zero-sized fields {:?}", fat_pointer_layout);
2926         }
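         // For example (a sketch): for `self: Rc<Self>` this descends
         // `Rc<Self>` -> `NonNull<RcBox<Self>>` -> `*const RcBox<Self>`,
         // stopping at the first built-in pointer type.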
2927
2928         fat_pointer_layout.ty
2929     };
2930
2931     // we now have a type like `*mut RcBox<dyn Trait>`
2932     // change its layout to that of `*mut ()`, a thin pointer, but keep the same type
2933     // this is understood as a special case elsewhere in the compiler
2934     let unit_pointer_ty = cx.tcx().mk_mut_ptr(cx.tcx().mk_unit());
2935     layout = cx.layout_of(unit_pointer_ty);
2936     layout.ty = fat_pointer_ty;
2937     layout
2938 }