// compiler/rustc_middle/src/ty/layout.rs
use crate::ich::StableHashingContext;
use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use crate::mir::{GeneratorLayout, GeneratorSavedLocal};
use crate::ty::subst::Subst;
use crate::ty::{self, subst::SubstsRef, ReprOptions, Ty, TyCtxt, TypeFoldable};

use rustc_ast::{self as ast, IntTy, UintTy};
use rustc_attr as attr;
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_hir as hir;
use rustc_hir::lang_items::LangItem;
use rustc_index::bit_set::BitSet;
use rustc_index::vec::{Idx, IndexVec};
use rustc_session::{DataTypeKind, FieldInfo, SizeKind, VariantInfo};
use rustc_span::symbol::{Ident, Symbol};
use rustc_span::DUMMY_SP;
use rustc_target::abi::call::{
    ArgAbi, ArgAttribute, ArgAttributes, Conv, FnAbi, PassMode, Reg, RegKind,
};
use rustc_target::abi::*;
use rustc_target::spec::{abi::Abi as SpecAbi, HasTargetSpec, PanicStrategy};

use std::cmp;
use std::fmt;
use std::iter;
use std::mem;
use std::num::NonZeroUsize;
use std::ops::Bound;

pub trait IntegerExt {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx>;
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
    fn repr_discr<'tcx>(
        tcx: TyCtxt<'tcx>,
        ty: Ty<'tcx>,
        repr: &ReprOptions,
        min: i128,
        max: i128,
    ) -> (Integer, bool);
}

impl IntegerExt for Integer {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx> {
        match (*self, signed) {
            (I8, false) => tcx.types.u8,
            (I16, false) => tcx.types.u16,
            (I32, false) => tcx.types.u32,
            (I64, false) => tcx.types.u64,
            (I128, false) => tcx.types.u128,
            (I8, true) => tcx.types.i8,
            (I16, true) => tcx.types.i16,
            (I32, true) => tcx.types.i32,
            (I64, true) => tcx.types.i64,
            (I128, true) => tcx.types.i128,
        }
    }

    /// Gets the Integer type from an attr::IntType.
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
        let dl = cx.data_layout();

        match ity {
            attr::SignedInt(IntTy::I8) | attr::UnsignedInt(UintTy::U8) => I8,
            attr::SignedInt(IntTy::I16) | attr::UnsignedInt(UintTy::U16) => I16,
            attr::SignedInt(IntTy::I32) | attr::UnsignedInt(UintTy::U32) => I32,
            attr::SignedInt(IntTy::I64) | attr::UnsignedInt(UintTy::U64) => I64,
            attr::SignedInt(IntTy::I128) | attr::UnsignedInt(UintTy::U128) => I128,
            attr::SignedInt(IntTy::Isize) | attr::UnsignedInt(UintTy::Usize) => {
                dl.ptr_sized_integer()
            }
        }
    }

    /// Finds the appropriate Integer type and signedness for the given
    /// signed discriminant range and `#[repr]` attribute.
    /// N.B.: `u128` values above `i128::MAX` will be treated as signed, but
    /// that shouldn't affect anything, other than maybe debuginfo.
    fn repr_discr<'tcx>(
        tcx: TyCtxt<'tcx>,
        ty: Ty<'tcx>,
        repr: &ReprOptions,
        min: i128,
        max: i128,
    ) -> (Integer, bool) {
        // Theoretically, negative values could be larger in unsigned representation
        // than the unsigned representation of the signed minimum. However, if there
        // are any negative values, the only valid unsigned representation is u128
        // which can fit all i128 values, so the result remains unaffected.
        let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
        let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));

        let mut min_from_extern = None;
        let min_default = I8;

        if let Some(ity) = repr.int {
            let discr = Integer::from_attr(&tcx, ity);
            let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
            if discr < fit {
                bug!(
                    "Integer::repr_discr: `#[repr]` hint too small for \
                      discriminant range of enum `{}`",
                    ty
                )
            }
            return (discr, ity.is_signed());
        }

        if repr.c() {
            match &tcx.sess.target.arch[..] {
                // WARNING: the ARM EABI has two variants; the one corresponding
                // to `at_least == I32` appears to be used on Linux and NetBSD,
                // but some systems may use the variant corresponding to no
                // lower bound. However, we don't run on those yet...?
                "arm" => min_from_extern = Some(I32),
                _ => min_from_extern = Some(I32),
            }
        }

        let at_least = min_from_extern.unwrap_or(min_default);

        // If there are no negative values, we can use the unsigned fit.
        if min >= 0 {
            (cmp::max(unsigned_fit, at_least), false)
        } else {
            (cmp::max(signed_fit, at_least), true)
        }
    }
}
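
// Worked example (informal): for an enum whose discriminants span -1..=100 and
// which carries no `#[repr]` hint, `fit_signed` yields I8 for both bounds and
// `min < 0`, so `repr_discr` returns `(I8, true)`, i.e. an `i8` tag. Adding
// `#[repr(C)]` raises the lower bound to I32 via `min_from_extern`, giving
// `(I32, true)` instead.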

pub trait PrimitiveExt {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
    fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
}

impl PrimitiveExt for Primitive {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
        match *self {
            Int(i, signed) => i.to_ty(tcx, signed),
            F32 => tcx.types.f32,
            F64 => tcx.types.f64,
            Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
        }
    }

    /// Return an *integer* type matching this primitive.
    /// Useful in particular when dealing with enum discriminants.
    fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
        match *self {
            Int(i, signed) => i.to_ty(tcx, signed),
            Pointer => tcx.types.usize,
            F32 | F64 => bug!("floats do not have an int type"),
        }
    }
}

/// The first half of a fat pointer.
///
/// - For a trait object, this is the address of the box.
/// - For a slice, this is the base address.
pub const FAT_PTR_ADDR: usize = 0;

/// The second half of a fat pointer.
///
/// - For a trait object, this is the address of the vtable.
/// - For a slice, this is the length.
pub const FAT_PTR_EXTRA: usize = 1;
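
// For example, `&[u8]` is a (data pointer, length) pair, so field `FAT_PTR_ADDR`
// is the `*const u8` and field `FAT_PTR_EXTRA` is the `usize` length; for
// `&dyn Trait`, `FAT_PTR_EXTRA` is the vtable pointer instead.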

#[derive(Copy, Clone, Debug, TyEncodable, TyDecodable)]
pub enum LayoutError<'tcx> {
    Unknown(Ty<'tcx>),
    SizeOverflow(Ty<'tcx>),
}

impl<'tcx> fmt::Display for LayoutError<'tcx> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match *self {
            LayoutError::Unknown(ty) => write!(f, "the type `{}` has an unknown layout", ty),
            LayoutError::SizeOverflow(ty) => {
                write!(f, "the type `{}` is too big for the current architecture", ty)
            }
        }
    }
}

fn layout_raw<'tcx>(
    tcx: TyCtxt<'tcx>,
    query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
) -> Result<&'tcx Layout, LayoutError<'tcx>> {
    ty::tls::with_related_context(tcx, move |icx| {
        let (param_env, ty) = query.into_parts();

        if !tcx.sess.recursion_limit().value_within_limit(icx.layout_depth) {
            tcx.sess.fatal(&format!("overflow representing the type `{}`", ty));
        }

        // Update the ImplicitCtxt to increase the layout_depth
        let icx = ty::tls::ImplicitCtxt { layout_depth: icx.layout_depth + 1, ..icx.clone() };

        ty::tls::enter_context(&icx, |_| {
            let cx = LayoutCx { tcx, param_env };
            let layout = cx.layout_raw_uncached(ty);
            // Type-level uninhabitedness should always imply ABI uninhabitedness.
            if let Ok(layout) = layout {
                if ty.conservative_is_privately_uninhabited(tcx) {
                    assert!(layout.abi.is_uninhabited());
                }
            }
            layout
        })
    })
}

pub fn provide(providers: &mut ty::query::Providers) {
    *providers = ty::query::Providers { layout_raw, ..*providers };
}

pub struct LayoutCx<'tcx, C> {
    pub tcx: C,
    pub param_env: ty::ParamEnv<'tcx>,
}

#[derive(Copy, Clone, Debug)]
enum StructKind {
    /// A tuple, closure, or univariant which cannot be coerced to unsized.
    AlwaysSized,
    /// A univariant, the last field of which may be coerced to unsized.
    MaybeUnsized,
    /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
    Prefixed(Size, Align),
}

// Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
// This is used to go between `memory_index` (source field order to memory order)
// and `inverse_memory_index` (memory order to source field order).
// See also `FieldsShape::Arbitrary::memory_index` for more details.
// FIXME(eddyb) build a better abstraction for permutations, if possible.
fn invert_mapping(map: &[u32]) -> Vec<u32> {
    let mut inverse = vec![0; map.len()];
    for i in 0..map.len() {
        inverse[map[i] as usize] = i as u32;
    }
    inverse
}
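
// Example: if `memory_index` is `[2, 0, 1]` (source field 0 is third in memory),
// then `invert_mapping(&[2, 0, 1])` returns `[1, 2, 0]`: memory slot 0 holds
// source field 1, slot 1 holds field 2, and slot 2 holds field 0.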

impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
    fn scalar_pair(&self, a: Scalar, b: Scalar) -> Layout {
        let dl = self.data_layout();
        let b_align = b.value.align(dl);
        let align = a.value.align(dl).max(b_align).max(dl.aggregate_align);
        let b_offset = a.value.size(dl).align_to(b_align.abi);
        let size = (b_offset + b.value.size(dl)).align_to(align.abi);

        // HACK(nox): We iterate on `b` and then `a` because `max_by_key`
        // returns the last maximum.
        let largest_niche = Niche::from_scalar(dl, b_offset, b.clone())
            .into_iter()
            .chain(Niche::from_scalar(dl, Size::ZERO, a.clone()))
            .max_by_key(|niche| niche.available(dl));

        Layout {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Arbitrary {
                offsets: vec![Size::ZERO, b_offset],
                memory_index: vec![0, 1],
            },
            abi: Abi::ScalarPair(a, b),
            largest_niche,
            align,
            size,
        }
    }
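
    // Worked example (assuming a typical 64-bit data layout): pairing `a = i8`
    // with `b = i32` gives `b_offset = 1.align_to(4) = 4` and
    // `size = (4 + 4).align_to(4) = 8`, so the pair occupies 8 bytes with the
    // `i32` at offset 4 and an overall ABI alignment of 4.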

    fn univariant_uninterned(
        &self,
        ty: Ty<'tcx>,
        fields: &[TyAndLayout<'_>],
        repr: &ReprOptions,
        kind: StructKind,
    ) -> Result<Layout, LayoutError<'tcx>> {
        let dl = self.data_layout();
        let pack = repr.pack;
        if pack.is_some() && repr.align.is_some() {
            bug!("struct cannot be packed and aligned");
        }

        let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };

        let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();

        let optimize = !repr.inhibit_struct_field_reordering_opt();
        if optimize {
            let end =
                if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
            let optimizing = &mut inverse_memory_index[..end];
            let field_align = |f: &TyAndLayout<'_>| {
                if let Some(pack) = pack { f.align.abi.min(pack) } else { f.align.abi }
            };
            match kind {
                StructKind::AlwaysSized | StructKind::MaybeUnsized => {
                    optimizing.sort_by_key(|&x| {
                        // Place ZSTs first to avoid "interesting offsets",
                        // especially with only one or two non-ZST fields.
                        let f = &fields[x as usize];
                        (!f.is_zst(), cmp::Reverse(field_align(f)))
                    });
                }
                StructKind::Prefixed(..) => {
                    // Sort in ascending alignment so that the layout stays optimal
                    // regardless of the prefix.
                    optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
                }
            }
        }
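
        // Example of the reordering above: for `struct S { a: u8, b: u32, c: u16 }`
        // with the default repr, sorting non-ZST fields by decreasing alignment
        // yields memory order [b, c, a], packing the struct into 8 bytes instead
        // of the 12 that source order would require with padding.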

        // inverse_memory_index holds field indices by increasing memory offset.
        // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
        // We now write field offsets to the corresponding offset slot;
        // field 5 with offset 0 puts 0 in offsets[5].
        // At the bottom of this function, we invert `inverse_memory_index` to
        // produce `memory_index` (see `invert_mapping`).

        let mut sized = true;
        let mut offsets = vec![Size::ZERO; fields.len()];
        let mut offset = Size::ZERO;
        let mut largest_niche = None;
        let mut largest_niche_available = 0;

        if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
            let prefix_align =
                if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
            align = align.max(AbiAndPrefAlign::new(prefix_align));
            offset = prefix_size.align_to(prefix_align);
        }

        for &i in &inverse_memory_index {
            let field = fields[i as usize];
            if !sized {
                bug!("univariant: field #{} of `{}` comes after unsized field", offsets.len(), ty);
            }

            if field.is_unsized() {
                sized = false;
            }

            // Invariant: offset < dl.obj_size_bound() <= 1<<61
            let field_align = if let Some(pack) = pack {
                field.align.min(AbiAndPrefAlign::new(pack))
            } else {
                field.align
            };
            offset = offset.align_to(field_align.abi);
            align = align.max(field_align);

            debug!("univariant offset: {:?} field: {:#?}", offset, field);
            offsets[i as usize] = offset;

            if !repr.hide_niche() {
                if let Some(mut niche) = field.largest_niche.clone() {
                    let available = niche.available(dl);
                    if available > largest_niche_available {
                        largest_niche_available = available;
                        niche.offset += offset;
                        largest_niche = Some(niche);
                    }
                }
            }

            offset = offset.checked_add(field.size, dl).ok_or(LayoutError::SizeOverflow(ty))?;
        }

        if let Some(repr_align) = repr.align {
            align = align.max(AbiAndPrefAlign::new(repr_align));
        }

        debug!("univariant min_size: {:?}", offset);
        let min_size = offset;

        // As stated above, inverse_memory_index holds field indices by increasing offset.
        // This makes it an already-sorted view of the offsets vec.
        // To invert it, consider:
        // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
        // Field 5 would be the first element, so memory_index is i:
        // Note: if we didn't optimize, it's already right.

        let memory_index =
            if optimize { invert_mapping(&inverse_memory_index) } else { inverse_memory_index };

        let size = min_size.align_to(align.abi);
        let mut abi = Abi::Aggregate { sized };

        // Unpack newtype ABIs and find scalar pairs.
        if sized && size.bytes() > 0 {
            // All other fields must be ZSTs.
            let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());

            match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
                // We have exactly one non-ZST field.
                (Some((i, field)), None, None) => {
                    // Field fills the struct and it has a scalar or scalar pair ABI.
                    if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size
                    {
                        match field.abi {
                            // For plain scalars, or vectors of them, we can't unpack
                            // newtypes for `#[repr(C)]`, as that affects C ABIs.
                            Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
                                abi = field.abi.clone();
                            }
                            // But scalar pairs are Rust-specific and get
                            // treated as aggregates by C ABIs anyway.
                            Abi::ScalarPair(..) => {
                                abi = field.abi.clone();
                            }
                            _ => {}
                        }
                    }
                }

                // Two non-ZST fields, and they're both scalars.
                (
                    Some((i, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(ref a), .. }, .. })),
                    Some((j, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(ref b), .. }, .. })),
                    None,
                ) => {
                    // Order by the memory placement, not source order.
                    let ((i, a), (j, b)) =
                        if offsets[i] < offsets[j] { ((i, a), (j, b)) } else { ((j, b), (i, a)) };
                    let pair = self.scalar_pair(a.clone(), b.clone());
                    let pair_offsets = match pair.fields {
                        FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
                            assert_eq!(memory_index, &[0, 1]);
                            offsets
                        }
                        _ => bug!(),
                    };
                    if offsets[i] == pair_offsets[0]
                        && offsets[j] == pair_offsets[1]
                        && align == pair.align
                        && size == pair.size
                    {
                        // We can use `ScalarPair` only when it matches our
                        // already computed layout (including `#[repr(C)]`).
                        abi = pair.abi;
                    }
                }

                _ => {}
            }
        }
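
        // E.g. `struct Newtype(u64)` has a single non-ZST field filling the whole
        // struct, so it is given `Abi::Scalar` here and can travel in a register;
        // similarly, a two-field `(u32, u32)` struct can be promoted to
        // `Abi::ScalarPair` when its offsets match the canonical pair layout.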

        if sized && fields.iter().any(|f| f.abi.is_uninhabited()) {
            abi = Abi::Uninhabited;
        }

        Ok(Layout {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Arbitrary { offsets, memory_index },
            abi,
            largest_niche,
            align,
            size,
        })
    }

    fn layout_raw_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx Layout, LayoutError<'tcx>> {
        let tcx = self.tcx;
        let param_env = self.param_env;
        let dl = self.data_layout();
        let scalar_unit = |value: Primitive| {
            let bits = value.size(dl).bits();
            assert!(bits <= 128);
            Scalar { value, valid_range: 0..=(!0 >> (128 - bits)) }
        };
        let scalar = |value: Primitive| tcx.intern_layout(Layout::scalar(self, scalar_unit(value)));

        let univariant = |fields: &[TyAndLayout<'_>], repr: &ReprOptions, kind| {
            Ok(tcx.intern_layout(self.univariant_uninterned(ty, fields, repr, kind)?))
        };
        debug_assert!(!ty.has_infer_types_or_consts());

        Ok(match *ty.kind() {
            // Basic scalars.
            ty::Bool => tcx.intern_layout(Layout::scalar(
                self,
                Scalar { value: Int(I8, false), valid_range: 0..=1 },
            )),
            ty::Char => tcx.intern_layout(Layout::scalar(
                self,
                Scalar { value: Int(I32, false), valid_range: 0..=0x10FFFF },
            )),
            ty::Int(ity) => scalar(Int(Integer::from_attr(dl, attr::SignedInt(ity)), true)),
            ty::Uint(ity) => scalar(Int(Integer::from_attr(dl, attr::UnsignedInt(ity)), false)),
            ty::Float(fty) => scalar(match fty {
                ast::FloatTy::F32 => F32,
                ast::FloatTy::F64 => F64,
            }),
            ty::FnPtr(_) => {
                let mut ptr = scalar_unit(Pointer);
                ptr.valid_range = 1..=*ptr.valid_range.end();
                tcx.intern_layout(Layout::scalar(self, ptr))
            }
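
            // Since the valid range above excludes 0, a function pointer is
            // non-null; this is the niche that lets e.g. `Option<fn()>` encode
            // `None` as 0 and stay the same size as `fn()`.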

            // The never type.
            ty::Never => tcx.intern_layout(Layout {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldsShape::Primitive,
                abi: Abi::Uninhabited,
                largest_niche: None,
                align: dl.i8_align,
                size: Size::ZERO,
            }),

            // Potentially-wide pointers.
            ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                let mut data_ptr = scalar_unit(Pointer);
                if !ty.is_unsafe_ptr() {
                    data_ptr.valid_range = 1..=*data_ptr.valid_range.end();
                }

                let pointee = tcx.normalize_erasing_regions(param_env, pointee);
                if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
                    return Ok(tcx.intern_layout(Layout::scalar(self, data_ptr)));
                }

                let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
                let metadata = match unsized_part.kind() {
                    ty::Foreign(..) => {
                        return Ok(tcx.intern_layout(Layout::scalar(self, data_ptr)));
                    }
                    ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)),
                    ty::Dynamic(..) => {
                        let mut vtable = scalar_unit(Pointer);
                        vtable.valid_range = 1..=*vtable.valid_range.end();
                        vtable
                    }
                    _ => return Err(LayoutError::Unknown(unsized_part)),
                };

                // Effectively a (ptr, meta) tuple.
                tcx.intern_layout(self.scalar_pair(data_ptr, metadata))
            }
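
            // E.g. `&str` becomes a scalar pair of a non-null data pointer and a
            // `usize` length, while `&dyn Trait` pairs the data pointer with a
            // non-null vtable pointer.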

            // Arrays and slices.
            ty::Array(element, mut count) => {
                if count.has_projections() {
                    count = tcx.normalize_erasing_regions(param_env, count);
                    if count.has_projections() {
                        return Err(LayoutError::Unknown(ty));
                    }
                }

                let count = count.try_eval_usize(tcx, param_env).ok_or(LayoutError::Unknown(ty))?;
                let element = self.layout_of(element)?;
                let size =
                    element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;

                let abi = if count != 0 && ty.conservative_is_privately_uninhabited(tcx) {
                    Abi::Uninhabited
                } else {
                    Abi::Aggregate { sized: true }
                };

                let largest_niche = if count != 0 { element.largest_niche.clone() } else { None };

                tcx.intern_layout(Layout {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldsShape::Array { stride: element.size, count },
                    abi,
                    largest_niche,
                    align: element.align,
                    size,
                })
            }
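
            // E.g. `[u16; 3]` gets stride 2, count 3, size 6, and align 2, while a
            // zero-length array keeps its element's alignment but occupies no space
            // and contributes no niche.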
            ty::Slice(element) => {
                let element = self.layout_of(element)?;
                tcx.intern_layout(Layout {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldsShape::Array { stride: element.size, count: 0 },
                    abi: Abi::Aggregate { sized: false },
                    largest_niche: None,
                    align: element.align,
                    size: Size::ZERO,
                })
            }
            ty::Str => tcx.intern_layout(Layout {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
                abi: Abi::Aggregate { sized: false },
                largest_niche: None,
                align: dl.i8_align,
                size: Size::ZERO,
            }),

            // Odd unit types.
            ty::FnDef(..) => univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?,
            ty::Dynamic(..) | ty::Foreign(..) => {
                let mut unit = self.univariant_uninterned(
                    ty,
                    &[],
                    &ReprOptions::default(),
                    StructKind::AlwaysSized,
                )?;
                match unit.abi {
                    Abi::Aggregate { ref mut sized } => *sized = false,
                    _ => bug!(),
                }
                tcx.intern_layout(unit)
            }

            ty::Generator(def_id, substs, _) => self.generator_layout(ty, def_id, substs)?,

            ty::Closure(_, ref substs) => {
                let tys = substs.as_closure().upvar_tys();
                univariant(
                    &tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    StructKind::AlwaysSized,
                )?
            }

            ty::Tuple(tys) => {
                let kind =
                    if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };

                univariant(
                    &tys.iter()
                        .map(|k| self.layout_of(k.expect_ty()))
                        .collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    kind,
                )?
            }

            // SIMD vector types.
            ty::Adt(def, ..) if def.repr.simd() => {
                let element = self.layout_of(ty.simd_type(tcx))?;
                let count = ty.simd_size(tcx);
                assert!(count > 0);
                let scalar = match element.abi {
                    Abi::Scalar(ref scalar) => scalar.clone(),
                    _ => {
                        tcx.sess.fatal(&format!(
                            "monomorphising SIMD type `{}` with \
                             a non-machine element type `{}`",
                            ty, element.ty
                        ));
                    }
                };
                let size =
                    element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;
                let align = dl.vector_align(size);
                let size = size.align_to(align.abi);

                tcx.intern_layout(Layout {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldsShape::Array { stride: element.size, count },
                    abi: Abi::Vector { element: scalar, count },
                    largest_niche: element.largest_niche.clone(),
                    size,
                    align,
                })
            }

            // ADTs.
            ty::Adt(def, substs) => {
                // Cache the field layouts.
                let variants = def
                    .variants
                    .iter()
                    .map(|v| {
                        v.fields
                            .iter()
                            .map(|field| self.layout_of(field.ty(tcx, substs)))
                            .collect::<Result<Vec<_>, _>>()
                    })
                    .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                if def.is_union() {
                    if def.repr.pack.is_some() && def.repr.align.is_some() {
                        bug!("union cannot be packed and aligned");
                    }

                    let mut align =
                        if def.repr.pack.is_some() { dl.i8_align } else { dl.aggregate_align };

                    if let Some(repr_align) = def.repr.align {
                        align = align.max(AbiAndPrefAlign::new(repr_align));
                    }

                    let optimize = !def.repr.inhibit_union_abi_opt();
                    let mut size = Size::ZERO;
                    let mut abi = Abi::Aggregate { sized: true };
                    let index = VariantIdx::new(0);
                    for field in &variants[index] {
                        assert!(!field.is_unsized());
                        align = align.max(field.align);

                        // If all non-ZST fields have the same ABI, forward this ABI
                        if optimize && !field.is_zst() {
                            // Normalize scalar_unit to the maximal valid range
                            let field_abi = match &field.abi {
                                Abi::Scalar(x) => Abi::Scalar(scalar_unit(x.value)),
                                Abi::ScalarPair(x, y) => {
                                    Abi::ScalarPair(scalar_unit(x.value), scalar_unit(y.value))
                                }
                                Abi::Vector { element: x, count } => {
                                    Abi::Vector { element: scalar_unit(x.value), count: *count }
                                }
                                Abi::Uninhabited | Abi::Aggregate { .. } => {
                                    Abi::Aggregate { sized: true }
                                }
                            };

                            if size == Size::ZERO {
                                // first non ZST: initialize 'abi'
                                abi = field_abi;
                            } else if abi != field_abi {
                                // different fields have different ABI: reset to Aggregate
                                abi = Abi::Aggregate { sized: true };
                            }
                        }

                        size = cmp::max(size, field.size);
                    }

                    if let Some(pack) = def.repr.pack {
                        align = align.min(AbiAndPrefAlign::new(pack));
                    }

                    return Ok(tcx.intern_layout(Layout {
                        variants: Variants::Single { index },
                        fields: FieldsShape::Union(
                            NonZeroUsize::new(variants[index].len())
                                .ok_or(LayoutError::Unknown(ty))?,
                        ),
                        abi,
                        largest_niche: None,
                        align,
                        size: size.align_to(align.abi),
                    }));
                }
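
                // Example of the union ABI forwarding above: `union U { a: u32, b: f32 }`
                // has non-ZST fields with different scalar ABIs (`Int` vs. `F32`), so
                // `abi` falls back to `Aggregate`; if both fields were `u32`, the shared
                // `Scalar` ABI would be forwarded instead.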

                // A variant is absent if it's uninhabited and only has ZST fields.
                // Present uninhabited variants only require space for their fields,
                // but *not* an encoding of the discriminant (e.g., a tag value).
                // See issue #49298 for more details on the need to leave space
                // for non-ZST uninhabited data (mostly partial initialization).
                let absent = |fields: &[TyAndLayout<'_>]| {
                    let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
                    let is_zst = fields.iter().all(|f| f.is_zst());
                    uninhabited && is_zst
                };
                let (present_first, present_second) = {
                    let mut present_variants = variants
                        .iter_enumerated()
                        .filter_map(|(i, v)| if absent(v) { None } else { Some(i) });
                    (present_variants.next(), present_variants.next())
                };
                let present_first = match present_first {
                    Some(present_first) => present_first,
                    // Uninhabited because it has no variants, or only absent ones.
                    None if def.is_enum() => return tcx.layout_raw(param_env.and(tcx.types.never)),
                    // If it's a struct, still compute a layout so that we can still compute the
                    // field offsets.
                    None => VariantIdx::new(0),
                };

                let is_struct = !def.is_enum() ||
                    // Only one variant is present.
                    (present_second.is_none() &&
                    // Representation optimizations are allowed.
                    !def.repr.inhibit_enum_layout_opt());
                if is_struct {
                    // Struct, or univariant enum equivalent to a struct.
                    // (Typechecking will reject discriminant-sizing attrs.)

                    let v = present_first;
                    let kind = if def.is_enum() || variants[v].is_empty() {
                        StructKind::AlwaysSized
                    } else {
                        let param_env = tcx.param_env(def.did);
                        let last_field = def.variants[v].fields.last().unwrap();
                        let always_sized =
                            tcx.type_of(last_field.did).is_sized(tcx.at(DUMMY_SP), param_env);
                        if !always_sized {
                            StructKind::MaybeUnsized
                        } else {
                            StructKind::AlwaysSized
                        }
                    };

                    let mut st = self.univariant_uninterned(ty, &variants[v], &def.repr, kind)?;
                    st.variants = Variants::Single { index: v };
                    let (start, end) = self.tcx.layout_scalar_valid_range(def.did);
                    match st.abi {
                        Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
                            // the asserts ensure that we are not using the
                            // `#[rustc_layout_scalar_valid_range(n)]`
                            // attribute to widen the range of anything as that would probably
                            // result in UB somewhere
                            // FIXME(eddyb) the asserts are probably not needed,
                            // as larger validity ranges would result in missed
                            // optimizations, *not* wrongly assuming the inner
                            // value is valid. e.g. unions enlarge validity ranges,
                            // because the values may be uninitialized.
                            if let Bound::Included(start) = start {
                                // FIXME(eddyb) this might be incorrect - it doesn't
                                // account for wrap-around (end < start) ranges.
                                assert!(*scalar.valid_range.start() <= start);
                                scalar.valid_range = start..=*scalar.valid_range.end();
                            }
                            if let Bound::Included(end) = end {
                                // FIXME(eddyb) this might be incorrect - it doesn't
                                // account for wrap-around (end < start) ranges.
                                assert!(*scalar.valid_range.end() >= end);
                                scalar.valid_range = *scalar.valid_range.start()..=end;
                            }

                            // Update `largest_niche` if we have introduced a larger niche.
                            let niche = if def.repr.hide_niche() {
                                None
                            } else {
                                Niche::from_scalar(dl, Size::ZERO, scalar.clone())
                            };
                            if let Some(niche) = niche {
                                match &st.largest_niche {
                                    Some(largest_niche) => {
                                        // Replace the existing niche even if they're equal,
                                        // because this one is at a lower offset.
                                        if largest_niche.available(dl) <= niche.available(dl) {
                                            st.largest_niche = Some(niche);
                                        }
                                    }
                                    None => st.largest_niche = Some(niche),
                                }
                            }
                        }
                        _ => assert!(
                            start == Bound::Unbounded && end == Bound::Unbounded,
                            "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
                            def,
                            st,
                        ),
                    }

                    return Ok(tcx.intern_layout(st));
                }

                // At this point, we have handled all unions and
                // structs. (We have also handled univariant enums
                // that allow representation optimization.)
                assert!(def.is_enum());

                // The current code for niche-filling relies on variant indices
                // instead of actual discriminants, so dataful enums with
                // explicit discriminants (RFC #2363) would misbehave.
                let no_explicit_discriminants = def
                    .variants
                    .iter_enumerated()
                    .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i.as_u32()));

                let mut niche_filling_layout = None;

                // Niche-filling enum optimization.
                if !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants {
                    let mut dataful_variant = None;
                    let mut niche_variants = VariantIdx::MAX..=VariantIdx::new(0);

                    // Find one non-ZST variant.
                    'variants: for (v, fields) in variants.iter_enumerated() {
                        if absent(fields) {
                            continue 'variants;
                        }
                        for f in fields {
                            if !f.is_zst() {
                                if dataful_variant.is_none() {
                                    dataful_variant = Some(v);
                                    continue 'variants;
                                } else {
                                    dataful_variant = None;
                                    break 'variants;
                                }
                            }
                        }
                        niche_variants = *niche_variants.start().min(&v)..=v;
                    }

                    if niche_variants.start() > niche_variants.end() {
                        dataful_variant = None;
                    }

                    if let Some(i) = dataful_variant {
                        let count = (niche_variants.end().as_u32()
                            - niche_variants.start().as_u32()
                            + 1) as u128;

                        // Find the field with the largest niche
                        let niche_candidate = variants[i]
                            .iter()
                            .enumerate()
                            .filter_map(|(j, &field)| Some((j, field.largest_niche.as_ref()?)))
                            .max_by_key(|(_, niche)| niche.available(dl));

                        if let Some((field_index, niche, (niche_start, niche_scalar))) =
                            niche_candidate.and_then(|(field_index, niche)| {
                                Some((field_index, niche, niche.reserve(self, count)?))
                            })
                        {
                            let mut align = dl.aggregate_align;
                            let st = variants
                                .iter_enumerated()
                                .map(|(j, v)| {
                                    let mut st = self.univariant_uninterned(
                                        ty,
                                        v,
                                        &def.repr,
                                        StructKind::AlwaysSized,
                                    )?;
                                    st.variants = Variants::Single { index: j };

                                    align = align.max(st.align);

                                    Ok(st)
                                })
                                .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                            let offset = st[i].fields.offset(field_index) + niche.offset;
                            let size = st[i].size;

                            let abi = if st.iter().all(|v| v.abi.is_uninhabited()) {
                                Abi::Uninhabited
                            } else {
                                match st[i].abi {
                                    Abi::Scalar(_) => Abi::Scalar(niche_scalar.clone()),
                                    Abi::ScalarPair(ref first, ref second) => {
                                        // We need to use scalar_unit to reset the
                                        // valid range to the maximal one for that
                                        // primitive, because only the niche is
                                        // guaranteed to be initialised, not the
                                        // other primitive.
                                        if offset.bytes() == 0 {
                                            Abi::ScalarPair(
                                                niche_scalar.clone(),
                                                scalar_unit(second.value),
                                            )
                                        } else {
                                            Abi::ScalarPair(
                                                scalar_unit(first.value),
                                                niche_scalar.clone(),
                                            )
                                        }
                                    }
                                    _ => Abi::Aggregate { sized: true },
                                }
                            };

                            let largest_niche =
                                Niche::from_scalar(dl, offset, niche_scalar.clone());

                            niche_filling_layout = Some(Layout {
                                variants: Variants::Multiple {
                                    tag: niche_scalar,
                                    tag_encoding: TagEncoding::Niche {
                                        dataful_variant: i,
                                        niche_variants,
                                        niche_start,
                                    },
                                    tag_field: 0,
                                    variants: st,
                                },
                                fields: FieldsShape::Arbitrary {
                                    offsets: vec![offset],
                                    memory_index: vec![0],
                                },
                                abi,
                                largest_niche,
                                size,
                                align,
                            });
                        }
                    }
                }
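
                // The classic instance of this optimization is `Option<&T>`: `Some`
                // is the single dataful variant and its field's niche is the null
                // pointer, so `None` is encoded as 0 and the enum stays pointer-sized.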

                let (mut min, mut max) = (i128::MAX, i128::MIN);
                let discr_type = def.repr.discr_type();
                let bits = Integer::from_attr(self, discr_type).size().bits();
                for (i, discr) in def.discriminants(tcx) {
                    if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
                        continue;
                    }
                    let mut x = discr.val as i128;
                    if discr_type.is_signed() {
                        // sign extend the raw representation to be an i128
                        x = (x << (128 - bits)) >> (128 - bits);
                    }
                    if x < min {
                        min = x;
                    }
                    if x > max {
                        max = x;
                    }
                }
                // We might have no inhabited variants, so pretend there's at least one.
                if (min, max) == (i128::MAX, i128::MIN) {
                    min = 0;
                    max = 0;
                }
                assert!(min <= max, "discriminant range is {}...{}", min, max);
                let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max);

                let mut align = dl.aggregate_align;
                let mut size = Size::ZERO;

                // We're interested in the smallest alignment, so start large.
                let mut start_align = Align::from_bytes(256).unwrap();
                assert_eq!(Integer::for_align(dl, start_align), None);

                // repr(C) on an enum tells us to make a (tag, union) layout,
                // so we need to grow the prefix alignment to be at least
                // the alignment of the union. (This value is used both for
                // determining the alignment of the overall enum, and for
                // determining the alignment of the payload after the tag.)
                let mut prefix_align = min_ity.align(dl).abi;
                if def.repr.c() {
                    for fields in &variants {
                        for field in fields {
                            prefix_align = prefix_align.max(field.align.abi);
                        }
                    }
                }

                // Create the set of structs that represent each variant.
                let mut layout_variants = variants
                    .iter_enumerated()
                    .map(|(i, field_layouts)| {
                        let mut st = self.univariant_uninterned(
                            ty,
                            &field_layouts,
                            &def.repr,
                            StructKind::Prefixed(min_ity.size(), prefix_align),
                        )?;
                        st.variants = Variants::Single { index: i };
                        // Find the first field we can't move later
                        // to make room for a larger discriminant.
                        for field in
                            st.fields.index_by_increasing_offset().map(|j| field_layouts[j])
                        {
                            if !field.is_zst() || field.align.abi.bytes() != 1 {
                                start_align = start_align.min(field.align.abi);
                                break;
                            }
                        }
                        size = cmp::max(size, st.size);
                        align = align.max(st.align);
                        Ok(st)
                    })
                    .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                // Align the maximum variant size to the largest alignment.
                size = size.align_to(align.abi);

                if size.bytes() >= dl.obj_size_bound() {
                    return Err(LayoutError::SizeOverflow(ty));
                }

                let typeck_ity = Integer::from_attr(dl, def.repr.discr_type());
                if typeck_ity < min_ity {
                    // It is a bug if Layout decided on a greater discriminant size than typeck for
                    // some reason at this point (based on values discriminant can take on). Mostly
                    // because this discriminant will be loaded, and then stored into variable of
                    // type calculated by typeck. Consider such case (a bug): typeck decided on
                    // byte-sized discriminant, but layout thinks we need a 16-bit to store all
                    // discriminant values. That would be a bug, because then, in codegen, in order
                    // to store this 16-bit discriminant into 8-bit sized temporary some of the
                    // space necessary to represent would have to be discarded (or layout is wrong
                    // on thinking it needs 16 bits)
                    bug!(
                        "layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
                        min_ity,
                        typeck_ity
                    );
                    // However, it is fine to make discr type however large (as an optimisation)
                    // after this point – we’ll just truncate the value we load in codegen.
                }

                // Check to see if we should use a different type for the
                // discriminant. We can safely use a type with the same size
                // as the alignment of the first field of each variant.
                // We increase the size of the discriminant to avoid LLVM copying
                // padding when it doesn't need to. This normally causes unaligned
                // load/stores and excessive memcpy/memset operations. By using a
                // bigger integer size, LLVM can be sure about its contents and
                // won't be so conservative.

                // Use the initial field alignment
                let mut ity = if def.repr.c() || def.repr.int.is_some() {
                    min_ity
                } else {
                    Integer::for_align(dl, start_align).unwrap_or(min_ity)
                };

                // If the alignment is not larger than the chosen discriminant size,
                // don't use the alignment as the final size.
                if ity <= min_ity {
                    ity = min_ity;
                } else {
                    // Patch up the variants' first few fields.
                    let old_ity_size = min_ity.size();
                    let new_ity_size = ity.size();
                    for variant in &mut layout_variants {
                        match variant.fields {
                            FieldsShape::Arbitrary { ref mut offsets, .. } => {
                                for i in offsets {
                                    if *i <= old_ity_size {
                                        assert_eq!(*i, old_ity_size);
                                        *i = new_ity_size;
                                    }
                                }
                                // We might be making the struct larger.
                                if variant.size <= old_ity_size {
                                    variant.size = new_ity_size;
                                }
                            }
                            _ => bug!(),
                        }
                    }
                }

                let tag_mask = !0u128 >> (128 - ity.size().bits());
                let tag = Scalar {
                    value: Int(ity, signed),
                    valid_range: (min as u128 & tag_mask)..=(max as u128 & tag_mask),
                };
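
                // Example: discriminants -1..=1 stored in an `i8` tag give
                // `tag_mask = 0xff`, so `valid_range` is the wrapping range
                // `0xff..=0x01`, covering {255, 0, 1} in the raw representation.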
1133                 let mut abi = Abi::Aggregate { sized: true };
1134                 if tag.value.size(dl) == size {
1135                     abi = Abi::Scalar(tag.clone());
1136                 } else {
1137                     // Try to use a ScalarPair for all tagged enums.
1138                     let mut common_prim = None;
1139                     for (field_layouts, layout_variant) in variants.iter().zip(&layout_variants) {
1140                         let offsets = match layout_variant.fields {
1141                             FieldsShape::Arbitrary { ref offsets, .. } => offsets,
1142                             _ => bug!(),
1143                         };
1144                         let mut fields =
1145                             field_layouts.iter().zip(offsets).filter(|p| !p.0.is_zst());
1146                         let (field, offset) = match (fields.next(), fields.next()) {
1147                             (None, None) => continue,
1148                             (Some(pair), None) => pair,
1149                             _ => {
1150                                 common_prim = None;
1151                                 break;
1152                             }
1153                         };
1154                         let prim = match field.abi {
1155                             Abi::Scalar(ref scalar) => scalar.value,
1156                             _ => {
1157                                 common_prim = None;
1158                                 break;
1159                             }
1160                         };
1161                         if let Some(pair) = common_prim {
1162                             // This is pretty conservative. We could go fancier
1163                             // by conflating things like i32 and u32, or even
1164                             // realising that (u8, u8) could just cohabit with
1165                             // u16 or even u32.
1166                             if pair != (prim, offset) {
1167                                 common_prim = None;
1168                                 break;
1169                             }
1170                         } else {
1171                             common_prim = Some((prim, offset));
1172                         }
1173                     }
1174                     if let Some((prim, offset)) = common_prim {
1175                         let pair = self.scalar_pair(tag.clone(), scalar_unit(prim));
1176                         let pair_offsets = match pair.fields {
1177                             FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
1178                                 assert_eq!(memory_index, &[0, 1]);
1179                                 offsets
1180                             }
1181                             _ => bug!(),
1182                         };
1183                         if pair_offsets[0] == Size::ZERO
1184                             && pair_offsets[1] == *offset
1185                             && align == pair.align
1186                             && size == pair.size
1187                         {
1188                             // We can use `ScalarPair` only when it matches our
1189                             // already computed layout (including `#[repr(C)]`).
1190                             abi = pair.abi;
1191                         }
1192                     }
1193                 }
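
                // For example (an illustrative sketch):
                // `enum E { A(u32), B(u32) }` has a single non-ZST `u32` at
                // the same offset in both variants, so it can become
                // `ScalarPair(tag, u32)`; `enum F { A(u32), B(f32) }` mixes
                // primitives, so it stays `Abi::Aggregate`.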
1194
1195                 if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
1196                     abi = Abi::Uninhabited;
1197                 }
1198
1199                 let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag.clone());
1200
1201                 let tagged_layout = Layout {
1202                     variants: Variants::Multiple {
1203                         tag,
1204                         tag_encoding: TagEncoding::Direct,
1205                         tag_field: 0,
1206                         variants: layout_variants,
1207                     },
1208                     fields: FieldsShape::Arbitrary {
1209                         offsets: vec![Size::ZERO],
1210                         memory_index: vec![0],
1211                     },
1212                     largest_niche,
1213                     abi,
1214                     align,
1215                     size,
1216                 };
1217
1218                 let best_layout = match (tagged_layout, niche_filling_layout) {
1219                     (tagged_layout, Some(niche_filling_layout)) => {
1220                         // Pick the smaller layout; otherwise,
1221                         // pick the layout with the larger niche; otherwise,
1222                         // pick tagged as it has simpler codegen.
1223                         cmp::min_by_key(tagged_layout, niche_filling_layout, |layout| {
1224                             let niche_size =
1225                                 layout.largest_niche.as_ref().map_or(0, |n| n.available(dl));
1226                             (layout.size, cmp::Reverse(niche_size))
1227                         })
1228                     }
1229                     (tagged_layout, None) => tagged_layout,
1230                 };
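
                // For example (illustrative, on a 64-bit target): for
                // `Option<&u8>` the niche-filling candidate encodes `None`
                // as the null pointer and is 8 bytes, while the tagged
                // layout would be 16 bytes, so niche-filling wins here.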
1231
1232                 tcx.intern_layout(best_layout)
1233             }
1234
1235             // Types with no meaningful known layout.
1236             ty::Projection(_) | ty::Opaque(..) => {
1237                 let normalized = tcx.normalize_erasing_regions(param_env, ty);
1238                 if ty == normalized {
1239                     return Err(LayoutError::Unknown(ty));
1240                 }
1241                 tcx.layout_raw(param_env.and(normalized))?
1242             }
1243
1244             ty::Placeholder(..) | ty::GeneratorWitness(..) | ty::Infer(_) => {
1245                 bug!("Layout::compute: unexpected type `{}`", ty)
1246             }
1247
1248             ty::Bound(..) | ty::Param(_) | ty::Error(_) => {
1249                 return Err(LayoutError::Unknown(ty));
1250             }
1251         })
1252     }
1253 }
1254
1255 /// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
1256 #[derive(Clone, Debug, PartialEq)]
1257 enum SavedLocalEligibility {
1258     Unassigned,
1259     Assigned(VariantIdx),
1260     // FIXME: Use newtype_index so we aren't wasting bytes
1261     Ineligible(Option<u32>),
1262 }
1263
1264 // When laying out generators, we divide our saved local fields into two
1265 // categories: overlap-eligible and overlap-ineligible.
1266 //
1267 // Those fields which are ineligible for overlap go in a "prefix" at the
1268 // beginning of the layout, and always have space reserved for them.
1269 //
1270 // Overlap-eligible fields are only assigned to one variant, so we lay
1271 // those fields out for each variant and put them right after the
1272 // prefix.
1273 //
1274 // Finally, in the layout details, we point to the fields from the
1275 // variants they are assigned to. It is possible for some fields to be
1276 // included in multiple variants. No field ever "moves around" in the
1277 // layout; its offset is always the same.
1278 //
1279 // Also included in the layout are the upvars and the discriminant.
1280 // These are included as fields on the "outer" layout; they are not part
1281 // of any variant.
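//
// For example (an illustrative sketch; generators require
// `#![feature(generators)]`):
//
//     let gen = || {
//         {
//             let a = [0u8; 32];
//             yield;    // `a` is live across this suspension point only
//         }
//         {
//             let b = [0u8; 32];
//             yield;    // `b` is live across this suspension point only
//         }
//     };
//
// `a` and `b` each appear in exactly one variant and are never
// storage-live at the same time, so both are overlap-eligible: they can
// be assigned the same offsets in their respective variants, and the
// generator saves roughly 32 bytes of state instead of 64.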
1282 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
1283     /// Compute the eligibility and assignment of each local.
1284     fn generator_saved_local_eligibility(
1285         &self,
1286         info: &GeneratorLayout<'tcx>,
1287     ) -> (BitSet<GeneratorSavedLocal>, IndexVec<GeneratorSavedLocal, SavedLocalEligibility>) {
1288         use SavedLocalEligibility::*;
1289
1290         let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> =
1291             IndexVec::from_elem_n(Unassigned, info.field_tys.len());
1292
1293         // The saved locals not eligible for overlap. These will get
1294         // "promoted" to the prefix of our generator.
1295         let mut ineligible_locals = BitSet::new_empty(info.field_tys.len());
1296
1297         // Figure out which of our saved locals are fields in only
1298         // one variant. The rest are deemed ineligible for overlap.
1299         for (variant_index, fields) in info.variant_fields.iter_enumerated() {
1300             for local in fields {
1301                 match assignments[*local] {
1302                     Unassigned => {
1303                         assignments[*local] = Assigned(variant_index);
1304                     }
1305                     Assigned(idx) => {
1306                         // We've already seen this local at another suspension
1307                         // point, so it is no longer a candidate.
1308                         trace!(
1309                             "removing local {:?} in >1 variant ({:?}, {:?})",
1310                             local,
1311                             variant_index,
1312                             idx
1313                         );
1314                         ineligible_locals.insert(*local);
1315                         assignments[*local] = Ineligible(None);
1316                     }
1317                     Ineligible(_) => {}
1318                 }
1319             }
1320         }
1321
1322         // Next, check every pair of eligible locals to see if they
1323         // conflict.
1324         for local_a in info.storage_conflicts.rows() {
1325             let conflicts_a = info.storage_conflicts.count(local_a);
1326             if ineligible_locals.contains(local_a) {
1327                 continue;
1328             }
1329
1330             for local_b in info.storage_conflicts.iter(local_a) {
1331                 // local_a and local_b are storage-live at the same time, so they
1332                 // cannot overlap in the generator layout. The only way to guarantee
1333                 // this is if they are in the same variant, or one is ineligible
1334                 // (which means it is stored in every variant).
1335                 if ineligible_locals.contains(local_b)
1336                     || assignments[local_a] == assignments[local_b]
1337                 {
1338                     continue;
1339                 }
1340
1341                 // If they conflict, we will choose one to make ineligible.
1342                 // This is not always optimal; it's just a greedy heuristic that
1343                 // seems to produce good results most of the time.
1344                 let conflicts_b = info.storage_conflicts.count(local_b);
1345                 let (remove, other) =
1346                     if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
1347                 ineligible_locals.insert(remove);
1348                 assignments[remove] = Ineligible(None);
1349                 trace!("removing local {:?} due to conflict with {:?}", remove, other);
1350             }
1351         }
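
        // For example (illustrative): if local `a` conflicts with both `b`
        // and `c`, while `b` and `c` each conflict only with `a`, the
        // heuristic removes `a` (2 conflicts vs. 1), leaving `b` and `c`
        // free to overlap if they are assigned to different variants.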
1352
1353         // Count the number of variants in use. If only one is used, it is
1354         // impossible to overlap any locals in our layout. In this case it's
1355         // always better to make the remaining locals ineligible, so we can
1356         // lay them out with the other locals in the prefix and eliminate
1357         // unnecessary padding bytes.
1358         {
1359             let mut used_variants = BitSet::new_empty(info.variant_fields.len());
1360             for assignment in &assignments {
1361                 if let Assigned(idx) = assignment {
1362                     used_variants.insert(*idx);
1363                 }
1364             }
1365             if used_variants.count() < 2 {
1366                 for assignment in assignments.iter_mut() {
1367                     *assignment = Ineligible(None);
1368                 }
1369                 ineligible_locals.insert_all();
1370             }
1371         }
1372
1373         // Write down the order of our locals that will be promoted to the prefix.
1374         {
1375             for (idx, local) in ineligible_locals.iter().enumerate() {
1376                 assignments[local] = Ineligible(Some(idx as u32));
1377             }
1378         }
1379         debug!("generator saved local assignments: {:?}", assignments);
1380
1381         (ineligible_locals, assignments)
1382     }
1383
1384     /// Compute the full generator layout.
1385     fn generator_layout(
1386         &self,
1387         ty: Ty<'tcx>,
1388         def_id: hir::def_id::DefId,
1389         substs: SubstsRef<'tcx>,
1390     ) -> Result<&'tcx Layout, LayoutError<'tcx>> {
1391         use SavedLocalEligibility::*;
1392         let tcx = self.tcx;
1393
1394         let subst_field = |ty: Ty<'tcx>| ty.subst(tcx, substs);
1395
1396         let info = tcx.generator_layout(def_id);
1397         let (ineligible_locals, assignments) = self.generator_saved_local_eligibility(&info);
1398
1399         // Build a prefix layout, including "promoting" all ineligible
1400         // locals as part of the prefix. We compute the layout of all of
1401         // these fields at once to get optimal packing.
1402         let tag_index = substs.as_generator().prefix_tys().count();
1403
1404         // `info.variant_fields` already accounts for the reserved variants, so no need to add them.
1405         let max_discr = (info.variant_fields.len() - 1) as u128;
1406         let discr_int = Integer::fit_unsigned(max_discr);
1407         let discr_int_ty = discr_int.to_ty(tcx, false);
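        // e.g. (illustrative) a generator with the 3 reserved variants plus
        // one suspension point has `max_discr == 3`, which fits in `I8`.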
1408         let tag = Scalar { value: Primitive::Int(discr_int, false), valid_range: 0..=max_discr };
1409         let tag_layout = self.tcx.intern_layout(Layout::scalar(self, tag.clone()));
1410         let tag_layout = TyAndLayout { ty: discr_int_ty, layout: tag_layout };
1411
1412         let promoted_layouts = ineligible_locals
1413             .iter()
1414             .map(|local| subst_field(info.field_tys[local]))
1415             .map(|ty| tcx.mk_maybe_uninit(ty))
1416             .map(|ty| self.layout_of(ty));
1417         let prefix_layouts = substs
1418             .as_generator()
1419             .prefix_tys()
1420             .map(|ty| self.layout_of(ty))
1421             .chain(iter::once(Ok(tag_layout)))
1422             .chain(promoted_layouts)
1423             .collect::<Result<Vec<_>, _>>()?;
1424         let prefix = self.univariant_uninterned(
1425             ty,
1426             &prefix_layouts,
1427             &ReprOptions::default(),
1428             StructKind::AlwaysSized,
1429         )?;
1430
1431         let (prefix_size, prefix_align) = (prefix.size, prefix.align);
1432
1433         // Split the prefix layout into the "outer" fields (upvars and
1434         // discriminant) and the "promoted" fields. Promoted fields will
1435         // get included in each variant that requested them in
1436         // GeneratorLayout.
1437         debug!("prefix = {:#?}", prefix);
1438         let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
1439             FieldsShape::Arbitrary { mut offsets, memory_index } => {
1440                 let mut inverse_memory_index = invert_mapping(&memory_index);
1441
1442                 // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
1443                 // "outer" and "promoted" fields respectively.
1444                 let b_start = (tag_index + 1) as u32;
1445                 let offsets_b = offsets.split_off(b_start as usize);
1446                 let offsets_a = offsets;
1447
1448                 // Disentangle the "a" and "b" components of `inverse_memory_index`
1449                 // by preserving the order but keeping only one disjoint "half" each.
1450                 // FIXME(eddyb) build a better abstraction for permutations, if possible.
1451                 let inverse_memory_index_b: Vec<_> =
1452                     inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
1453                 inverse_memory_index.retain(|&i| i < b_start);
1454                 let inverse_memory_index_a = inverse_memory_index;
1455
1456                 // Since `inverse_memory_index_{a,b}` each refer only to their
1457                 // respective fields, they can be safely inverted.
1458                 let memory_index_a = invert_mapping(&inverse_memory_index_a);
1459                 let memory_index_b = invert_mapping(&inverse_memory_index_b);
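
                // For example (an illustrative sketch): with one upvar plus
                // the tag (`b_start == 2`) and two promoted fields, a prefix
                // `memory_index` of `[0, 2, 1, 3]` inverts to `[0, 2, 1, 3]`,
                // which splits into `inverse_memory_index_a == [0, 1]` and
                // `inverse_memory_index_b == [0, 1]`: each half lists its own
                // fields in memory order, even though the prefix interleaved
                // the two halves.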
1460
1461                 let outer_fields =
1462                     FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
1463                 (outer_fields, offsets_b, memory_index_b)
1464             }
1465             _ => bug!(),
1466         };
1467
1468         let mut size = prefix.size;
1469         let mut align = prefix.align;
1470         let variants = info
1471             .variant_fields
1472             .iter_enumerated()
1473             .map(|(index, variant_fields)| {
1474                 // Only include overlap-eligible fields when we compute our variant layout.
1475                 let variant_only_tys = variant_fields
1476                     .iter()
1477                     .filter(|local| match assignments[**local] {
1478                         Unassigned => bug!(),
1479                         Assigned(v) if v == index => true,
1480                         Assigned(_) => bug!("assignment does not match variant"),
1481                         Ineligible(_) => false,
1482                     })
1483                     .map(|local| subst_field(info.field_tys[*local]));
1484
1485                 let mut variant = self.univariant_uninterned(
1486                     ty,
1487                     &variant_only_tys
1488                         .map(|ty| self.layout_of(ty))
1489                         .collect::<Result<Vec<_>, _>>()?,
1490                     &ReprOptions::default(),
1491                     StructKind::Prefixed(prefix_size, prefix_align.abi),
1492                 )?;
1493                 variant.variants = Variants::Single { index };
1494
1495                 let (offsets, memory_index) = match variant.fields {
1496                     FieldsShape::Arbitrary { offsets, memory_index } => (offsets, memory_index),
1497                     _ => bug!(),
1498                 };
1499
1500                 // Now, stitch the promoted and variant-only fields back together in
1501                 // the order they are mentioned by our GeneratorLayout.
1502                 // Because we only use some subset (that can differ between variants)
1503                 // of the promoted fields, we can't just pick those elements of the
1504                 // `promoted_memory_index` (as we'd end up with gaps).
1505                 // So instead, we build an "inverse memory_index", as if all of the
1506                 // promoted fields were being used, but leave the elements not in the
1507                 // subset as `INVALID_FIELD_IDX`, which we can filter out later to
1508                 // obtain a valid (bijective) mapping.
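                // For example (an illustrative sketch): with two promoted
                // fields (`promoted_memory_index == [0, 1]`) and one
                // variant-only field, a variant mentioning
                // `[promoted#1, variant-only#0]` fills in
                // `combined_inverse_memory_index == [!0, 0, 1]` (the slot
                // for the unused promoted#0 stays invalid); `retain` then
                // shrinks it to `[0, 1]` before the inversion below.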
1509                 const INVALID_FIELD_IDX: u32 = !0;
1510                 let mut combined_inverse_memory_index =
1511                     vec![INVALID_FIELD_IDX; promoted_memory_index.len() + memory_index.len()];
1512                 let mut offsets_and_memory_index = offsets.into_iter().zip(memory_index);
1513                 let combined_offsets = variant_fields
1514                     .iter()
1515                     .enumerate()
1516                     .map(|(i, local)| {
1517                         let (offset, memory_index) = match assignments[*local] {
1518                             Unassigned => bug!(),
1519                             Assigned(_) => {
1520                                 let (offset, memory_index) =
1521                                     offsets_and_memory_index.next().unwrap();
1522                                 (offset, promoted_memory_index.len() as u32 + memory_index)
1523                             }
1524                             Ineligible(field_idx) => {
1525                                 let field_idx = field_idx.unwrap() as usize;
1526                                 (promoted_offsets[field_idx], promoted_memory_index[field_idx])
1527                             }
1528                         };
1529                         combined_inverse_memory_index[memory_index as usize] = i as u32;
1530                         offset
1531                     })
1532                     .collect();
1533
1534                 // Remove the unused slots and invert the mapping to obtain the
1535                 // combined `memory_index` (also see previous comment).
1536                 combined_inverse_memory_index.retain(|&i| i != INVALID_FIELD_IDX);
1537                 let combined_memory_index = invert_mapping(&combined_inverse_memory_index);
1538
1539                 variant.fields = FieldsShape::Arbitrary {
1540                     offsets: combined_offsets,
1541                     memory_index: combined_memory_index,
1542                 };
1543
1544                 size = size.max(variant.size);
1545                 align = align.max(variant.align);
1546                 Ok(variant)
1547             })
1548             .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1549
1550         size = size.align_to(align.abi);
1551
1552         let abi = if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi.is_uninhabited())
1553         {
1554             Abi::Uninhabited
1555         } else {
1556             Abi::Aggregate { sized: true }
1557         };
1558
1559         let layout = tcx.intern_layout(Layout {
1560             variants: Variants::Multiple {
1561                 tag,
1562                 tag_encoding: TagEncoding::Direct,
1563                 tag_field: tag_index,
1564                 variants,
1565             },
1566             fields: outer_fields,
1567             abi,
1568             largest_niche: prefix.largest_niche,
1569             size,
1570             align,
1571         });
1572         debug!("generator layout ({:?}): {:#?}", ty, layout);
1573         Ok(layout)
1574     }
1575
1576     /// This is invoked by the `layout_raw` query to record the final
1577     /// layout of each type.
1578     #[inline(always)]
1579     fn record_layout_for_printing(&self, layout: TyAndLayout<'tcx>) {
1580         // If we are running with `-Zprint-type-sizes`, maybe record layouts
1581         // for dumping later.
1582         if self.tcx.sess.opts.debugging_opts.print_type_sizes {
1583             self.record_layout_for_printing_outlined(layout)
1584         }
1585     }
1586
1587     fn record_layout_for_printing_outlined(&self, layout: TyAndLayout<'tcx>) {
1588         // Ignore layouts computed with non-empty param environments, and
1589         // non-monomorphic layouts, as the user only wants to see the layouts
1590         // resulting from the final codegen session.
1591         if layout.ty.has_param_types_or_consts() || !self.param_env.caller_bounds().is_empty() {
1592             return;
1593         }
1594
1595         // (delay format until we actually need it)
1596         let record = |kind, packed, opt_discr_size, variants| {
1597             let type_desc = format!("{:?}", layout.ty);
1598             self.tcx.sess.code_stats.record_type_size(
1599                 kind,
1600                 type_desc,
1601                 layout.align.abi,
1602                 layout.size,
1603                 packed,
1604                 opt_discr_size,
1605                 variants,
1606             );
1607         };
1608
1609         let adt_def = match *layout.ty.kind() {
1610             ty::Adt(ref adt_def, _) => {
1611                 debug!("print-type-size t: `{:?}` process adt", layout.ty);
1612                 adt_def
1613             }
1614
1615             ty::Closure(..) => {
1616                 debug!("print-type-size t: `{:?}` record closure", layout.ty);
1617                 record(DataTypeKind::Closure, false, None, vec![]);
1618                 return;
1619             }
1620
1621             _ => {
1622                 debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
1623                 return;
1624             }
1625         };
1626
1627         let adt_kind = adt_def.adt_kind();
1628         let adt_packed = adt_def.repr.pack.is_some();
1629
1630         let build_variant_info = |n: Option<Ident>, flds: &[Symbol], layout: TyAndLayout<'tcx>| {
1631             let mut min_size = Size::ZERO;
1632             let field_info: Vec<_> = flds
1633                 .iter()
1634                 .enumerate()
1635                 .map(|(i, &name)| match layout.field(self, i) {
1636                     Err(err) => {
1637                         bug!("no layout found for field {}: `{:?}`", name, err);
1638                     }
1639                     Ok(field_layout) => {
1640                         let offset = layout.fields.offset(i);
1641                         let field_end = offset + field_layout.size;
1642                         if min_size < field_end {
1643                             min_size = field_end;
1644                         }
1645                         FieldInfo {
1646                             name: name.to_string(),
1647                             offset: offset.bytes(),
1648                             size: field_layout.size.bytes(),
1649                             align: field_layout.align.abi.bytes(),
1650                         }
1651                     }
1652                 })
1653                 .collect();
1654
1655             VariantInfo {
1656                 name: n.map(|n| n.to_string()),
1657                 kind: if layout.is_unsized() { SizeKind::Min } else { SizeKind::Exact },
1658                 align: layout.align.abi.bytes(),
1659                 size: if min_size.bytes() == 0 { layout.size.bytes() } else { min_size.bytes() },
1660                 fields: field_info,
1661             }
1662         };
1663
1664         match layout.variants {
1665             Variants::Single { index } => {
1666                 debug!("print-type-size `{:#?}` variant {}", layout, adt_def.variants[index].ident);
1667                 if !adt_def.variants.is_empty() {
1668                     let variant_def = &adt_def.variants[index];
1669                     let fields: Vec<_> = variant_def.fields.iter().map(|f| f.ident.name).collect();
1670                     record(
1671                         adt_kind.into(),
1672                         adt_packed,
1673                         None,
1674                         vec![build_variant_info(Some(variant_def.ident), &fields, layout)],
1675                     );
1676                 } else {
1677                     // (This case arises for *empty* enums, so give it
1678                     // zero variants.)
1679                     record(adt_kind.into(), adt_packed, None, vec![]);
1680                 }
1681             }
1682
1683             Variants::Multiple { ref tag, ref tag_encoding, .. } => {
1684                 debug!(
1685                     "print-type-size `{:#?}` adt general variants def {}",
1686                     layout.ty,
1687                     adt_def.variants.len()
1688                 );
1689                 let variant_infos: Vec<_> = adt_def
1690                     .variants
1691                     .iter_enumerated()
1692                     .map(|(i, variant_def)| {
1693                         let fields: Vec<_> =
1694                             variant_def.fields.iter().map(|f| f.ident.name).collect();
1695                         build_variant_info(
1696                             Some(variant_def.ident),
1697                             &fields,
1698                             layout.for_variant(self, i),
1699                         )
1700                     })
1701                     .collect();
1702                 record(
1703                     adt_kind.into(),
1704                     adt_packed,
1705                     match tag_encoding {
1706                         TagEncoding::Direct => Some(tag.value.size(self)),
1707                         _ => None,
1708                     },
1709                     variant_infos,
1710                 );
1711             }
1712         }
1713     }
1714 }
1715
1716 /// Type size "skeleton", i.e., the only information determining a type's size.
1717 /// While this is conservative (aside from constant sizes, only pointers,
1718 /// newtypes thereof, and null-pointer-optimized enums are allowed), it is
1719 /// enough to statically check common use cases of `transmute`.
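///
/// For example (an illustrative sketch, not tied to any particular caller):
/// both `&T` and `Option<&T>` reduce to `SizeSkeleton::Pointer` with the
/// same `tail`, so a `transmute` between them can be size-checked even when
/// `T` is a type parameter:
///
/// ```ignore (illustrative)
/// unsafe fn erase<T>(x: &T) -> Option<&T> {
///     // Both sides have the skeleton `Pointer { tail: T }`.
///     std::mem::transmute(x)
/// }
/// ```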
1720 #[derive(Copy, Clone, Debug)]
1721 pub enum SizeSkeleton<'tcx> {
1722     /// Any statically computable Layout.
1723     Known(Size),
1724
1725     /// A potentially-fat pointer.
1726     Pointer {
1727         /// If true, this pointer is never null.
1728         non_zero: bool,
1729         /// The type which determines the unsized metadata, if any,
1730         /// of this pointer. Either a type parameter or a projection
1731         /// depending on one, with regions erased.
1732         tail: Ty<'tcx>,
1733     },
1734 }
1735
1736 impl<'tcx> SizeSkeleton<'tcx> {
1737     pub fn compute(
1738         ty: Ty<'tcx>,
1739         tcx: TyCtxt<'tcx>,
1740         param_env: ty::ParamEnv<'tcx>,
1741     ) -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
1742         debug_assert!(!ty.has_infer_types_or_consts());
1743
1744         // First try computing a static layout.
1745         let err = match tcx.layout_of(param_env.and(ty)) {
1746             Ok(layout) => {
1747                 return Ok(SizeSkeleton::Known(layout.size));
1748             }
1749             Err(err) => err,
1750         };
1751
1752         match *ty.kind() {
1753             ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
1754                 let non_zero = !ty.is_unsafe_ptr();
1755                 let tail = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
1756                 match tail.kind() {
1757                     ty::Param(_) | ty::Projection(_) => {
1758                         debug_assert!(tail.has_param_types_or_consts());
1759                         Ok(SizeSkeleton::Pointer { non_zero, tail: tcx.erase_regions(tail) })
1760                     }
1761                     _ => bug!(
1762                         "SizeSkeleton::compute({}): layout errored ({}), yet \
1763                               tail `{}` is not a type parameter or a projection",
1764                         ty,
1765                         err,
1766                         tail
1767                     ),
1768                 }
1769             }
1770
1771             ty::Adt(def, substs) => {
1772                 // Only newtypes and enums w/ nullable pointer optimization.
1773                 if def.is_union() || def.variants.is_empty() || def.variants.len() > 2 {
1774                     return Err(err);
1775                 }
1776
1777                 // Get a zero-sized variant or a pointer newtype.
1778                 let zero_or_ptr_variant = |i| {
1779                     let i = VariantIdx::new(i);
1780                     let fields = def.variants[i]
1781                         .fields
1782                         .iter()
1783                         .map(|field| SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env));
1784                     let mut ptr = None;
1785                     for field in fields {
1786                         let field = field?;
1787                         match field {
1788                             SizeSkeleton::Known(size) => {
1789                                 if size.bytes() > 0 {
1790                                     return Err(err);
1791                                 }
1792                             }
1793                             SizeSkeleton::Pointer { .. } => {
1794                                 if ptr.is_some() {
1795                                     return Err(err);
1796                                 }
1797                                 ptr = Some(field);
1798                             }
1799                         }
1800                     }
1801                     Ok(ptr)
1802                 };
1803
1804                 let v0 = zero_or_ptr_variant(0)?;
1805                 // Newtype.
1806                 if def.variants.len() == 1 {
1807                     if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
1808                         return Ok(SizeSkeleton::Pointer {
1809                             non_zero: non_zero
1810                                 || match tcx.layout_scalar_valid_range(def.did) {
1811                                     (Bound::Included(start), Bound::Unbounded) => start > 0,
1812                                     (Bound::Included(start), Bound::Included(end)) => {
1813                                         0 < start && start < end
1814                                     }
1815                                     _ => false,
1816                                 },
1817                             tail,
1818                         });
1819                     } else {
1820                         return Err(err);
1821                     }
1822                 }
1823
1824                 let v1 = zero_or_ptr_variant(1)?;
1825                 // Nullable pointer enum optimization.
1826                 match (v0, v1) {
1827                     (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None)
1828                     | (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
1829                         Ok(SizeSkeleton::Pointer { non_zero: false, tail })
1830                     }
1831                     _ => Err(err),
1832                 }
1833             }
1834
1835             ty::Projection(_) | ty::Opaque(..) => {
1836                 let normalized = tcx.normalize_erasing_regions(param_env, ty);
1837                 if ty == normalized {
1838                     Err(err)
1839                 } else {
1840                     SizeSkeleton::compute(normalized, tcx, param_env)
1841                 }
1842             }
1843
1844             _ => Err(err),
1845         }
1846     }
1847
1848     pub fn same_size(self, other: SizeSkeleton<'_>) -> bool {
1849         match (self, other) {
1850             (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
1851             (SizeSkeleton::Pointer { tail: a, .. }, SizeSkeleton::Pointer { tail: b, .. }) => {
1852                 a == b
1853             }
1854             _ => false,
1855         }
1856     }
1857 }
1858
1859 pub trait HasTyCtxt<'tcx>: HasDataLayout {
1860     fn tcx(&self) -> TyCtxt<'tcx>;
1861 }
1862
1863 pub trait HasParamEnv<'tcx> {
1864     fn param_env(&self) -> ty::ParamEnv<'tcx>;
1865 }
1866
1867 impl<'tcx> HasDataLayout for TyCtxt<'tcx> {
1868     fn data_layout(&self) -> &TargetDataLayout {
1869         &self.data_layout
1870     }
1871 }
1872
1873 impl<'tcx> HasTyCtxt<'tcx> for TyCtxt<'tcx> {
1874     fn tcx(&self) -> TyCtxt<'tcx> {
1875         *self
1876     }
1877 }
1878
1879 impl<'tcx, C> HasParamEnv<'tcx> for LayoutCx<'tcx, C> {
1880     fn param_env(&self) -> ty::ParamEnv<'tcx> {
1881         self.param_env
1882     }
1883 }
1884
1885 impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
1886     fn data_layout(&self) -> &TargetDataLayout {
1887         self.tcx.data_layout()
1888     }
1889 }
1890
1891 impl<'tcx, T: HasTyCtxt<'tcx>> HasTyCtxt<'tcx> for LayoutCx<'tcx, T> {
1892     fn tcx(&self) -> TyCtxt<'tcx> {
1893         self.tcx.tcx()
1894     }
1895 }
1896
1897 pub type TyAndLayout<'tcx> = rustc_target::abi::TyAndLayout<'tcx, Ty<'tcx>>;
1898
1899 impl<'tcx> LayoutOf for LayoutCx<'tcx, TyCtxt<'tcx>> {
1900     type Ty = Ty<'tcx>;
1901     type TyAndLayout = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
1902
1903     /// Computes the layout of a type. Note that this implicitly
1904     /// executes in "reveal all" mode.
1905     fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout {
1906         let param_env = self.param_env.with_reveal_all_normalized(self.tcx);
1907         let ty = self.tcx.normalize_erasing_regions(param_env, ty);
1908         let layout = self.tcx.layout_raw(param_env.and(ty))?;
1909         let layout = TyAndLayout { ty, layout };
1910
1911         // N.B., this recording is normally disabled; when enabled, it
1912         // can however trigger recursive invocations of `layout_of`.
1913         // Therefore, we execute it *after* the main query has
1914         // completed, to avoid problems around recursive structures
1915         // and the like. (Admittedly, I wasn't able to reproduce a problem
1916         // here, but it seems like the right thing to do. -nmatsakis)
1917         self.record_layout_for_printing(layout);
1918
1919         Ok(layout)
1920     }
1921 }
1922
1923 impl LayoutOf for LayoutCx<'tcx, ty::query::TyCtxtAt<'tcx>> {
1924     type Ty = Ty<'tcx>;
1925     type TyAndLayout = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
1926
1927     /// Computes the layout of a type. Note that this implicitly
1928     /// executes in "reveal all" mode.
1929     fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout {
1930         let param_env = self.param_env.with_reveal_all_normalized(*self.tcx);
1931         let ty = self.tcx.normalize_erasing_regions(param_env, ty);
1932         let layout = self.tcx.layout_raw(param_env.and(ty))?;
1933         let layout = TyAndLayout { ty, layout };
1934
1935         // N.B., this recording is normally disabled; when enabled, it
1936         // can however trigger recursive invocations of `layout_of`.
1937         // Therefore, we execute it *after* the main query has
1938         // completed, to avoid problems around recursive structures
1939         // and the like. (Admittedly, I wasn't able to reproduce a problem
1940         // here, but it seems like the right thing to do. -nmatsakis)
1941         let cx = LayoutCx { tcx: *self.tcx, param_env: self.param_env };
1942         cx.record_layout_for_printing(layout);
1943
1944         Ok(layout)
1945     }
1946 }
1947
1948 // Helper (inherent) `layout_of` methods to avoid pushing `LayoutCx` to users.
1949 impl TyCtxt<'tcx> {
1950     /// Computes the layout of a type. Note that this implicitly
1951     /// executes in "reveal all" mode.
1952     #[inline]
1953     pub fn layout_of(
1954         self,
1955         param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
1956     ) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
1957         let cx = LayoutCx { tcx: self, param_env: param_env_and_ty.param_env };
1958         cx.layout_of(param_env_and_ty.value)
1959     }
1960 }
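
// For example (illustrative), a caller holding a `TyCtxt` and a fully
// monomorphic `ty` can write:
//
//     let layout = tcx.layout_of(ty::ParamEnv::reveal_all().and(ty))?;
//     debug!("`{:?}` occupies {} bytes", ty, layout.size.bytes());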
1961
1962 impl ty::query::TyCtxtAt<'tcx> {
1963     /// Computes the layout of a type. Note that this implicitly
1964     /// executes in "reveal all" mode.
1965     #[inline]
1966     pub fn layout_of(
1967         self,
1968         param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
1969     ) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
1970         let cx = LayoutCx { tcx: self.at(self.span), param_env: param_env_and_ty.param_env };
1971         cx.layout_of(param_env_and_ty.value)
1972     }
1973 }
1974
1975 impl<'tcx, C> TyAndLayoutMethods<'tcx, C> for Ty<'tcx>
1976 where
1977     C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout: MaybeResult<TyAndLayout<'tcx>>>
1978         + HasTyCtxt<'tcx>
1979         + HasParamEnv<'tcx>,
1980 {
1981     fn for_variant(
1982         this: TyAndLayout<'tcx>,
1983         cx: &C,
1984         variant_index: VariantIdx,
1985     ) -> TyAndLayout<'tcx> {
1986         let layout = match this.variants {
1987             Variants::Single { index }
1988                 // If all variants but one are uninhabited, the variant layout is the enum layout.
1989                 if index == variant_index &&
1990                 // Don't confuse variants of uninhabited enums with the enum itself.
1991                 // For more details see https://github.com/rust-lang/rust/issues/69763.
1992                 this.fields != FieldsShape::Primitive =>
1993             {
1994                 this.layout
1995             }
1996
1997             Variants::Single { index } => {
1998                 // Deny calling for_variant more than once for non-Single enums.
1999                 if let Ok(original_layout) = cx.layout_of(this.ty).to_result() {
2000                     assert_eq!(original_layout.variants, Variants::Single { index });
2001                 }
2002
2003                 let fields = match this.ty.kind() {
2004                     ty::Adt(def, _) if def.variants.is_empty() =>
2005                         bug!("for_variant called on zero-variant enum"),
2006                     ty::Adt(def, _) => def.variants[variant_index].fields.len(),
2007                     _ => bug!(),
2008                 };
2009                 let tcx = cx.tcx();
2010                 tcx.intern_layout(Layout {
2011                     variants: Variants::Single { index: variant_index },
2012                     fields: match NonZeroUsize::new(fields) {
2013                         Some(fields) => FieldsShape::Union(fields),
2014                         None => FieldsShape::Arbitrary { offsets: vec![], memory_index: vec![] },
2015                     },
2016                     abi: Abi::Uninhabited,
2017                     largest_niche: None,
2018                     align: tcx.data_layout.i8_align,
2019                     size: Size::ZERO,
2020                 })
2021             }
2022
2023             Variants::Multiple { ref variants, .. } => &variants[variant_index],
2024         };
2025
2026         assert_eq!(layout.variants, Variants::Single { index: variant_index });
2027
2028         TyAndLayout { ty: this.ty, layout }
2029     }
2030
2031     fn field(this: TyAndLayout<'tcx>, cx: &C, i: usize) -> C::TyAndLayout {
2032         let tcx = cx.tcx();
2033         let tag_layout = |tag: &Scalar| -> C::TyAndLayout {
2034             let layout = Layout::scalar(cx, tag.clone());
2035             MaybeResult::from(Ok(TyAndLayout {
2036                 layout: tcx.intern_layout(layout),
2037                 ty: tag.value.to_ty(tcx),
2038             }))
2039         };
2040
2041         cx.layout_of(match *this.ty.kind() {
2042             ty::Bool
2043             | ty::Char
2044             | ty::Int(_)
2045             | ty::Uint(_)
2046             | ty::Float(_)
2047             | ty::FnPtr(_)
2048             | ty::Never
2049             | ty::FnDef(..)
2050             | ty::GeneratorWitness(..)
2051             | ty::Foreign(..)
2052             | ty::Dynamic(..) => bug!("TyAndLayout::field_type({:?}): not applicable", this),
2053
2054             // Potentially-fat pointers.
2055             ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
2056                 assert!(i < this.fields.count());
2057
2058                 // Reuse the fat `*T` type as its own thin pointer data field.
2059                 // This provides information about, e.g., DST struct pointees
2060                 // (which may have no non-DST form), and will work as long
2061                 // as the `Abi` or `FieldsShape` is checked by users.
2062                 if i == 0 {
2063                     let nil = tcx.mk_unit();
2064                     let ptr_ty = if this.ty.is_unsafe_ptr() {
2065                         tcx.mk_mut_ptr(nil)
2066                     } else {
2067                         tcx.mk_mut_ref(tcx.lifetimes.re_static, nil)
2068                     };
2069                     return MaybeResult::from(cx.layout_of(ptr_ty).to_result().map(
2070                         |mut ptr_layout| {
2071                             ptr_layout.ty = this.ty;
2072                             ptr_layout
2073                         },
2074                     ));
2075                 }
2076
2077                 match tcx.struct_tail_erasing_lifetimes(pointee, cx.param_env()).kind() {
2078                     ty::Slice(_) | ty::Str => tcx.types.usize,
2079                     ty::Dynamic(_, _) => {
2080                         tcx.mk_imm_ref(tcx.lifetimes.re_static, tcx.mk_array(tcx.types.usize, 3))
2081                         /* FIXME: use actual fn pointers
2082                         Warning: naively computing the number of entries in the
2083                         vtable by counting the methods on the trait + methods on
2084                         all parent traits does not work, because some methods can
2085                         be not object safe and thus excluded from the vtable.
2086                         Increase this counter if you tried to implement this but
2087                         failed to do it without duplicating a lot of code from
2088                         other places in the compiler: 2
2089                         tcx.mk_tup(&[
2090                             tcx.mk_array(tcx.types.usize, 3),
2091                             tcx.mk_array(Option<fn()>),
2092                         ])
2093                         */
2094                     }
2095                     _ => bug!("TyAndLayout::field_type({:?}): not applicable", this),
2096                 }
2097             }
2098
2099             // Arrays and slices.
2100             ty::Array(element, _) | ty::Slice(element) => element,
2101             ty::Str => tcx.types.u8,
2102
2103             // Tuples, generators and closures.
2104             ty::Closure(_, ref substs) => substs.as_closure().upvar_tys().nth(i).unwrap(),
2105
2106             ty::Generator(def_id, ref substs, _) => match this.variants {
2107                 Variants::Single { index } => substs
2108                     .as_generator()
2109                     .state_tys(def_id, tcx)
2110                     .nth(index.as_usize())
2111                     .unwrap()
2112                     .nth(i)
2113                     .unwrap(),
2114                 Variants::Multiple { ref tag, tag_field, .. } => {
2115                     if i == tag_field {
2116                         return tag_layout(tag);
2117                     }
2118                     substs.as_generator().prefix_tys().nth(i).unwrap()
2119                 }
2120             },
2121
2122             ty::Tuple(tys) => tys[i].expect_ty(),
2123
2124             // SIMD vector types.
2125             ty::Adt(def, ..) if def.repr.simd() => this.ty.simd_type(tcx),
2126
2127             // ADTs.
2128             ty::Adt(def, substs) => {
2129                 match this.variants {
2130                     Variants::Single { index } => def.variants[index].fields[i].ty(tcx, substs),
2131
2132                     // Discriminant field for enums (where applicable).
2133                     Variants::Multiple { ref tag, .. } => {
2134                         assert_eq!(i, 0);
2135                         return tag_layout(tag);
2136                     }
2137                 }
2138             }
2139
2140             ty::Projection(_)
2141             | ty::Bound(..)
2142             | ty::Placeholder(..)
2143             | ty::Opaque(..)
2144             | ty::Param(_)
2145             | ty::Infer(_)
2146             | ty::Error(_) => bug!("TyAndLayout::field_type: unexpected type `{}`", this.ty),
2147         })
2148     }
2149
2150     fn pointee_info_at(this: TyAndLayout<'tcx>, cx: &C, offset: Size) -> Option<PointeeInfo> {
2151         let addr_space_of_ty = |ty: Ty<'tcx>| {
2152             if ty.is_fn() { cx.data_layout().instruction_address_space } else { AddressSpace::DATA }
2153         };
2154
2155         let pointee_info = match *this.ty.kind() {
2156             ty::RawPtr(mt) if offset.bytes() == 0 => {
2157                 cx.layout_of(mt.ty).to_result().ok().map(|layout| PointeeInfo {
2158                     size: layout.size,
2159                     align: layout.align.abi,
2160                     safe: None,
2161                     address_space: addr_space_of_ty(mt.ty),
2162                 })
2163             }
2164             ty::FnPtr(fn_sig) if offset.bytes() == 0 => {
2165                 cx.layout_of(cx.tcx().mk_fn_ptr(fn_sig)).to_result().ok().map(|layout| {
2166                     PointeeInfo {
2167                         size: layout.size,
2168                         align: layout.align.abi,
2169                         safe: None,
2170                         address_space: cx.data_layout().instruction_address_space,
2171                     }
2172                 })
2173             }
2174             ty::Ref(_, ty, mt) if offset.bytes() == 0 => {
2175                 let address_space = addr_space_of_ty(ty);
2176                 let tcx = cx.tcx();
2177                 let is_freeze = ty.is_freeze(tcx.at(DUMMY_SP), cx.param_env());
2178                 let kind = match mt {
2179                     hir::Mutability::Not => {
2180                         if is_freeze {
2181                             PointerKind::Frozen
2182                         } else {
2183                             PointerKind::Shared
2184                         }
2185                     }
2186                     hir::Mutability::Mut => {
2187                         // Previously we would only emit noalias annotations for LLVM >= 6 or in
2188                         // panic=abort mode. That was deemed right, as prior versions had many bugs
2189                         // in conjunction with unwinding, but later versions didn’t seem to have
2190                         // said issues. See issue #31681.
2191                         //
2192                         // Alas, later on we encountered a case where noalias would generate wrong
2193                         // code altogether even with recent versions of LLVM in *safe* code with no
2194                         // unwinding involved. See #54462.
2195                         //
2196                         // For now, do not enable mutable_noalias by default at all, while the
2197                         // issue is being figured out.
2198                         if tcx.sess.opts.debugging_opts.mutable_noalias {
2199                             PointerKind::UniqueBorrowed
2200                         } else {
2201                             PointerKind::Shared
2202                         }
2203                     }
2204                 };
2205
2206                 cx.layout_of(ty).to_result().ok().map(|layout| PointeeInfo {
2207                     size: layout.size,
2208                     align: layout.align.abi,
2209                     safe: Some(kind),
2210                     address_space,
2211                 })
2212             }
2213
2214             _ => {
2215                 let mut data_variant = match this.variants {
2216                     // Within the discriminant field, only the niche itself is
2217                     // always initialized, so we only check for a pointer at its
2218                     // offset.
2219                     //
2220                     // If the niche is a pointer, it's either valid (according
2221                     // to its type), or null (which the niche field's scalar
2222                     // validity range encodes).  This allows using
2223                     // `dereferenceable_or_null` for e.g., `Option<&T>`, and
2224                     // this will continue to work as long as we don't start
2225                     // using more niches than just null (e.g., the first page of
2226                     // the address space, or unaligned pointers).
2227                     Variants::Multiple {
2228                         tag_encoding: TagEncoding::Niche { dataful_variant, .. },
2229                         tag_field,
2230                         ..
2231                     } if this.fields.offset(tag_field) == offset => {
2232                         Some(this.for_variant(cx, dataful_variant))
2233                     }
2234                     _ => Some(this),
2235                 };
2236
2237                 if let Some(variant) = data_variant {
2238                     // We're not interested in any unions.
2239                     if let FieldsShape::Union(_) = variant.fields {
2240                         data_variant = None;
2241                     }
2242                 }
2243
2244                 let mut result = None;
2245
2246                 if let Some(variant) = data_variant {
2247                     let ptr_end = offset + Pointer.size(cx);
2248                     for i in 0..variant.fields.count() {
2249                         let field_start = variant.fields.offset(i);
2250                         if field_start <= offset {
2251                             let field = variant.field(cx, i);
2252                             result = field.to_result().ok().and_then(|field| {
2253                                 if ptr_end <= field_start + field.size {
2254                                     // We found the right field, look inside it.
2255                                     let field_info =
2256                                         field.pointee_info_at(cx, offset - field_start);
2257                                     field_info
2258                                 } else {
2259                                     None
2260                                 }
2261                             });
2262                             if result.is_some() {
2263                                 break;
2264                             }
2265                         }
2266                     }
2267                 }
2268
2269                 // FIXME(eddyb) This should be for `ptr::Unique<T>`, not `Box<T>`.
2270                 if let Some(ref mut pointee) = result {
2271                     if let ty::Adt(def, _) = this.ty.kind() {
2272                         if def.is_box() && offset.bytes() == 0 {
2273                             pointee.safe = Some(PointerKind::UniqueOwned);
2274                         }
2275                     }
2276                 }
2277
2278                 result
2279             }
2280         };
2281
2282         debug!(
2283             "pointee_info_at (offset={:?}, type kind: {:?}) => {:?}",
2284             offset,
2285             this.ty.kind(),
2286             pointee_info
2287         );
2288
2289         pointee_info
2290     }
2291 }
2292
2293 impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for LayoutError<'tcx> {
2294     fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
2295         use crate::ty::layout::LayoutError::*;
2296         mem::discriminant(self).hash_stable(hcx, hasher);
2297
2298         match *self {
2299             Unknown(t) | SizeOverflow(t) => t.hash_stable(hcx, hasher),
2300         }
2301     }
2302 }
2303
2304 impl<'tcx> ty::Instance<'tcx> {
2305     // NOTE(eddyb) this is private to avoid using it from outside of
2306     // `FnAbi::of_instance` - any other uses are either too high-level
2307     // for `Instance` (e.g. typeck would use `Ty::fn_sig` instead),
2308     // or should go through `FnAbi` instead, to avoid losing any
2309     // adjustments `FnAbi::of_instance` might be performing.
2310     fn fn_sig_for_fn_abi(&self, tcx: TyCtxt<'tcx>) -> ty::PolyFnSig<'tcx> {
2311         // FIXME(davidtwco,eddyb): A `ParamEnv` should be passed through to this function.
2312         let ty = self.ty(tcx, ty::ParamEnv::reveal_all());
2313         match *ty.kind() {
2314             ty::FnDef(..) => {
2315                 // HACK(davidtwco,eddyb): This is a workaround for polymorphization considering
2316                 // parameters unused if they show up in the signature, but not in the `mir::Body`
2317                 // (i.e. due to being inside a projection that got normalized, see
2318                 // `src/test/ui/polymorphization/normalized_sig_types.rs`), and codegen not keeping
2319                 // track of a polymorphization `ParamEnv` to allow normalizing later.
2320                 let mut sig = match *ty.kind() {
2321                     ty::FnDef(def_id, substs) => tcx
2322                         .normalize_erasing_regions(tcx.param_env(def_id), tcx.fn_sig(def_id))
2323                         .subst(tcx, substs),
2324                     _ => unreachable!(),
2325                 };
2326
2327                 if let ty::InstanceDef::VtableShim(..) = self.def {
2328                     // Modify `fn(self, ...)` to `fn(self: *mut Self, ...)`.
2329                     sig = sig.map_bound(|mut sig| {
2330                         let mut inputs_and_output = sig.inputs_and_output.to_vec();
2331                         inputs_and_output[0] = tcx.mk_mut_ptr(inputs_and_output[0]);
2332                         sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);
2333                         sig
2334                     });
2335                 }
2336                 sig
2337             }
2338             ty::Closure(def_id, substs) => {
2339                 let sig = substs.as_closure().sig();
2340
2341                 let env_ty = tcx.closure_env_ty(def_id, substs).unwrap();
2342                 sig.map_bound(|sig| {
2343                     tcx.mk_fn_sig(
2344                         iter::once(env_ty.skip_binder()).chain(sig.inputs().iter().cloned()),
2345                         sig.output(),
2346                         sig.c_variadic,
2347                         sig.unsafety,
2348                         sig.abi,
2349                     )
2350                 })
2351             }
2352             ty::Generator(_, substs, _) => {
2353                 let sig = substs.as_generator().poly_sig();
2354
2355                 let env_region = ty::ReLateBound(ty::INNERMOST, ty::BrEnv);
2356                 let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty);
2357
2358                 let pin_did = tcx.require_lang_item(LangItem::Pin, None);
2359                 let pin_adt_ref = tcx.adt_def(pin_did);
2360                 let pin_substs = tcx.intern_substs(&[env_ty.into()]);
2361                 let env_ty = tcx.mk_adt(pin_adt_ref, pin_substs);
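                // Conceptually (an illustrative sketch), the signature
                // built below is:
                //
                //     fn resume(gen: Pin<&mut G>, arg: R) -> GeneratorState<Y, Ret>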
2362
2363                 sig.map_bound(|sig| {
2364                     let state_did = tcx.require_lang_item(LangItem::GeneratorState, None);
2365                     let state_adt_ref = tcx.adt_def(state_did);
2366                     let state_substs =
2367                         tcx.intern_substs(&[sig.yield_ty.into(), sig.return_ty.into()]);
2368                     let ret_ty = tcx.mk_adt(state_adt_ref, state_substs);
2369
2370                     tcx.mk_fn_sig(
2371                         [env_ty, sig.resume_ty].iter(),
2372                         &ret_ty,
2373                         false,
2374                         hir::Unsafety::Normal,
2375                         rustc_target::spec::abi::Abi::Rust,
2376                     )
2377                 })
2378             }
2379             _ => bug!("unexpected type {:?} in Instance::fn_sig_for_fn_abi", ty),
2380         }
2381     }
2382 }
2383
2384 pub trait FnAbiExt<'tcx, C>
2385 where
2386     C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout = TyAndLayout<'tcx>>
2387         + HasDataLayout
2388         + HasTargetSpec
2389         + HasTyCtxt<'tcx>
2390         + HasParamEnv<'tcx>,
2391 {
2392     /// Compute a `FnAbi` suitable for indirect calls, i.e. to `fn` pointers.
2393     ///
2394     /// NB: this doesn't handle virtual calls - those should use `FnAbi::of_instance`
2395     /// instead, where the instance is an `InstanceDef::Virtual`.
2396     fn of_fn_ptr(cx: &C, sig: ty::PolyFnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self;
2397
2398     /// Compute a `FnAbi` suitable for declaring/defining an `fn` instance, and for
2399     /// direct calls to an `fn`.
2400     ///
2401     /// NB: that includes virtual calls, which are represented by "direct calls"
2402     /// to an `InstanceDef::Virtual` instance (of `<dyn Trait as Trait>::fn`).
2403     fn of_instance(cx: &C, instance: ty::Instance<'tcx>, extra_args: &[Ty<'tcx>]) -> Self;
2404
2405     fn new_internal(
2406         cx: &C,
2407         sig: ty::PolyFnSig<'tcx>,
2408         extra_args: &[Ty<'tcx>],
2409         caller_location: Option<Ty<'tcx>>,
2410         codegen_fn_attr_flags: CodegenFnAttrFlags,
2411         mk_arg_type: impl Fn(Ty<'tcx>, Option<usize>) -> ArgAbi<'tcx, Ty<'tcx>>,
2412     ) -> Self;
2413     fn adjust_for_abi(&mut self, cx: &C, abi: SpecAbi);
2414 }
2415
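// Illustrative outcomes (not from the original source), assuming neither
// `UNWIND` nor `RUSTC_ALLOCATOR_NOUNWIND` is set in `codegen_fn_attr_flags`:
//
//     fn_can_unwind(PanicStrategy::Abort, flags, Conv::Rust)  == false
//     fn_can_unwind(PanicStrategy::Unwind, flags, Conv::Rust) == true
//     fn_can_unwind(PanicStrategy::Unwind, flags, Conv::C)    == false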
2416 fn fn_can_unwind(
2417     panic_strategy: PanicStrategy,
2418     codegen_fn_attr_flags: CodegenFnAttrFlags,
2419     call_conv: Conv,
2420 ) -> bool {
2421     if panic_strategy != PanicStrategy::Unwind {
2422         // In panic=abort mode we assume nothing can unwind anywhere, so
2423         // optimize based on this!
2424         false
2425     } else if codegen_fn_attr_flags.contains(CodegenFnAttrFlags::UNWIND) {
2426         // If a specific #[unwind] attribute is present, use that.
2427         true
2428     } else if codegen_fn_attr_flags.contains(CodegenFnAttrFlags::RUSTC_ALLOCATOR_NOUNWIND) {
2429         // Special attribute for allocator functions, which can't unwind.
2430         false
2431     } else {
2432         if call_conv == Conv::Rust {
2433             // Any Rust method (or `extern "Rust" fn` or `extern
2434             // "rust-call" fn`) is explicitly allowed to unwind
2435             // (unless it has a no-unwind attribute, handled above).
2436             true
2437         } else {
2438             // Anything else is either:
2439             //
2440             //  1. A foreign item using a non-Rust ABI (like `extern "C" { fn foo(); }`), or
2441             //
2442             //  2. A Rust item using a non-Rust ABI (like `extern "C" fn foo() { ... }`).
2443             //
2444             // Foreign items (case 1) are assumed to not unwind; it is
2445             // UB otherwise. (At least for now; see also
2446             // rust-lang/rust#63909 and Rust RFC 2753.)
2447             //
2448             // Items defined in Rust with non-Rust ABIs (case 2) are also
2449             // not supposed to unwind. Whether this should be enforced
2450             // (versus stating it is UB) and *how* it would be enforced
2451             // is currently under discussion; see rust-lang/rust#58794.
2452             //
2453             // In either case, we mark the item as explicitly nounwind.
2454             false
2455         }
2456     }
2457 }
2458
2459 impl<'tcx, C> FnAbiExt<'tcx, C> for call::FnAbi<'tcx, Ty<'tcx>>
2460 where
2461     C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout = TyAndLayout<'tcx>>
2462         + HasDataLayout
2463         + HasTargetSpec
2464         + HasTyCtxt<'tcx>
2465         + HasParamEnv<'tcx>,
2466 {
2467     fn of_fn_ptr(cx: &C, sig: ty::PolyFnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
2468         // Assume that fn pointers may always unwind
2469         let codegen_fn_attr_flags = CodegenFnAttrFlags::UNWIND;
2470
2471         call::FnAbi::new_internal(cx, sig, extra_args, None, codegen_fn_attr_flags, |ty, _| {
2472             ArgAbi::new(cx.layout_of(ty))
2473         })
2474     }
2475
2476     fn of_instance(cx: &C, instance: ty::Instance<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
2477         let sig = instance.fn_sig_for_fn_abi(cx.tcx());
2478
2479         let caller_location = if instance.def.requires_caller_location(cx.tcx()) {
2480             Some(cx.tcx().caller_location_ty())
2481         } else {
2482             None
2483         };
2484
2485         let attrs = cx.tcx().codegen_fn_attrs(instance.def_id()).flags;
2486
2487         call::FnAbi::new_internal(cx, sig, extra_args, caller_location, attrs, |ty, arg_idx| {
2488             let mut layout = cx.layout_of(ty);
2489             // Don't pass the vtable; it's not an argument of the virtual fn.
2490             // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
2491             // or `&/&mut dyn Trait` because this is special-cased elsewhere in codegen.
2492             if let (ty::InstanceDef::Virtual(..), Some(0)) = (&instance.def, arg_idx) {
2493                 let fat_pointer_ty = if layout.is_unsized() {
2494                     // unsized `self` is passed as a pointer to `self`
2495                     // FIXME (mikeyhew) change this to use &own if it is ever added to the language
2496                     cx.tcx().mk_mut_ptr(layout.ty)
2497                 } else {
2498                     match layout.abi {
2499                         Abi::ScalarPair(..) => (),
2500                         _ => bug!("receiver type has unsupported layout: {:?}", layout),
2501                     }
2502
2503                     // In the case of Rc<Self>, we need to explicitly pass a *mut RcBox<Self>
2504                     // with a Scalar (not ScalarPair) ABI. This is a hack that is understood
2505                     // elsewhere in the compiler as a method on a `dyn Trait`.
2506                     // To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes until we
2507                     // get a built-in pointer type.
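                    // Illustrative descent (not from the original source) for an
                    // `Rc<Self>` receiver on a `dyn Trait` method:
                    //     Rc<dyn Trait> -> NonNull<RcBox<dyn Trait>> -> *const RcBox<dyn Trait>
                    // stopping at the first built-in pointer type.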
2508                     let mut fat_pointer_layout = layout;
2509                     'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr()
2510                         && !fat_pointer_layout.ty.is_region_ptr()
2511                     {
2512                         for i in 0..fat_pointer_layout.fields.count() {
2513                             let field_layout = fat_pointer_layout.field(cx, i);
2514
2515                             if !field_layout.is_zst() {
2516                                 fat_pointer_layout = field_layout;
2517                                 continue 'descend_newtypes;
2518                             }
2519                         }
2520
2521                         bug!("receiver has no non-zero-sized fields {:?}", fat_pointer_layout);
2522                     }
2523
2524                     fat_pointer_layout.ty
2525                 };
2526
2527                 // We now have a type like `*mut RcBox<dyn Trait>`;
2528                 // change its layout to that of `*mut ()`, a thin pointer, but keep the same type.
2529                 // This is understood as a special case elsewhere in the compiler.
2530                 let unit_pointer_ty = cx.tcx().mk_mut_ptr(cx.tcx().mk_unit());
2531                 layout = cx.layout_of(unit_pointer_ty);
2532                 layout.ty = fat_pointer_ty;
2533             }
2534             ArgAbi::new(layout)
2535         })
2536     }
2537
2538     fn new_internal(
2539         cx: &C,
2540         sig: ty::PolyFnSig<'tcx>,
2541         extra_args: &[Ty<'tcx>],
2542         caller_location: Option<Ty<'tcx>>,
2543         codegen_fn_attr_flags: CodegenFnAttrFlags,
2544         mk_arg_type: impl Fn(Ty<'tcx>, Option<usize>) -> ArgAbi<'tcx, Ty<'tcx>>,
2545     ) -> Self {
2546         debug!("FnAbi::new_internal({:?}, {:?})", sig, extra_args);
2547
2548         let sig = cx.tcx().normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), sig);
2549
2550         use rustc_target::spec::abi::Abi::*;
2551         let conv = match cx.tcx().sess.target.adjust_abi(sig.abi) {
2552             RustIntrinsic | PlatformIntrinsic | Rust | RustCall => Conv::Rust,
2553
2554             // It's the ABI's job to select this, not ours.
2555             System => bug!("system abi should be selected elsewhere"),
2556             EfiApi => bug!("eficall abi should be selected elsewhere"),
2557
2558             Stdcall => Conv::X86Stdcall,
2559             Fastcall => Conv::X86Fastcall,
2560             Vectorcall => Conv::X86VectorCall,
2561             Thiscall => Conv::X86ThisCall,
2562             C => Conv::C,
2563             Unadjusted => Conv::C,
2564             Win64 => Conv::X86_64Win64,
2565             SysV64 => Conv::X86_64SysV,
2566             Aapcs => Conv::ArmAapcs,
2567             PtxKernel => Conv::PtxKernel,
2568             Msp430Interrupt => Conv::Msp430Intr,
2569             X86Interrupt => Conv::X86Intr,
2570             AmdGpuKernel => Conv::AmdGpuKernel,
2571             AvrInterrupt => Conv::AvrInterrupt,
2572             AvrNonBlockingInterrupt => Conv::AvrNonBlockingInterrupt,
2573
2574             // These API constants ought to be more specific...
2575             Cdecl => Conv::C,
2576         };
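        // For example (not from the original source), under this mapping an
        // `extern "stdcall" fn` lowers to `Conv::X86Stdcall`, while `extern "C"`
        // and `extern "cdecl"` functions both lower to `Conv::C`.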
2577
2578         let mut inputs = sig.inputs();
2579         let extra_args = if sig.abi == RustCall {
2580             assert!(!sig.c_variadic && extra_args.is_empty());
2581
2582             if let Some(input) = sig.inputs().last() {
2583                 if let ty::Tuple(tupled_arguments) = input.kind() {
2584                     inputs = &sig.inputs()[0..sig.inputs().len() - 1];
2585                     tupled_arguments.iter().map(|k| k.expect_ty()).collect()
2586                 } else {
2587                     bug!(
2588                         "argument to function with \"rust-call\" ABI \
2589                             is not a tuple"
2590                     );
2591                 }
2592             } else {
2593                 bug!(
2594                     "argument to function with \"rust-call\" ABI \
2595                         is not a tuple"
2596                 );
2597             }
2598         } else {
2599             assert!(sig.c_variadic || extra_args.is_empty());
2600             extra_args.to_vec()
2601         };
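        // Illustrative example (not from the original source): for
        // `extern "rust-call" fn call(&self, args: (u32, bool))`, the code above
        // leaves `inputs == [&Self]` and produces `extra_args == [u32, bool]`.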
2602
2603         let target = &cx.tcx().sess.target;
2604         let target_env_gnu_like = matches!(&target.env[..], "gnu" | "musl");
2605         let win_x64_gnu = target.os == "windows" && target.arch == "x86_64" && target.env == "gnu";
2606         let linux_s390x_gnu_like =
2607             target.os == "linux" && target.arch == "s390x" && target_env_gnu_like;
2608         let linux_sparc64_gnu_like =
2609             target.os == "linux" && target.arch == "sparc64" && target_env_gnu_like;
2610         let linux_powerpc_gnu_like =
2611             target.os == "linux" && target.arch == "powerpc" && target_env_gnu_like;
2612         let rust_abi = matches!(sig.abi, RustIntrinsic | PlatformIntrinsic | Rust | RustCall);
2613
2614         // Handle safe Rust thin and fat pointers.
2615         let adjust_for_rust_scalar = |attrs: &mut ArgAttributes,
2616                                       scalar: &Scalar,
2617                                       layout: TyAndLayout<'tcx>,
2618                                       offset: Size,
2619                                       is_return: bool| {
2620             // Booleans are always an i1 that needs to be zero-extended.
2621             if scalar.is_bool() {
2622                 attrs.set(ArgAttribute::ZExt);
2623                 return;
2624             }
2625
2626             // Only pointer types are handled below.
2627             if scalar.value != Pointer {
2628                 return;
2629             }
2630
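            // A non-wrapping valid range that excludes zero (as references and
            // `Box` have) lets the pointer be marked non-null.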
2631             if scalar.valid_range.start() < scalar.valid_range.end() {
2632                 if *scalar.valid_range.start() > 0 {
2633                     attrs.set(ArgAttribute::NonNull);
2634                 }
2635             }
2636
2637             if let Some(pointee) = layout.pointee_info_at(cx, offset) {
2638                 if let Some(kind) = pointee.safe {
2639                     attrs.pointee_align = Some(pointee.align);
2640
2641                     // `Box` (`UniqueOwned`) pointers are not necessarily dereferenceable
2642                     // for the entire duration of the function, as they can be deallocated
2643                     // at any time. Set their valid size to 0.
2644                     attrs.pointee_size = match kind {
2645                         PointerKind::UniqueOwned => Size::ZERO,
2646                         _ => pointee.size,
2647                     };
2648
2649                     // `Box` pointer parameters never alias because ownership is transferred.
2650                     // `&mut` pointer parameters never alias other parameters
2651                     // or mutable global data.
2652                     //
2653                     // `&T` where `T` contains no `UnsafeCell<U>` is immutable,
2654                     // and can be marked as both `readonly` and `noalias`, as
2655                     // LLVM's definition of `noalias` is based solely on memory
2656                     // dependencies rather than pointer equality
2657                     let no_alias = match kind {
2658                         PointerKind::Shared => false,
2659                         PointerKind::UniqueOwned => true,
2660                         PointerKind::Frozen | PointerKind::UniqueBorrowed => !is_return,
2661                     };
2662                     if no_alias {
2663                         attrs.set(ArgAttribute::NoAlias);
2664                     }
2665
2666                     if kind == PointerKind::Frozen && !is_return {
2667                         attrs.set(ArgAttribute::ReadOnly);
2668                     }
2669                 }
2670             }
2671         };
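        // Illustrative net effect (not from the original source) for common
        // non-return arguments:
        //     `&T` (no `UnsafeCell`) -> NonNull + NoAlias + ReadOnly, pointee size/align set
        //     `&mut T`               -> NonNull + NoAlias, pointee size/align set
        //     `Box<T>`               -> NonNull + NoAlias, pointee size forced to 0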
2672
2673         let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| {
2674             let is_return = arg_idx.is_none();
2675             let mut arg = mk_arg_type(ty, arg_idx);
2676             if arg.layout.is_zst() {
2677                 // For some forsaken reason, x86_64-pc-windows-gnu
2678                 // doesn't ignore zero-sized struct arguments.
2679                 // The same is true for {s390x,sparc64,powerpc}-unknown-linux-{gnu,musl}.
2680                 if is_return
2681                     || rust_abi
2682                     || (!win_x64_gnu
2683                         && !linux_s390x_gnu_like
2684                         && !linux_sparc64_gnu_like
2685                         && !linux_powerpc_gnu_like)
2686                 {
2687                     arg.mode = PassMode::Ignore;
2688                 }
2689             }
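            // For example (not from the original source), a zero-sized argument
            // such as `()` is dropped from the ABI (`PassMode::Ignore`) above for
            // any Rust-ABI function, but is still passed when calling, say, an
            // `extern "C" fn` on x86_64-pc-windows-gnu.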
2690
2691             // FIXME(eddyb) other ABIs don't have logic for scalar pairs.
2692             if !is_return && rust_abi {
2693                 if let Abi::ScalarPair(ref a, ref b) = arg.layout.abi {
2694                     let mut a_attrs = ArgAttributes::new();
2695                     let mut b_attrs = ArgAttributes::new();
2696                     adjust_for_rust_scalar(&mut a_attrs, a, arg.layout, Size::ZERO, false);
2697                     adjust_for_rust_scalar(
2698                         &mut b_attrs,
2699                         b,
2700                         arg.layout,
2701                         a.value.size(cx).align_to(b.value.align(cx).abi),
2702                         false,
2703                     );
2704                     arg.mode = PassMode::Pair(a_attrs, b_attrs);
2705                     return arg;
2706                 }
2707             }
2708
2709             if let Abi::Scalar(ref scalar) = arg.layout.abi {
2710                 if let PassMode::Direct(ref mut attrs) = arg.mode {
2711                     adjust_for_rust_scalar(attrs, scalar, arg.layout, Size::ZERO, is_return);
2712                 }
2713             }
2714
2715             arg
2716         };
2717
2718         let mut fn_abi = FnAbi {
2719             ret: arg_of(sig.output(), None),
2720             args: inputs
2721                 .iter()
2722                 .cloned()
2723                 .chain(extra_args)
2724                 .chain(caller_location)
2725                 .enumerate()
2726                 .map(|(i, ty)| arg_of(ty, Some(i)))
2727                 .collect(),
2728             c_variadic: sig.c_variadic,
2729             fixed_count: inputs.len(),
2730             conv,
2731             can_unwind: fn_can_unwind(cx.tcx().sess.panic_strategy(), codegen_fn_attr_flags, conv),
2732         };
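        // Illustrative shape (not from the original source): for a hypothetical
        // `#[track_caller] fn f(a: u8)`, `args` holds `u8` plus the appended
        // `&'static Location<'static>`, while `fixed_count` stays 1.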
2733         fn_abi.adjust_for_abi(cx, sig.abi);
2734         debug!("FnAbi::new_internal = {:?}", fn_abi);
2735         fn_abi
2736     }
2737
2738     fn adjust_for_abi(&mut self, cx: &C, abi: SpecAbi) {
2739         if abi == SpecAbi::Unadjusted {
2740             return;
2741         }
2742
2743         if abi == SpecAbi::Rust
2744             || abi == SpecAbi::RustCall
2745             || abi == SpecAbi::RustIntrinsic
2746             || abi == SpecAbi::PlatformIntrinsic
2747         {
2748             let fixup = |arg: &mut ArgAbi<'tcx, Ty<'tcx>>, is_ret: bool| {
2749                 if arg.is_ignore() {
2750                     return;
2751                 }
2752
2753                 match arg.layout.abi {
2754                     Abi::Aggregate { .. } => {}
2755
2756                     // This is a fun case! The gist of what this is doing is
2757                     // that we want callers and callees to always agree on the
2758                     // ABI of how they pass SIMD arguments. If we were to *not*
2759                     // make these arguments indirect then they'd be immediates
2760                     // in LLVM, which means that they'd use whatever the
2761                     // appropriate ABI is for the callee and the caller. That
2762                     // means, for example, if the caller doesn't have AVX
2763                     // enabled but the callee does, then passing an AVX argument
2764                     // across this boundary would cause corrupt data to show up.
2765                     //
2766                     // This problem is fixed by unconditionally passing SIMD
2767                     // arguments through memory between callers and callees
2768                     // which should get them all to agree on ABI regardless of
2769                     // target feature sets. Some more information about this
2770                     // issue can be found in #44367.
2771                     //
2772                     // Note that the platform intrinsic ABI is exempt here as
2773                     // that's how we connect up to LLVM and it's unstable
2774                     // anyway; we control all calls to it in libstd.
2775                     Abi::Vector { .. }
2776                         if abi != SpecAbi::PlatformIntrinsic
2777                             && cx.tcx().sess.target.simd_types_indirect =>
2778                     {
2779                         arg.make_indirect();
2780                         return;
2781                     }
2782
2783                     _ => return,
2784                 }
2785
2786                 // Return structures up to 2 pointers in size by value, matching `ScalarPair`. LLVM
2787                 // will usually return these in 2 registers, which is more efficient than by-ref.
2788                 let max_by_val_size = if is_ret { Pointer.size(cx) * 2 } else { Pointer.size(cx) };
2789                 let size = arg.layout.size;
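                // For example (not from the original source), on a 64-bit target a
                // 16-byte aggregate return is cast to a 16-byte integer register
                // class (an `i128` in LLVM), while a 24-byte one exceeds
                // `max_by_val_size` and is made indirect.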
2790
2791                 if arg.layout.is_unsized() || size > max_by_val_size {
2792                     arg.make_indirect();
2793                 } else {
2794                     // We want to pass small aggregates as immediates, but using
2795                     // an LLVM aggregate type for this leads to bad optimizations,
2796                     // so we pick an appropriately sized integer type instead.
2797                     arg.cast_to(Reg { kind: RegKind::Integer, size });
2798                 }
2799             };
2800             fixup(&mut self.ret, true);
2801             for arg in &mut self.args {
2802                 fixup(arg, false);
2803             }
2804             if let PassMode::Indirect(ref mut attrs, _) = self.ret.mode {
2805                 attrs.set(ArgAttribute::StructRet);
2806             }
2807             return;
2808         }
2809
2810         if let Err(msg) = self.adjust_for_cabi(cx, abi) {
2811             cx.tcx().sess.fatal(&msg);
2812         }
2813     }
2814 }