// src/librustc_middle/ty/layout.rs
use crate::ich::StableHashingContext;
use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use crate::mir::{GeneratorLayout, GeneratorSavedLocal};
use crate::ty::subst::Subst;
use crate::ty::{self, subst::SubstsRef, ReprOptions, Ty, TyCtxt, TypeFoldable};

use rustc_ast::ast::{self, IntTy, UintTy};
use rustc_attr as attr;
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_hir as hir;
use rustc_hir::lang_items::{GeneratorStateLangItem, PinTypeLangItem};
use rustc_index::bit_set::BitSet;
use rustc_index::vec::{Idx, IndexVec};
use rustc_session::{DataTypeKind, FieldInfo, SizeKind, VariantInfo};
use rustc_span::symbol::{Ident, Symbol};
use rustc_span::DUMMY_SP;
use rustc_target::abi::call::{
    ArgAbi, ArgAttribute, ArgAttributes, Conv, FnAbi, PassMode, Reg, RegKind,
};
use rustc_target::abi::*;
use rustc_target::spec::{abi::Abi as SpecAbi, HasTargetSpec, PanicStrategy};

use std::cmp;
use std::fmt;
use std::iter;
use std::mem;
use std::num::NonZeroUsize;
use std::ops::Bound;

pub trait IntegerExt {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx>;
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
    fn repr_discr<'tcx>(
        tcx: TyCtxt<'tcx>,
        ty: Ty<'tcx>,
        repr: &ReprOptions,
        min: i128,
        max: i128,
    ) -> (Integer, bool);
}

impl IntegerExt for Integer {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx> {
        match (*self, signed) {
            (I8, false) => tcx.types.u8,
            (I16, false) => tcx.types.u16,
            (I32, false) => tcx.types.u32,
            (I64, false) => tcx.types.u64,
            (I128, false) => tcx.types.u128,
            (I8, true) => tcx.types.i8,
            (I16, true) => tcx.types.i16,
            (I32, true) => tcx.types.i32,
            (I64, true) => tcx.types.i64,
            (I128, true) => tcx.types.i128,
        }
    }

    /// Gets the Integer type from an attr::IntType.
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
        let dl = cx.data_layout();

        match ity {
            attr::SignedInt(IntTy::I8) | attr::UnsignedInt(UintTy::U8) => I8,
            attr::SignedInt(IntTy::I16) | attr::UnsignedInt(UintTy::U16) => I16,
            attr::SignedInt(IntTy::I32) | attr::UnsignedInt(UintTy::U32) => I32,
            attr::SignedInt(IntTy::I64) | attr::UnsignedInt(UintTy::U64) => I64,
            attr::SignedInt(IntTy::I128) | attr::UnsignedInt(UintTy::U128) => I128,
            attr::SignedInt(IntTy::Isize) | attr::UnsignedInt(UintTy::Usize) => {
                dl.ptr_sized_integer()
            }
        }
    }

    /// Finds the appropriate Integer type and signedness for the given
    /// signed discriminant range and `#[repr]` attribute.
    /// N.B.: `u128` values above `i128::MAX` will be treated as signed, but
    /// that shouldn't affect anything, other than maybe debuginfo.
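    /// For example (an illustrative sketch, not part of the original docs):
    /// a range of `min = -1, max = 100` gives `signed_fit = I8`, so with no
    /// `#[repr]` hint this returns `(I8, true)`; with `#[repr(C)]`, the
    /// `at_least` floor below raises the result to `(I32, true)`.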
    fn repr_discr<'tcx>(
        tcx: TyCtxt<'tcx>,
        ty: Ty<'tcx>,
        repr: &ReprOptions,
        min: i128,
        max: i128,
    ) -> (Integer, bool) {
        // Theoretically, negative values could be larger in unsigned representation
        // than the unsigned representation of the signed minimum. However, if there
        // are any negative values, the only valid unsigned representation is u128
        // which can fit all i128 values, so the result remains unaffected.
        let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
        let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));

        let mut min_from_extern = None;
        let min_default = I8;

        if let Some(ity) = repr.int {
            let discr = Integer::from_attr(&tcx, ity);
            let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
            if discr < fit {
                bug!(
                    "Integer::repr_discr: `#[repr]` hint too small for \
                     discriminant range of enum `{}`",
                    ty
                )
            }
            return (discr, ity.is_signed());
        }

        if repr.c() {
            match &tcx.sess.target.target.arch[..] {
                // WARNING: the ARM EABI has two variants; the one corresponding
                // to `at_least == I32` appears to be used on Linux and NetBSD,
                // but some systems may use the variant corresponding to no
                // lower bound. However, we don't run on those yet...?
                "arm" => min_from_extern = Some(I32),
                _ => min_from_extern = Some(I32),
            }
        }

        let at_least = min_from_extern.unwrap_or(min_default);

        // If there are no negative values, we can use the unsigned fit.
        if min >= 0 {
            (cmp::max(unsigned_fit, at_least), false)
        } else {
            (cmp::max(signed_fit, at_least), true)
        }
    }
}

pub trait PrimitiveExt {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
    fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
}

impl PrimitiveExt for Primitive {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
        match *self {
            Int(i, signed) => i.to_ty(tcx, signed),
            F32 => tcx.types.f32,
            F64 => tcx.types.f64,
            Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
        }
    }

    /// Return an *integer* type matching this primitive.
    /// Useful in particular when dealing with enum discriminants.
    fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
        match *self {
            Int(i, signed) => i.to_ty(tcx, signed),
            Pointer => tcx.types.usize,
            F32 | F64 => bug!("floats do not have an int type"),
        }
    }
}

/// The first half of a fat pointer.
///
/// - For a trait object, this is the address of the box.
/// - For a slice, this is the base address.
pub const FAT_PTR_ADDR: usize = 0;

/// The second half of a fat pointer.
///
/// - For a trait object, this is the address of the vtable.
/// - For a slice, this is the length.
pub const FAT_PTR_EXTRA: usize = 1;
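
// Illustrative note (not from the original source): for `&[u8]`, word
// `FAT_PTR_ADDR` holds the data pointer and word `FAT_PTR_EXTRA` holds the
// length; for `&dyn Trait`, the second word holds the vtable pointer instead.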

#[derive(Copy, Clone, Debug, TyEncodable, TyDecodable)]
pub enum LayoutError<'tcx> {
    Unknown(Ty<'tcx>),
    SizeOverflow(Ty<'tcx>),
}

impl<'tcx> fmt::Display for LayoutError<'tcx> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match *self {
            LayoutError::Unknown(ty) => write!(f, "the type `{:?}` has an unknown layout", ty),
            LayoutError::SizeOverflow(ty) => {
                write!(f, "the type `{:?}` is too big for the current architecture", ty)
            }
        }
    }
}

fn layout_raw<'tcx>(
    tcx: TyCtxt<'tcx>,
    query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
) -> Result<&'tcx Layout, LayoutError<'tcx>> {
    ty::tls::with_related_context(tcx, move |icx| {
        let (param_env, ty) = query.into_parts();

        if !tcx.sess.recursion_limit().value_within_limit(icx.layout_depth) {
            tcx.sess.fatal(&format!("overflow representing the type `{}`", ty));
        }

        // Update the ImplicitCtxt to increase the layout_depth
        let icx = ty::tls::ImplicitCtxt { layout_depth: icx.layout_depth + 1, ..icx.clone() };

        ty::tls::enter_context(&icx, |_| {
            let cx = LayoutCx { tcx, param_env };
            let layout = cx.layout_raw_uncached(ty);
            // Type-level uninhabitedness should always imply ABI uninhabitedness.
            if let Ok(layout) = layout {
                if ty.conservative_is_privately_uninhabited(tcx) {
                    assert!(layout.abi.is_uninhabited());
                }
            }
            layout
        })
    })
}

pub fn provide(providers: &mut ty::query::Providers) {
    *providers = ty::query::Providers { layout_raw, ..*providers };
}

pub struct LayoutCx<'tcx, C> {
    pub tcx: C,
    pub param_env: ty::ParamEnv<'tcx>,
}

#[derive(Copy, Clone, Debug)]
enum StructKind {
    /// A tuple, closure, or univariant which cannot be coerced to unsized.
    AlwaysSized,
    /// A univariant, the last field of which may be coerced to unsized.
    MaybeUnsized,
    /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
    Prefixed(Size, Align),
}

// Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
// This is used to go between `memory_index` (source field order to memory order)
// and `inverse_memory_index` (memory order to source field order).
// See also `FieldsShape::Arbitrary::memory_index` for more details.
// FIXME(eddyb) build a better abstraction for permutations, if possible.
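// A small worked example (illustrative): `invert_mapping(&[1, 2, 0])` returns
// `[2, 0, 1]`, since `map[2] = 0`, `map[0] = 1`, and `map[1] = 2`.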
fn invert_mapping(map: &[u32]) -> Vec<u32> {
    let mut inverse = vec![0; map.len()];
    for i in 0..map.len() {
        inverse[map[i] as usize] = i as u32;
    }
    inverse
}

impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
    fn scalar_pair(&self, a: Scalar, b: Scalar) -> Layout {
        let dl = self.data_layout();
        let b_align = b.value.align(dl);
        let align = a.value.align(dl).max(b_align).max(dl.aggregate_align);
        let b_offset = a.value.size(dl).align_to(b_align.abi);
        let size = (b_offset + b.value.size(dl)).align_to(align.abi);
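        // Worked example (illustrative, not from the original source): for
        // `a = i8` and `b = u32`, `b_align` is 4 bytes, so `b_offset` is
        // `1.align_to(4) = 4` and `size` is `(4 + 4).align_to(4) = 8`.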

        // HACK(nox): We iter on `b` and then `a` because `max_by_key`
        // returns the last maximum.
        let largest_niche = Niche::from_scalar(dl, b_offset, b.clone())
            .into_iter()
            .chain(Niche::from_scalar(dl, Size::ZERO, a.clone()))
            .max_by_key(|niche| niche.available(dl));

        Layout {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Arbitrary {
                offsets: vec![Size::ZERO, b_offset],
                memory_index: vec![0, 1],
            },
            abi: Abi::ScalarPair(a, b),
            largest_niche,
            align,
            size,
        }
    }

    fn univariant_uninterned(
        &self,
        ty: Ty<'tcx>,
        fields: &[TyAndLayout<'_>],
        repr: &ReprOptions,
        kind: StructKind,
    ) -> Result<Layout, LayoutError<'tcx>> {
        let dl = self.data_layout();
        let pack = repr.pack;
        if pack.is_some() && repr.align.is_some() {
            bug!("struct cannot be packed and aligned");
        }

        let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };

        let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();

        let optimize = !repr.inhibit_struct_field_reordering_opt();
        if optimize {
            let end =
                if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
            let optimizing = &mut inverse_memory_index[..end];
            let field_align = |f: &TyAndLayout<'_>| {
                if let Some(pack) = pack { f.align.abi.min(pack) } else { f.align.abi }
            };
            match kind {
                StructKind::AlwaysSized | StructKind::MaybeUnsized => {
                    optimizing.sort_by_key(|&x| {
                        // Place ZSTs first to avoid "interesting offsets",
                        // especially with only one or two non-ZST fields.
                        let f = &fields[x as usize];
                        (!f.is_zst(), cmp::Reverse(field_align(f)))
                    });
                }
                StructKind::Prefixed(..) => {
                    // Sort in ascending alignment so that the layout stays
                    // optimal regardless of the prefix.
                    optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
                }
            }
        }
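
        // An illustrative example (not from the original source): for a
        // non-`repr(C)` struct with fields `(u8, u32, u16)`, the sort above
        // yields memory order `u32, u16, u8`, avoiding interior padding.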

        // inverse_memory_index holds field indices by increasing memory offset.
        // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
        // We now write field offsets to the corresponding offset slot;
        // field 5 with offset 0 puts 0 in offsets[5].
        // At the bottom of this function, we invert `inverse_memory_index` to
        // produce `memory_index` (see `invert_mapping`).
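        // Concretely (an illustrative sketch): with fields `(u8, u32)`
        // reordered so the `u32` comes first in memory,
        // `inverse_memory_index = [1, 0]`, and `memory_index = [1, 0]` too,
        // since this permutation is its own inverse.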

        let mut sized = true;
        let mut offsets = vec![Size::ZERO; fields.len()];
        let mut offset = Size::ZERO;
        let mut largest_niche = None;
        let mut largest_niche_available = 0;

        if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
            let prefix_align =
                if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
            align = align.max(AbiAndPrefAlign::new(prefix_align));
            offset = prefix_size.align_to(prefix_align);
        }

        for &i in &inverse_memory_index {
            let field = fields[i as usize];
            if !sized {
                bug!("univariant: field #{} of `{}` comes after unsized field", offsets.len(), ty);
            }

            if field.is_unsized() {
                sized = false;
            }

            // Invariant: offset < dl.obj_size_bound() <= 1<<61
            let field_align = if let Some(pack) = pack {
                field.align.min(AbiAndPrefAlign::new(pack))
            } else {
                field.align
            };
            offset = offset.align_to(field_align.abi);
            align = align.max(field_align);

            debug!("univariant offset: {:?} field: {:#?}", offset, field);
            offsets[i as usize] = offset;

            if !repr.hide_niche() {
                if let Some(mut niche) = field.largest_niche.clone() {
                    let available = niche.available(dl);
                    if available > largest_niche_available {
                        largest_niche_available = available;
                        niche.offset += offset;
                        largest_niche = Some(niche);
                    }
                }
            }

            offset = offset.checked_add(field.size, dl).ok_or(LayoutError::SizeOverflow(ty))?;
        }

        if let Some(repr_align) = repr.align {
            align = align.max(AbiAndPrefAlign::new(repr_align));
        }

        debug!("univariant min_size: {:?}", offset);
        let min_size = offset;

        // As stated above, inverse_memory_index holds field indices by increasing offset.
        // This makes it an already-sorted view of the offsets vec.
        // To invert it, consider:
        // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
        // Field 5 would be the first element, so memory_index is i:
        // Note: if we didn't optimize, it's already right.

        let memory_index =
            if optimize { invert_mapping(&inverse_memory_index) } else { inverse_memory_index };

        let size = min_size.align_to(align.abi);
        let mut abi = Abi::Aggregate { sized };

        // Unpack newtype ABIs and find scalar pairs.
        if sized && size.bytes() > 0 {
            // All other fields must be ZSTs, and we need them to all start at 0.
            let mut zst_offsets = offsets.iter().enumerate().filter(|&(i, _)| fields[i].is_zst());
            if zst_offsets.all(|(_, o)| o.bytes() == 0) {
                let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());

                match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
                    // We have exactly one non-ZST field.
                    (Some((i, field)), None, None) => {
                        // Field fills the struct and it has a scalar or scalar pair ABI.
                        if offsets[i].bytes() == 0
                            && align.abi == field.align.abi
                            && size == field.size
                        {
                            match field.abi {
                                // For plain scalars, or vectors of them, we can't unpack
                                // newtypes for `#[repr(C)]`, as that affects C ABIs.
                                Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
                                    abi = field.abi.clone();
                                }
                                // But scalar pairs are Rust-specific and get
                                // treated as aggregates by C ABIs anyway.
                                Abi::ScalarPair(..) => {
                                    abi = field.abi.clone();
                                }
                                _ => {}
                            }
                        }
                    }

                    // Two non-ZST fields, and they're both scalars.
                    (
                        Some((
                            i,
                            &TyAndLayout {
                                layout: &Layout { abi: Abi::Scalar(ref a), .. }, ..
                            },
                        )),
                        Some((
                            j,
                            &TyAndLayout {
                                layout: &Layout { abi: Abi::Scalar(ref b), .. }, ..
                            },
                        )),
                        None,
                    ) => {
                        // Order by the memory placement, not source order.
                        let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
                            ((i, a), (j, b))
                        } else {
                            ((j, b), (i, a))
                        };
                        let pair = self.scalar_pair(a.clone(), b.clone());
                        let pair_offsets = match pair.fields {
                            FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
                                assert_eq!(memory_index, &[0, 1]);
                                offsets
                            }
                            _ => bug!(),
                        };
                        if offsets[i] == pair_offsets[0]
                            && offsets[j] == pair_offsets[1]
                            && align == pair.align
                            && size == pair.size
                        {
                            // We can use `ScalarPair` only when it matches our
                            // already computed layout (including `#[repr(C)]`).
                            abi = pair.abi;
                        }
                    }

                    _ => {}
                }
            }
        }
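
        // Illustrative examples (not from the original source): a non-`repr(C)`
        // newtype like `struct Wrapper(u64)` fills the struct exactly and
        // inherits `Abi::Scalar`; a `(u32, u32)` pair matches the
        // `scalar_pair` layout computed above and becomes `Abi::ScalarPair`.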

        if sized && fields.iter().any(|f| f.abi.is_uninhabited()) {
            abi = Abi::Uninhabited;
        }

        Ok(Layout {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Arbitrary { offsets, memory_index },
            abi,
            largest_niche,
            align,
            size,
        })
    }

    fn layout_raw_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx Layout, LayoutError<'tcx>> {
        let tcx = self.tcx;
        let param_env = self.param_env;
        let dl = self.data_layout();
        let scalar_unit = |value: Primitive| {
            let bits = value.size(dl).bits();
            assert!(bits <= 128);
            Scalar { value, valid_range: 0..=(!0 >> (128 - bits)) }
        };
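        // For example (illustrative): `scalar_unit(Int(I8, false))` yields a
        // scalar with `valid_range: 0..=0xff`, i.e. the full range, no niche.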
        let scalar = |value: Primitive| tcx.intern_layout(Layout::scalar(self, scalar_unit(value)));

        let univariant = |fields: &[TyAndLayout<'_>], repr: &ReprOptions, kind| {
            Ok(tcx.intern_layout(self.univariant_uninterned(ty, fields, repr, kind)?))
        };
        debug_assert!(!ty.has_infer_types_or_consts());

        Ok(match ty.kind {
            // Basic scalars.
            ty::Bool => tcx.intern_layout(Layout::scalar(
                self,
                Scalar { value: Int(I8, false), valid_range: 0..=1 },
            )),
            ty::Char => tcx.intern_layout(Layout::scalar(
                self,
                Scalar { value: Int(I32, false), valid_range: 0..=0x10FFFF },
            )),
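            // Note (illustrative, not from the original source): the restricted
            // valid ranges above are what give `bool` and `char` their niches;
            // e.g. `Option<bool>` can encode `None` as an invalid byte value
            // such as 2 and stay one byte in size.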
            ty::Int(ity) => scalar(Int(Integer::from_attr(dl, attr::SignedInt(ity)), true)),
            ty::Uint(ity) => scalar(Int(Integer::from_attr(dl, attr::UnsignedInt(ity)), false)),
            ty::Float(fty) => scalar(match fty {
                ast::FloatTy::F32 => F32,
                ast::FloatTy::F64 => F64,
            }),
            ty::FnPtr(_) => {
                let mut ptr = scalar_unit(Pointer);
                ptr.valid_range = 1..=*ptr.valid_range.end();
                tcx.intern_layout(Layout::scalar(self, ptr))
            }

            // The never type.
            ty::Never => tcx.intern_layout(Layout {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldsShape::Primitive,
                abi: Abi::Uninhabited,
                largest_niche: None,
                align: dl.i8_align,
                size: Size::ZERO,
            }),

            // Potentially-wide pointers.
            ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                let mut data_ptr = scalar_unit(Pointer);
                if !ty.is_unsafe_ptr() {
                    data_ptr.valid_range = 1..=*data_ptr.valid_range.end();
                }

                let pointee = tcx.normalize_erasing_regions(param_env, pointee);
                if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
                    return Ok(tcx.intern_layout(Layout::scalar(self, data_ptr)));
                }

                let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
                let metadata = match unsized_part.kind {
                    ty::Foreign(..) => {
                        return Ok(tcx.intern_layout(Layout::scalar(self, data_ptr)));
                    }
                    ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)),
                    ty::Dynamic(..) => {
                        let mut vtable = scalar_unit(Pointer);
                        vtable.valid_range = 1..=*vtable.valid_range.end();
                        vtable
                    }
                    _ => return Err(LayoutError::Unknown(unsized_part)),
                };

                // Effectively a (ptr, meta) tuple.
                tcx.intern_layout(self.scalar_pair(data_ptr, metadata))
            }

            // Arrays and slices.
            ty::Array(element, mut count) => {
                if count.has_projections() {
                    count = tcx.normalize_erasing_regions(param_env, count);
                    if count.has_projections() {
                        return Err(LayoutError::Unknown(ty));
                    }
                }

                let count = count.try_eval_usize(tcx, param_env).ok_or(LayoutError::Unknown(ty))?;
                let element = self.layout_of(element)?;
                let size =
                    element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;

                let abi = if count != 0 && ty.conservative_is_privately_uninhabited(tcx) {
                    Abi::Uninhabited
                } else {
                    Abi::Aggregate { sized: true }
                };

                let largest_niche = if count != 0 { element.largest_niche.clone() } else { None };

                tcx.intern_layout(Layout {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldsShape::Array { stride: element.size, count },
                    abi,
                    largest_niche,
                    align: element.align,
                    size,
                })
            }
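            // For example (illustrative): `[u16; 3]` has stride 2, count 3,
            // align 2, and size 6; a zero-length array keeps the element's
            // alignment but contributes no niche.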
            ty::Slice(element) => {
                let element = self.layout_of(element)?;
                tcx.intern_layout(Layout {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldsShape::Array { stride: element.size, count: 0 },
                    abi: Abi::Aggregate { sized: false },
                    largest_niche: None,
                    align: element.align,
                    size: Size::ZERO,
                })
            }
            ty::Str => tcx.intern_layout(Layout {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
                abi: Abi::Aggregate { sized: false },
                largest_niche: None,
                align: dl.i8_align,
                size: Size::ZERO,
            }),

            // Odd unit types.
            ty::FnDef(..) => univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?,
            ty::Dynamic(..) | ty::Foreign(..) => {
                let mut unit = self.univariant_uninterned(
                    ty,
                    &[],
                    &ReprOptions::default(),
                    StructKind::AlwaysSized,
                )?;
                match unit.abi {
                    Abi::Aggregate { ref mut sized } => *sized = false,
                    _ => bug!(),
                }
                tcx.intern_layout(unit)
            }

            ty::Generator(def_id, substs, _) => self.generator_layout(ty, def_id, substs)?,

            ty::Closure(_, ref substs) => {
                let tys = substs.as_closure().upvar_tys();
                univariant(
                    &tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    StructKind::AlwaysSized,
                )?
            }

            ty::Tuple(tys) => {
                let kind =
                    if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };

                univariant(
                    &tys.iter()
                        .map(|k| self.layout_of(k.expect_ty()))
                        .collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    kind,
                )?
            }

            // SIMD vector types.
            ty::Adt(def, ..) if def.repr.simd() => {
                let element = self.layout_of(ty.simd_type(tcx))?;
                let count = ty.simd_size(tcx);
                assert!(count > 0);
                let scalar = match element.abi {
                    Abi::Scalar(ref scalar) => scalar.clone(),
                    _ => {
                        tcx.sess.fatal(&format!(
                            "monomorphising SIMD type `{}` with \
                             a non-machine element type `{}`",
                            ty, element.ty
                        ));
                    }
                };
                let size =
                    element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;
                let align = dl.vector_align(size);
                let size = size.align_to(align.abi);

                tcx.intern_layout(Layout {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldsShape::Array { stride: element.size, count },
                    abi: Abi::Vector { element: scalar, count },
                    largest_niche: element.largest_niche.clone(),
                    size,
                    align,
                })
            }

            // ADTs.
            ty::Adt(def, substs) => {
                // Cache the field layouts.
                let variants = def
                    .variants
                    .iter()
                    .map(|v| {
                        v.fields
                            .iter()
                            .map(|field| self.layout_of(field.ty(tcx, substs)))
                            .collect::<Result<Vec<_>, _>>()
                    })
                    .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                if def.is_union() {
                    if def.repr.pack.is_some() && def.repr.align.is_some() {
                        bug!("union cannot be packed and aligned");
                    }

                    let mut align =
                        if def.repr.pack.is_some() { dl.i8_align } else { dl.aggregate_align };

                    if let Some(repr_align) = def.repr.align {
                        align = align.max(AbiAndPrefAlign::new(repr_align));
                    }

                    let optimize = !def.repr.inhibit_union_abi_opt();
                    let mut size = Size::ZERO;
                    let mut abi = Abi::Aggregate { sized: true };
                    let index = VariantIdx::new(0);
                    for field in &variants[index] {
                        assert!(!field.is_unsized());
                        align = align.max(field.align);

                        // If all non-ZST fields have the same ABI, forward this ABI
                        if optimize && !field.is_zst() {
                            // Normalize scalar_unit to the maximal valid range
                            let field_abi = match &field.abi {
                                Abi::Scalar(x) => Abi::Scalar(scalar_unit(x.value)),
                                Abi::ScalarPair(x, y) => {
                                    Abi::ScalarPair(scalar_unit(x.value), scalar_unit(y.value))
                                }
                                Abi::Vector { element: x, count } => {
                                    Abi::Vector { element: scalar_unit(x.value), count: *count }
                                }
                                Abi::Uninhabited | Abi::Aggregate { .. } => {
                                    Abi::Aggregate { sized: true }
                                }
                            };

                            if size == Size::ZERO {
                                // first non ZST: initialize 'abi'
                                abi = field_abi;
                            } else if abi != field_abi {
                                // different fields have different ABI: reset to Aggregate
                                abi = Abi::Aggregate { sized: true };
                            }
                        }

                        size = cmp::max(size, field.size);
                    }
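
                    // Illustrative (not from the original source): in
                    // `union U { a: u32, b: char }` both fields normalize to a
                    // full-range `Int(I32, false)` scalar, so the union keeps
                    // `Abi::Scalar`; in `union V { a: u32, b: f32 }` the
                    // primitives differ, so the ABI resets to `Aggregate`.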

                    if let Some(pack) = def.repr.pack {
                        align = align.min(AbiAndPrefAlign::new(pack));
                    }

                    return Ok(tcx.intern_layout(Layout {
                        variants: Variants::Single { index },
                        fields: FieldsShape::Union(
                            NonZeroUsize::new(variants[index].len())
                                .ok_or(LayoutError::Unknown(ty))?,
                        ),
                        abi,
                        largest_niche: None,
                        align,
                        size: size.align_to(align.abi),
                    }));
                }

                // A variant is absent if it's uninhabited and only has ZST fields.
                // Present uninhabited variants only require space for their fields,
                // but *not* an encoding of the discriminant (e.g., a tag value).
                // See issue #49298 for more details on the need to leave space
                // for non-ZST uninhabited data (mostly partial initialization).
                let absent = |fields: &[TyAndLayout<'_>]| {
                    let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
                    let is_zst = fields.iter().all(|f| f.is_zst());
                    uninhabited && is_zst
                };
                let (present_first, present_second) = {
                    let mut present_variants = variants
                        .iter_enumerated()
                        .filter_map(|(i, v)| if absent(v) { None } else { Some(i) });
                    (present_variants.next(), present_variants.next())
                };
                let present_first = match present_first {
                    Some(present_first) => present_first,
                    // Uninhabited because it has no variants, or only absent ones.
                    None if def.is_enum() => return tcx.layout_raw(param_env.and(tcx.types.never)),
                    // If it's a struct, still compute a layout so that we can still compute the
                    // field offsets.
                    None => VariantIdx::new(0),
                };

                let is_struct = !def.is_enum() ||
                    // Only one variant is present.
                    (present_second.is_none() &&
                    // Representation optimizations are allowed.
                    !def.repr.inhibit_enum_layout_opt());
                if is_struct {
                    // Struct, or univariant enum equivalent to a struct.
                    // (Typechecking will reject discriminant-sizing attrs.)

                    let v = present_first;
                    let kind = if def.is_enum() || variants[v].is_empty() {
                        StructKind::AlwaysSized
                    } else {
                        let param_env = tcx.param_env(def.did);
                        let last_field = def.variants[v].fields.last().unwrap();
                        let always_sized =
                            tcx.type_of(last_field.did).is_sized(tcx.at(DUMMY_SP), param_env);
                        if !always_sized {
                            StructKind::MaybeUnsized
                        } else {
                            StructKind::AlwaysSized
                        }
                    };

                    let mut st = self.univariant_uninterned(ty, &variants[v], &def.repr, kind)?;
                    st.variants = Variants::Single { index: v };
                    let (start, end) = self.tcx.layout_scalar_valid_range(def.did);
                    match st.abi {
                        Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
                            // the asserts ensure that we are not using the
                            // `#[rustc_layout_scalar_valid_range(n)]`
                            // attribute to widen the range of anything as that would probably
                            // result in UB somewhere
                            // FIXME(eddyb) the asserts are probably not needed,
                            // as larger validity ranges would result in missed
                            // optimizations, *not* wrongly assuming the inner
                            // value is valid. e.g. unions enlarge validity ranges,
                            // because the values may be uninitialized.
                            if let Bound::Included(start) = start {
                                // FIXME(eddyb) this might be incorrect - it doesn't
                                // account for wrap-around (end < start) ranges.
                                assert!(*scalar.valid_range.start() <= start);
                                scalar.valid_range = start..=*scalar.valid_range.end();
                            }
                            if let Bound::Included(end) = end {
                                // FIXME(eddyb) this might be incorrect - it doesn't
                                // account for wrap-around (end < start) ranges.
                                assert!(*scalar.valid_range.end() >= end);
                                scalar.valid_range = *scalar.valid_range.start()..=end;
                            }

                            // Update `largest_niche` if we have introduced a larger niche.
                            let niche = if def.repr.hide_niche() {
                                None
                            } else {
                                Niche::from_scalar(dl, Size::ZERO, scalar.clone())
                            };
                            if let Some(niche) = niche {
                                match &st.largest_niche {
                                    Some(largest_niche) => {
                                        // Replace the existing niche even if they're equal,
                                        // because this one is at a lower offset.
                                        if largest_niche.available(dl) <= niche.available(dl) {
                                            st.largest_niche = Some(niche);
                                        }
                                    }
                                    None => st.largest_niche = Some(niche),
                                }
                            }
                        }
                        _ => assert!(
                            start == Bound::Unbounded && end == Bound::Unbounded,
                            "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
                            def,
                            st,
                        ),
                    }

                    return Ok(tcx.intern_layout(st));
                }

                // At this point, we have handled all unions and
                // structs. (We have also handled univariant enums
                // that allow representation optimization.)
                assert!(def.is_enum());

                // The current code for niche-filling relies on variant indices
                // instead of actual discriminants, so dataful enums with
                // explicit discriminants (RFC #2363) would misbehave.
                let no_explicit_discriminants = def
                    .variants
                    .iter_enumerated()
                    .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i.as_u32()));

                let mut niche_filling_layout = None;

                // Niche-filling enum optimization.
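                // Illustrative (not from the original source): for `Option<&T>`,
                // `Some` is the dataful variant and `None` is encoded in the
                // pointer's forbidden value 0, so the enum stays pointer-sized
                // with no separate tag field.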
                if !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants {
                    let mut dataful_variant = None;
                    let mut niche_variants = VariantIdx::MAX..=VariantIdx::new(0);

                    // Find one non-ZST variant.
                    'variants: for (v, fields) in variants.iter_enumerated() {
                        if absent(fields) {
                            continue 'variants;
                        }
                        for f in fields {
                            if !f.is_zst() {
                                if dataful_variant.is_none() {
                                    dataful_variant = Some(v);
                                    continue 'variants;
                                } else {
                                    dataful_variant = None;
                                    break 'variants;
                                }
                            }
                        }
                        niche_variants = *niche_variants.start().min(&v)..=v;
                    }

                    if niche_variants.start() > niche_variants.end() {
                        dataful_variant = None;
                    }

                    if let Some(i) = dataful_variant {
                        let count = (niche_variants.end().as_u32()
                            - niche_variants.start().as_u32()
                            + 1) as u128;

                        // Find the field with the largest niche
                        let niche_candidate = variants[i]
                            .iter()
                            .enumerate()
                            .filter_map(|(j, &field)| Some((j, field.largest_niche.as_ref()?)))
                            .max_by_key(|(_, niche)| niche.available(dl));

                        if let Some((field_index, niche, (niche_start, niche_scalar))) =
                            niche_candidate.and_then(|(field_index, niche)| {
                                Some((field_index, niche, niche.reserve(self, count)?))
                            })
                        {
                            let mut align = dl.aggregate_align;
                            let st = variants
                                .iter_enumerated()
                                .map(|(j, v)| {
                                    let mut st = self.univariant_uninterned(
                                        ty,
                                        v,
                                        &def.repr,
                                        StructKind::AlwaysSized,
                                    )?;
                                    st.variants = Variants::Single { index: j };

                                    align = align.max(st.align);

                                    Ok(st)
                                })
                                .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                            let offset = st[i].fields.offset(field_index) + niche.offset;
                            let size = st[i].size;

                            let abi = if st.iter().all(|v| v.abi.is_uninhabited()) {
                                Abi::Uninhabited
                            } else {
                                match st[i].abi {
                                    Abi::Scalar(_) => Abi::Scalar(niche_scalar.clone()),
                                    Abi::ScalarPair(ref first, ref second) => {
                                        // We need to use scalar_unit to reset the
                                        // valid range to the maximal one for that
                                        // primitive, because only the niche is
                                        // guaranteed to be initialised, not the
                                        // other primitive.
                                        if offset.bytes() == 0 {
                                            Abi::ScalarPair(
                                                niche_scalar.clone(),
                                                scalar_unit(second.value),
                                            )
                                        } else {
                                            Abi::ScalarPair(
                                                scalar_unit(first.value),
                                                niche_scalar.clone(),
                                            )
                                        }
                                    }
                                    _ => Abi::Aggregate { sized: true },
                                }
                            };

                            let largest_niche =
                                Niche::from_scalar(dl, offset, niche_scalar.clone());

                            niche_filling_layout = Some(Layout {
                                variants: Variants::Multiple {
                                    tag: niche_scalar,
                                    tag_encoding: TagEncoding::Niche {
                                        dataful_variant: i,
                                        niche_variants,
                                        niche_start,
                                    },
                                    tag_field: 0,
                                    variants: st,
                                },
                                fields: FieldsShape::Arbitrary {
                                    offsets: vec![offset],
                                    memory_index: vec![0],
                                },
                                abi,
                                largest_niche,
                                size,
                                align,
                            });
                        }
                    }
                }

                let (mut min, mut max) = (i128::MAX, i128::MIN);
                let discr_type = def.repr.discr_type();
                let bits = Integer::from_attr(self, discr_type).size().bits();
                for (i, discr) in def.discriminants(tcx) {
                    if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
                        continue;
                    }
                    let mut x = discr.val as i128;
                    if discr_type.is_signed() {
                        // sign extend the raw representation to be an i128
                        x = (x << (128 - bits)) >> (128 - bits);
                    }
                    if x < min {
                        min = x;
                    }
                    if x > max {
                        max = x;
                    }
                }
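                // Worked example (illustrative, not from the original source):
                // with `repr(i8)` (`bits = 8`), a raw discriminant value of 255
                // sign-extends via `(255 << 120) >> 120` to `-1`.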
                // We might have no inhabited variants, so pretend there's at least one.
                if (min, max) == (i128::MAX, i128::MIN) {
                    min = 0;
                    max = 0;
                }
                assert!(min <= max, "discriminant range is {}...{}", min, max);
                let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max);

                let mut align = dl.aggregate_align;
                let mut size = Size::ZERO;

                // We're interested in the smallest alignment, so start large.
                let mut start_align = Align::from_bytes(256).unwrap();
                assert_eq!(Integer::for_align(dl, start_align), None);

                // repr(C) on an enum tells us to make a (tag, union) layout,
                // so we need to grow the prefix alignment to be at least
                // the alignment of the union. (This value is used both for
                // determining the alignment of the overall enum, and the
                // determining the alignment of the payload after the tag.)
                let mut prefix_align = min_ity.align(dl).abi;
                if def.repr.c() {
                    for fields in &variants {
                        for field in fields {
                            prefix_align = prefix_align.max(field.align.abi);
                        }
                    }
                }

                // Create the set of structs that represent each variant.
                let mut layout_variants = variants
                    .iter_enumerated()
                    .map(|(i, field_layouts)| {
                        let mut st = self.univariant_uninterned(
                            ty,
                            &field_layouts,
                            &def.repr,
                            StructKind::Prefixed(min_ity.size(), prefix_align),
                        )?;
                        st.variants = Variants::Single { index: i };
                        // Find the first field we can't move later
                        // to make room for a larger discriminant.
                        for field in
                            st.fields.index_by_increasing_offset().map(|j| field_layouts[j])
                        {
                            if !field.is_zst() || field.align.abi.bytes() != 1 {
                                start_align = start_align.min(field.align.abi);
                                break;
                            }
                        }
                        size = cmp::max(size, st.size);
                        align = align.max(st.align);
                        Ok(st)
                    })
                    .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                // Align the maximum variant size to the largest alignment.
                size = size.align_to(align.abi);

                if size.bytes() >= dl.obj_size_bound() {
                    return Err(LayoutError::SizeOverflow(ty));
                }

                let typeck_ity = Integer::from_attr(dl, def.repr.discr_type());
                if typeck_ity < min_ity {
                    // It is a bug if Layout decided on a greater discriminant size than typeck at
                    // this point (based on the values the discriminant can take on), mostly
                    // because this discriminant will be loaded, and then stored into a variable of
                    // the type calculated by typeck. Consider such a case (a bug): typeck decided
                    // on a byte-sized discriminant, but layout thinks we need 16 bits to store all
                    // discriminant values. Then, in codegen, in order to store this 16-bit
                    // discriminant into an 8-bit sized temporary, some of the space necessary to
                    // represent the value would have to be discarded (or layout is wrong in
                    // thinking it needs 16 bits).
                    bug!(
                        "layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
                        min_ity,
                        typeck_ity
                    );
                    // However, it is fine to make the discriminant type however large (as an
                    // optimisation) after this point; we'll just truncate the value we load in
                    // codegen.
                }

                // Check to see if we should use a different type for the
                // discriminant. We can safely use a type with the same size
                // as the alignment of the first field of each variant.
                // We increase the size of the discriminant to avoid LLVM copying
                // padding when it doesn't need to. This normally causes unaligned
                // load/stores and excessive memcpy/memset operations. By using a
                // bigger integer size, LLVM can be sure about its contents and
                // won't be so conservative.

                // Use the initial field alignment
                let mut ity = if def.repr.c() || def.repr.int.is_some() {
                    min_ity
                } else {
                    Integer::for_align(dl, start_align).unwrap_or(min_ity)
                };

                // If the alignment is not larger than the chosen discriminant size,
                // don't use the alignment as the final size.
                if ity <= min_ity {
                    ity = min_ity;
                } else {
                    // Patch up the variants' first few fields.
                    let old_ity_size = min_ity.size();
                    let new_ity_size = ity.size();
                    for variant in &mut layout_variants {
                        match variant.fields {
                            FieldsShape::Arbitrary { ref mut offsets, .. } => {
                                for i in offsets {
                                    if *i <= old_ity_size {
                                        assert_eq!(*i, old_ity_size);
                                        *i = new_ity_size;
                                    }
                                }
                                // We might be making the struct larger.
                                if variant.size <= old_ity_size {
                                    variant.size = new_ity_size;
                                }
                            }
                            _ => bug!(),
                        }
                    }
                }

                let tag_mask = !0u128 >> (128 - ity.size().bits());
                let tag = Scalar {
                    value: Int(ity, signed),
                    valid_range: (min as u128 & tag_mask)..=(max as u128 & tag_mask),
                };
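                // Illustrative (not from the original source): with `ity = I8`,
                // `min = -1`, and `max = 2`, `tag_mask` is `0xff` and the valid
                // range is the wrap-around range `255..=2`.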
                let mut abi = Abi::Aggregate { sized: true };
                if tag.value.size(dl) == size {
                    abi = Abi::Scalar(tag.clone());
                } else {
                    // Try to use a ScalarPair for all tagged enums.
                    let mut common_prim = None;
                    for (field_layouts, layout_variant) in variants.iter().zip(&layout_variants) {
                        let offsets = match layout_variant.fields {
                            FieldsShape::Arbitrary { ref offsets, .. } => offsets,
                            _ => bug!(),
                        };
                        let mut fields =
                            field_layouts.iter().zip(offsets).filter(|p| !p.0.is_zst());
                        let (field, offset) = match (fields.next(), fields.next()) {
                            (None, None) => continue,
                            (Some(pair), None) => pair,
                            _ => {
                                common_prim = None;
                                break;
                            }
                        };
                        let prim = match field.abi {
                            Abi::Scalar(ref scalar) => scalar.value,
                            _ => {
                                common_prim = None;
                                break;
                            }
                        };
                        if let Some(pair) = common_prim {
                            // This is pretty conservative. We could go fancier
                            // by conflating things like i32 and u32, or even
                            // realising that (u8, u8) could just cohabit with
                            // u16 or even u32.
                            if pair != (prim, offset) {
                                common_prim = None;
                                break;
                            }
                        } else {
                            common_prim = Some((prim, offset));
                        }
                    }
1192                     if let Some((prim, offset)) = common_prim {
1193                         let pair = self.scalar_pair(tag.clone(), scalar_unit(prim));
1194                         let pair_offsets = match pair.fields {
1195                             FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
1196                                 assert_eq!(memory_index, &[0, 1]);
1197                                 offsets
1198                             }
1199                             _ => bug!(),
1200                         };
1201                         if pair_offsets[0] == Size::ZERO
1202                             && pair_offsets[1] == *offset
1203                             && align == pair.align
1204                             && size == pair.size
1205                         {
1206                             // We can use `ScalarPair` only when it matches our
1207                             // already computed layout (including `#[repr(C)]`).
1208                             abi = pair.abi;
1209                         }
1210                     }
1211                 }
1212
1213                 if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
1214                     abi = Abi::Uninhabited;
1215                 }
1216
1217                 let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag.clone());
1218
1219                 let tagged_layout = Layout {
1220                     variants: Variants::Multiple {
1221                         tag,
1222                         tag_encoding: TagEncoding::Direct,
1223                         tag_field: 0,
1224                         variants: layout_variants,
1225                     },
1226                     fields: FieldsShape::Arbitrary {
1227                         offsets: vec![Size::ZERO],
1228                         memory_index: vec![0],
1229                     },
1230                     largest_niche,
1231                     abi,
1232                     align,
1233                     size,
1234                 };
1235
1236                 let best_layout = match (tagged_layout, niche_filling_layout) {
1237                     (tagged_layout, Some(niche_filling_layout)) => {
1238                         // Pick the smaller layout; otherwise,
1239                         // pick the layout with the larger niche; otherwise,
1240                         // pick tagged as it has simpler codegen.
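                        // E.g. for `Option<&u8>` the 8-byte niche-filling layout
                        // (using the null niche) beats the 16-byte tagged one.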
1241                         cmp::min_by_key(tagged_layout, niche_filling_layout, |layout| {
1242                             let niche_size =
1243                                 layout.largest_niche.as_ref().map_or(0, |n| n.available(dl));
1244                             (layout.size, cmp::Reverse(niche_size))
1245                         })
1246                     }
1247                     (tagged_layout, None) => tagged_layout,
1248                 };
1249
1250                 tcx.intern_layout(best_layout)
1251             }
1252
1253             // Types with no meaningful known layout.
1254             ty::Projection(_) | ty::Opaque(..) => {
1255                 let normalized = tcx.normalize_erasing_regions(param_env, ty);
1256                 if ty == normalized {
1257                     return Err(LayoutError::Unknown(ty));
1258                 }
1259                 tcx.layout_raw(param_env.and(normalized))?
1260             }
1261
1262             ty::Bound(..) | ty::Placeholder(..) | ty::GeneratorWitness(..) | ty::Infer(_) => {
1263                 bug!("Layout::compute: unexpected type `{}`", ty)
1264             }
1265
1266             ty::Param(_) | ty::Error(_) => {
1267                 return Err(LayoutError::Unknown(ty));
1268             }
1269         })
1270     }
1271 }
1272
1273 /// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
1274 #[derive(Clone, Debug, PartialEq)]
1275 enum SavedLocalEligibility {
1276     Unassigned,
1277     Assigned(VariantIdx),
1278     // FIXME: Use newtype_index so we aren't wasting bytes
1279     Ineligible(Option<u32>),
1280 }
1281
1282 // When laying out generators, we divide our saved local fields into two
1283 // categories: overlap-eligible and overlap-ineligible.
1284 //
1285 // Those fields which are ineligible for overlap go in a "prefix" at the
1286 // beginning of the layout, and always have space reserved for them.
1287 //
1288 // Overlap-eligible fields are only assigned to one variant, so we lay
1289 // those fields out for each variant and put them right after the
1290 // prefix.
1291 //
1292 // Finally, in the layout details, we point to the fields from the
1293 // variants they are assigned to. It is possible for some fields to be
1294 // included in multiple variants. No field ever "moves around" in the
1295 // layout; its offset is always the same.
1296 //
1297 // Also included in the layout are the upvars and the discriminant.
1298 // These are included as fields on the "outer" layout; they are not part
1299 // of any variant.
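// A sketch (not from this file): in `|| { let a = f(); yield; g(&a); yield; }`,
// `a` is live across two suspension points, so it is a field of two variants
// and gets promoted to the prefix; a local live across only one yield stays
// overlap-eligible and is laid out inside that single variant.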
1300 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
1301     /// Compute the eligibility and assignment of each local.
1302     fn generator_saved_local_eligibility(
1303         &self,
1304         info: &GeneratorLayout<'tcx>,
1305     ) -> (BitSet<GeneratorSavedLocal>, IndexVec<GeneratorSavedLocal, SavedLocalEligibility>) {
1306         use SavedLocalEligibility::*;
1307
1308         let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> =
1309             IndexVec::from_elem_n(Unassigned, info.field_tys.len());
1310
1311         // The saved locals not eligible for overlap. These will get
1312         // "promoted" to the prefix of our generator.
1313         let mut ineligible_locals = BitSet::new_empty(info.field_tys.len());
1314
1315         // Figure out which of our saved locals are fields in only
1316         // one variant. The rest are deemed ineligible for overlap.
1317         for (variant_index, fields) in info.variant_fields.iter_enumerated() {
1318             for local in fields {
1319                 match assignments[*local] {
1320                     Unassigned => {
1321                         assignments[*local] = Assigned(variant_index);
1322                     }
1323                     Assigned(idx) => {
1324                         // We've already seen this local at another suspension
1325                         // point, so it is no longer a candidate.
1326                         trace!(
1327                             "removing local {:?} in >1 variant ({:?}, {:?})",
1328                             local,
1329                             variant_index,
1330                             idx
1331                         );
1332                         ineligible_locals.insert(*local);
1333                         assignments[*local] = Ineligible(None);
1334                     }
1335                     Ineligible(_) => {}
1336                 }
1337             }
1338         }
1339
1340         // Next, check every pair of eligible locals to see if they
1341         // conflict.
1342         for local_a in info.storage_conflicts.rows() {
1343             let conflicts_a = info.storage_conflicts.count(local_a);
1344             if ineligible_locals.contains(local_a) {
1345                 continue;
1346             }
1347
1348             for local_b in info.storage_conflicts.iter(local_a) {
1349                 // local_a and local_b are storage-live at the same time, therefore they
1350                 // cannot overlap in the generator layout. Non-overlap is already
1351                 // guaranteed if they are in the same variant, or if one is ineligible
1352                 // (which means it is stored in every variant).
1353                 if ineligible_locals.contains(local_b)
1354                     || assignments[local_a] == assignments[local_b]
1355                 {
1356                     continue;
1357                 }
1358
1359                 // These two locals conflict, so we choose one to make ineligible.
1360                 // This is not always optimal; it's just a greedy heuristic that
1361                 // seems to produce good results most of the time.
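                // E.g. if `a` conflicts with both `b` and `c`, which don't
                // conflict with each other, `a` has the higher conflict count
                // and is removed, leaving `b` and `c` free to overlap.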
1362                 let conflicts_b = info.storage_conflicts.count(local_b);
1363                 let (remove, other) =
1364                     if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
1365                 ineligible_locals.insert(remove);
1366                 assignments[remove] = Ineligible(None);
1367                 trace!("removing local {:?} due to conflict with {:?}", remove, other);
1368             }
1369         }
1370
1371         // Count the number of variants in use. If fewer than two are used, it is
1372         // impossible for any locals to overlap in our layout. In that case it's
1373         // always better to make the remaining locals ineligible, so we can
1374         // lay them out with the other locals in the prefix and eliminate
1375         // unnecessary padding bytes.
1376         {
1377             let mut used_variants = BitSet::new_empty(info.variant_fields.len());
1378             for assignment in &assignments {
1379                 if let Assigned(idx) = assignment {
1380                     used_variants.insert(*idx);
1381                 }
1382             }
1383             if used_variants.count() < 2 {
1384                 for assignment in assignments.iter_mut() {
1385                     *assignment = Ineligible(None);
1386                 }
1387                 ineligible_locals.insert_all();
1388             }
1389         }
1390
1391         // Write down the order of our locals that will be promoted to the prefix.
1392         {
1393             for (idx, local) in ineligible_locals.iter().enumerate() {
1394                 assignments[local] = Ineligible(Some(idx as u32));
1395             }
1396         }
1397         debug!("generator saved local assignments: {:?}", assignments);
1398
1399         (ineligible_locals, assignments)
1400     }
1401
1402     /// Compute the full generator layout.
1403     fn generator_layout(
1404         &self,
1405         ty: Ty<'tcx>,
1406         def_id: hir::def_id::DefId,
1407         substs: SubstsRef<'tcx>,
1408     ) -> Result<&'tcx Layout, LayoutError<'tcx>> {
1409         use SavedLocalEligibility::*;
1410         let tcx = self.tcx;
1411
1412         let subst_field = |ty: Ty<'tcx>| ty.subst(tcx, substs);
1413
1414         let info = tcx.generator_layout(def_id);
1415         let (ineligible_locals, assignments) = self.generator_saved_local_eligibility(&info);
1416
1417         // Build a prefix layout, including "promoting" all ineligible
1418         // locals as part of the prefix. We compute the layout of all of
1419         // these fields at once to get optimal packing.
1420         let tag_index = substs.as_generator().prefix_tys().count();
1421
1422         // `info.variant_fields` already accounts for the reserved variants, so no need to add them.
1423         let max_discr = (info.variant_fields.len() - 1) as u128;
1424         let discr_int = Integer::fit_unsigned(max_discr);
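        // E.g. a generator with three suspension points has those three variants
        // plus the three reserved ones (unresumed/returned/poisoned), so
        // `max_discr` is 5 and the tag fits in an I8.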
1425         let discr_int_ty = discr_int.to_ty(tcx, false);
1426         let tag = Scalar { value: Primitive::Int(discr_int, false), valid_range: 0..=max_discr };
1427         let tag_layout = self.tcx.intern_layout(Layout::scalar(self, tag.clone()));
1428         let tag_layout = TyAndLayout { ty: discr_int_ty, layout: tag_layout };
1429
1430         let promoted_layouts = ineligible_locals
1431             .iter()
1432             .map(|local| subst_field(info.field_tys[local]))
1433             .map(|ty| tcx.mk_maybe_uninit(ty))
1434             .map(|ty| self.layout_of(ty));
1435         let prefix_layouts = substs
1436             .as_generator()
1437             .prefix_tys()
1438             .map(|ty| self.layout_of(ty))
1439             .chain(iter::once(Ok(tag_layout)))
1440             .chain(promoted_layouts)
1441             .collect::<Result<Vec<_>, _>>()?;
1442         let prefix = self.univariant_uninterned(
1443             ty,
1444             &prefix_layouts,
1445             &ReprOptions::default(),
1446             StructKind::AlwaysSized,
1447         )?;
1448
1449         let (prefix_size, prefix_align) = (prefix.size, prefix.align);
1450
1451         // Split the prefix layout into the "outer" fields (upvars and
1452         // discriminant) and the "promoted" fields. Promoted fields will
1453         // get included in each variant that requested them in
1454         // GeneratorLayout.
1455         debug!("prefix = {:#?}", prefix);
1456         let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
1457             FieldsShape::Arbitrary { mut offsets, memory_index } => {
1458                 let mut inverse_memory_index = invert_mapping(&memory_index);
1459
1460                 // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
1461                 // "outer" and "promoted" fields respectively.
1462                 let b_start = (tag_index + 1) as u32;
1463                 let offsets_b = offsets.split_off(b_start as usize);
1464                 let offsets_a = offsets;
1465
1466                 // Disentangle the "a" and "b" components of `inverse_memory_index`
1467                 // by preserving the order but keeping only one disjoint "half" each.
1468                 // FIXME(eddyb) build a better abstraction for permutations, if possible.
1469                 let inverse_memory_index_b: Vec<_> =
1470                     inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
1471                 inverse_memory_index.retain(|&i| i < b_start);
1472                 let inverse_memory_index_a = inverse_memory_index;
1473
1474                 // Since `inverse_memory_index_{a,b}` each refer only to their
1475                 // respective fields, they can be safely inverted.
1476                 let memory_index_a = invert_mapping(&inverse_memory_index_a);
1477                 let memory_index_b = invert_mapping(&inverse_memory_index_b);
1478
1479                 let outer_fields =
1480                     FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
1481                 (outer_fields, offsets_b, memory_index_b)
1482             }
1483             _ => bug!(),
1484         };
1485
1486         let mut size = prefix.size;
1487         let mut align = prefix.align;
1488         let variants = info
1489             .variant_fields
1490             .iter_enumerated()
1491             .map(|(index, variant_fields)| {
1492                 // Only include overlap-eligible fields when we compute our variant layout.
1493                 let variant_only_tys = variant_fields
1494                     .iter()
1495                     .filter(|local| match assignments[**local] {
1496                         Unassigned => bug!(),
1497                         Assigned(v) if v == index => true,
1498                         Assigned(_) => bug!("assignment does not match variant"),
1499                         Ineligible(_) => false,
1500                     })
1501                     .map(|local| subst_field(info.field_tys[*local]));
1502
1503                 let mut variant = self.univariant_uninterned(
1504                     ty,
1505                     &variant_only_tys
1506                         .map(|ty| self.layout_of(ty))
1507                         .collect::<Result<Vec<_>, _>>()?,
1508                     &ReprOptions::default(),
1509                     StructKind::Prefixed(prefix_size, prefix_align.abi),
1510                 )?;
1511                 variant.variants = Variants::Single { index };
1512
1513                 let (offsets, memory_index) = match variant.fields {
1514                     FieldsShape::Arbitrary { offsets, memory_index } => (offsets, memory_index),
1515                     _ => bug!(),
1516                 };
1517
1518                 // Now, stitch the promoted and variant-only fields back together in
1519                 // the order they are mentioned by our GeneratorLayout.
1520                 // Because we only use some subset (that can differ between variants)
1521                 // of the promoted fields, we can't just pick those elements of the
1522                 // `promoted_memory_index` (as we'd end up with gaps).
1523                 // So instead, we build an "inverse memory_index", as if all of the
1524                 // promoted fields were being used, but leave the elements not in the
1525                 // subset as `INVALID_FIELD_IDX`, which we can filter out later to
1526                 // obtain a valid (bijective) mapping.
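                // E.g. if this variant uses only the second of two promoted
                // fields, the slot for the first one keeps INVALID_FIELD_IDX
                // and is filtered out below, yielding a dense mapping.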
1527                 const INVALID_FIELD_IDX: u32 = !0;
1528                 let mut combined_inverse_memory_index =
1529                     vec![INVALID_FIELD_IDX; promoted_memory_index.len() + memory_index.len()];
1530                 let mut offsets_and_memory_index = offsets.into_iter().zip(memory_index);
1531                 let combined_offsets = variant_fields
1532                     .iter()
1533                     .enumerate()
1534                     .map(|(i, local)| {
1535                         let (offset, memory_index) = match assignments[*local] {
1536                             Unassigned => bug!(),
1537                             Assigned(_) => {
1538                                 let (offset, memory_index) =
1539                                     offsets_and_memory_index.next().unwrap();
1540                                 (offset, promoted_memory_index.len() as u32 + memory_index)
1541                             }
1542                             Ineligible(field_idx) => {
1543                                 let field_idx = field_idx.unwrap() as usize;
1544                                 (promoted_offsets[field_idx], promoted_memory_index[field_idx])
1545                             }
1546                         };
1547                         combined_inverse_memory_index[memory_index as usize] = i as u32;
1548                         offset
1549                     })
1550                     .collect();
1551
1552                 // Remove the unused slots and invert the mapping to obtain the
1553                 // combined `memory_index` (also see previous comment).
1554                 combined_inverse_memory_index.retain(|&i| i != INVALID_FIELD_IDX);
1555                 let combined_memory_index = invert_mapping(&combined_inverse_memory_index);
1556
1557                 variant.fields = FieldsShape::Arbitrary {
1558                     offsets: combined_offsets,
1559                     memory_index: combined_memory_index,
1560                 };
1561
1562                 size = size.max(variant.size);
1563                 align = align.max(variant.align);
1564                 Ok(variant)
1565             })
1566             .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1567
1568         size = size.align_to(align.abi);
1569
1570         let abi = if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi.is_uninhabited())
1571         {
1572             Abi::Uninhabited
1573         } else {
1574             Abi::Aggregate { sized: true }
1575         };
1576
1577         let layout = tcx.intern_layout(Layout {
1578             variants: Variants::Multiple {
1579                 tag,
1580                 tag_encoding: TagEncoding::Direct,
1581                 tag_field: tag_index,
1582                 variants,
1583             },
1584             fields: outer_fields,
1585             abi,
1586             largest_niche: prefix.largest_niche,
1587             size,
1588             align,
1589         });
1590         debug!("generator layout ({:?}): {:#?}", ty, layout);
1591         Ok(layout)
1592     }
1593
1594     /// This is invoked from `layout_of` to record the final
1595     /// layout of each type for `-Zprint-type-sizes`.
1596     #[inline(always)]
1597     fn record_layout_for_printing(&self, layout: TyAndLayout<'tcx>) {
1598         // If we are running with `-Zprint-type-sizes`, maybe record layouts
1599         // for dumping later.
1600         if self.tcx.sess.opts.debugging_opts.print_type_sizes {
1601             self.record_layout_for_printing_outlined(layout)
1602         }
1603     }
1604
1605     fn record_layout_for_printing_outlined(&self, layout: TyAndLayout<'tcx>) {
1606         // Ignore layouts computed with non-empty param environments, and
1607         // non-monomorphic layouts, as the user only wants to see the layouts
1608         // resulting from the final codegen session.
1609         if layout.ty.has_param_types_or_consts() || !self.param_env.caller_bounds().is_empty() {
1610             return;
1611         }
1612
1613         // (delay format until we actually need it)
1614         let record = |kind, packed, opt_discr_size, variants| {
1615             let type_desc = format!("{:?}", layout.ty);
1616             self.tcx.sess.code_stats.record_type_size(
1617                 kind,
1618                 type_desc,
1619                 layout.align.abi,
1620                 layout.size,
1621                 packed,
1622                 opt_discr_size,
1623                 variants,
1624             );
1625         };
1626
1627         let adt_def = match layout.ty.kind {
1628             ty::Adt(ref adt_def, _) => {
1629                 debug!("print-type-size t: `{:?}` process adt", layout.ty);
1630                 adt_def
1631             }
1632
1633             ty::Closure(..) => {
1634                 debug!("print-type-size t: `{:?}` record closure", layout.ty);
1635                 record(DataTypeKind::Closure, false, None, vec![]);
1636                 return;
1637             }
1638
1639             _ => {
1640                 debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
1641                 return;
1642             }
1643         };
1644
1645         let adt_kind = adt_def.adt_kind();
1646         let adt_packed = adt_def.repr.pack.is_some();
1647
1648         let build_variant_info = |n: Option<Ident>, flds: &[Symbol], layout: TyAndLayout<'tcx>| {
1649             let mut min_size = Size::ZERO;
1650             let field_info: Vec<_> = flds
1651                 .iter()
1652                 .enumerate()
1653                 .map(|(i, &name)| match layout.field(self, i) {
1654                     Err(err) => {
1655                         bug!("no layout found for field {}: `{:?}`", name, err);
1656                     }
1657                     Ok(field_layout) => {
1658                         let offset = layout.fields.offset(i);
1659                         let field_end = offset + field_layout.size;
1660                         if min_size < field_end {
1661                             min_size = field_end;
1662                         }
1663                         FieldInfo {
1664                             name: name.to_string(),
1665                             offset: offset.bytes(),
1666                             size: field_layout.size.bytes(),
1667                             align: field_layout.align.abi.bytes(),
1668                         }
1669                     }
1670                 })
1671                 .collect();
1672
1673             VariantInfo {
1674                 name: n.map(|n| n.to_string()),
1675                 kind: if layout.is_unsized() { SizeKind::Min } else { SizeKind::Exact },
1676                 align: layout.align.abi.bytes(),
1677                 size: if min_size.bytes() == 0 { layout.size.bytes() } else { min_size.bytes() },
1678                 fields: field_info,
1679             }
1680         };
1681
1682         match layout.variants {
1683             Variants::Single { index } => {
1684                 debug!("print-type-size `{:#?}` variant {}", layout, adt_def.variants[index].ident);
1685                 if !adt_def.variants.is_empty() {
1686                     let variant_def = &adt_def.variants[index];
1687                     let fields: Vec<_> = variant_def.fields.iter().map(|f| f.ident.name).collect();
1688                     record(
1689                         adt_kind.into(),
1690                         adt_packed,
1691                         None,
1692                         vec![build_variant_info(Some(variant_def.ident), &fields, layout)],
1693                     );
1694                 } else {
1695                     // (This case arises for *empty* enums, so give it
1696                     // zero variants.)
1697                     record(adt_kind.into(), adt_packed, None, vec![]);
1698                 }
1699             }
1700
1701             Variants::Multiple { ref tag, ref tag_encoding, .. } => {
1702                 debug!(
1703                     "print-type-size `{:#?}` adt general variants def {}",
1704                     layout.ty,
1705                     adt_def.variants.len()
1706                 );
1707                 let variant_infos: Vec<_> = adt_def
1708                     .variants
1709                     .iter_enumerated()
1710                     .map(|(i, variant_def)| {
1711                         let fields: Vec<_> =
1712                             variant_def.fields.iter().map(|f| f.ident.name).collect();
1713                         build_variant_info(
1714                             Some(variant_def.ident),
1715                             &fields,
1716                             layout.for_variant(self, i),
1717                         )
1718                     })
1719                     .collect();
1720                 record(
1721                     adt_kind.into(),
1722                     adt_packed,
1723                     match tag_encoding {
1724                         TagEncoding::Direct => Some(tag.value.size(self)),
1725                         _ => None,
1726                     },
1727                     variant_infos,
1728                 );
1729             }
1730         }
1731     }
1732 }
1733
1734 /// Type size "skeleton", i.e., the only information determining a type's size.
1735 /// While this is conservative (aside from constant sizes, only pointers,
1736 /// newtypes thereof, and null-pointer-optimized enums are allowed), it is
1737 /// enough to statically check common use cases of transmute.
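/// For example, with `T` a type parameter, both `&T` and `Option<&T>` reduce
/// to a `Pointer` skeleton with tail `T`, so a `transmute` between them can
/// be accepted without knowing `T`'s actual size.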
1738 #[derive(Copy, Clone, Debug)]
1739 pub enum SizeSkeleton<'tcx> {
1740     /// Any statically computable Layout.
1741     Known(Size),
1742
1743     /// A potentially-fat pointer.
1744     Pointer {
1745         /// If true, this pointer is never null.
1746         non_zero: bool,
1747         /// The type which determines the unsized metadata, if any,
1748         /// of this pointer. Either a type parameter or a projection
1749         /// depending on one, with regions erased.
1750         tail: Ty<'tcx>,
1751     },
1752 }
1753
1754 impl<'tcx> SizeSkeleton<'tcx> {
1755     pub fn compute(
1756         ty: Ty<'tcx>,
1757         tcx: TyCtxt<'tcx>,
1758         param_env: ty::ParamEnv<'tcx>,
1759     ) -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
1760         debug_assert!(!ty.has_infer_types_or_consts());
1761
1762         // First try computing a static layout.
1763         let err = match tcx.layout_of(param_env.and(ty)) {
1764             Ok(layout) => {
1765                 return Ok(SizeSkeleton::Known(layout.size));
1766             }
1767             Err(err) => err,
1768         };
1769
1770         match ty.kind {
1771             ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
1772                 let non_zero = !ty.is_unsafe_ptr();
1773                 let tail = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
1774                 match tail.kind {
1775                     ty::Param(_) | ty::Projection(_) => {
1776                         debug_assert!(tail.has_param_types_or_consts());
1777                         Ok(SizeSkeleton::Pointer { non_zero, tail: tcx.erase_regions(&tail) })
1778                     }
1779                     _ => bug!(
1780                         "SizeSkeleton::compute({}): layout errored ({}), yet \
1781                               tail `{}` is not a type parameter or a projection",
1782                         ty,
1783                         err,
1784                         tail
1785                     ),
1786                 }
1787             }
1788
1789             ty::Adt(def, substs) => {
1790                 // Only newtypes and enums w/ nullable pointer optimization.
1791                 if def.is_union() || def.variants.is_empty() || def.variants.len() > 2 {
1792                     return Err(err);
1793                 }
1794
1795                 // Get a zero-sized variant or a pointer newtype.
1796                 let zero_or_ptr_variant = |i| {
1797                     let i = VariantIdx::new(i);
1798                     let fields = def.variants[i]
1799                         .fields
1800                         .iter()
1801                         .map(|field| SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env));
1802                     let mut ptr = None;
1803                     for field in fields {
1804                         let field = field?;
1805                         match field {
1806                             SizeSkeleton::Known(size) => {
1807                                 if size.bytes() > 0 {
1808                                     return Err(err);
1809                                 }
1810                             }
1811                             SizeSkeleton::Pointer { .. } => {
1812                                 if ptr.is_some() {
1813                                     return Err(err);
1814                                 }
1815                                 ptr = Some(field);
1816                             }
1817                         }
1818                     }
1819                     Ok(ptr)
1820                 };
1821
1822                 let v0 = zero_or_ptr_variant(0)?;
1823                 // Newtype.
1824                 if def.variants.len() == 1 {
1825                     if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
1826                         return Ok(SizeSkeleton::Pointer {
1827                             non_zero: non_zero
1828                                 || match tcx.layout_scalar_valid_range(def.did) {
1829                                     (Bound::Included(start), Bound::Unbounded) => start > 0,
1830                                     (Bound::Included(start), Bound::Included(end)) => {
1831                                         0 < start && start < end
1832                                     }
1833                                     _ => false,
1834                                 },
1835                             tail,
1836                         });
1837                     } else {
1838                         return Err(err);
1839                     }
1840                 }
1841
1842                 let v1 = zero_or_ptr_variant(1)?;
1843                 // Nullable pointer enum optimization.
1844                 match (v0, v1) {
1845                     (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None)
1846                     | (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
1847                         Ok(SizeSkeleton::Pointer { non_zero: false, tail })
1848                     }
1849                     _ => Err(err),
1850                 }
1851             }
1852
1853             ty::Projection(_) | ty::Opaque(..) => {
1854                 let normalized = tcx.normalize_erasing_regions(param_env, ty);
1855                 if ty == normalized {
1856                     Err(err)
1857                 } else {
1858                     SizeSkeleton::compute(normalized, tcx, param_env)
1859                 }
1860             }
1861
1862             _ => Err(err),
1863         }
1864     }
1865
1866     pub fn same_size(self, other: SizeSkeleton<'_>) -> bool {
1867         match (self, other) {
1868             (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
1869             (SizeSkeleton::Pointer { tail: a, .. }, SizeSkeleton::Pointer { tail: b, .. }) => {
1870                 a == b
1871             }
1872             _ => false,
1873         }
1874     }
1875 }
1876
1877 pub trait HasTyCtxt<'tcx>: HasDataLayout {
1878     fn tcx(&self) -> TyCtxt<'tcx>;
1879 }
1880
1881 pub trait HasParamEnv<'tcx> {
1882     fn param_env(&self) -> ty::ParamEnv<'tcx>;
1883 }
1884
1885 impl<'tcx> HasDataLayout for TyCtxt<'tcx> {
1886     fn data_layout(&self) -> &TargetDataLayout {
1887         &self.data_layout
1888     }
1889 }
1890
1891 impl<'tcx> HasTyCtxt<'tcx> for TyCtxt<'tcx> {
1892     fn tcx(&self) -> TyCtxt<'tcx> {
1893         *self
1894     }
1895 }
1896
1897 impl<'tcx, C> HasParamEnv<'tcx> for LayoutCx<'tcx, C> {
1898     fn param_env(&self) -> ty::ParamEnv<'tcx> {
1899         self.param_env
1900     }
1901 }
1902
1903 impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
1904     fn data_layout(&self) -> &TargetDataLayout {
1905         self.tcx.data_layout()
1906     }
1907 }
1908
1909 impl<'tcx, T: HasTyCtxt<'tcx>> HasTyCtxt<'tcx> for LayoutCx<'tcx, T> {
1910     fn tcx(&self) -> TyCtxt<'tcx> {
1911         self.tcx.tcx()
1912     }
1913 }
1914
1915 pub type TyAndLayout<'tcx> = ::rustc_target::abi::TyAndLayout<'tcx, Ty<'tcx>>;
1916
1917 impl<'tcx> LayoutOf for LayoutCx<'tcx, TyCtxt<'tcx>> {
1918     type Ty = Ty<'tcx>;
1919     type TyAndLayout = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
1920
1921     /// Computes the layout of a type. Note that this implicitly
1922     /// executes in "reveal all" mode.
1923     fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout {
1924         let param_env = self.param_env.with_reveal_all_normalized(self.tcx);
1925         let ty = self.tcx.normalize_erasing_regions(param_env, ty);
1926         let layout = self.tcx.layout_raw(param_env.and(ty))?;
1927         let layout = TyAndLayout { ty, layout };
1928
1929         // N.B., this recording is normally disabled; when enabled, it
1930         // can however trigger recursive invocations of `layout_of`.
1931         // Therefore, we execute it *after* the main query has
1932         // completed, to avoid problems around recursive structures
1933         // and the like. (Admittedly, I wasn't able to reproduce a problem
1934         // here, but it seems like the right thing to do. -nmatsakis)
1935         self.record_layout_for_printing(layout);
1936
1937         Ok(layout)
1938     }
1939 }
1940
1941 impl LayoutOf for LayoutCx<'tcx, ty::query::TyCtxtAt<'tcx>> {
1942     type Ty = Ty<'tcx>;
1943     type TyAndLayout = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
1944
1945     /// Computes the layout of a type. Note that this implicitly
1946     /// executes in "reveal all" mode.
1947     fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout {
1948         let param_env = self.param_env.with_reveal_all_normalized(*self.tcx);
1949         let ty = self.tcx.normalize_erasing_regions(param_env, ty);
1950         let layout = self.tcx.layout_raw(param_env.and(ty))?;
1951         let layout = TyAndLayout { ty, layout };
1952
1953         // N.B., this recording is normally disabled; when enabled, it
1954         // can however trigger recursive invocations of `layout_of`.
1955         // Therefore, we execute it *after* the main query has
1956         // completed, to avoid problems around recursive structures
1957         // and the like. (Admittedly, I wasn't able to reproduce a problem
1958         // here, but it seems like the right thing to do. -nmatsakis)
1959         let cx = LayoutCx { tcx: *self.tcx, param_env: self.param_env };
1960         cx.record_layout_for_printing(layout);
1961
1962         Ok(layout)
1963     }
1964 }
1965
1966 // Helper (inherent) `layout_of` methods to avoid pushing `LayoutCx` to users.
1967 impl TyCtxt<'tcx> {
1968     /// Computes the layout of a type. Note that this implicitly
1969     /// executes in "reveal all" mode.
1970     #[inline]
1971     pub fn layout_of(
1972         self,
1973         param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
1974     ) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
1975         let cx = LayoutCx { tcx: self, param_env: param_env_and_ty.param_env };
1976         cx.layout_of(param_env_and_ty.value)
1977     }
1978 }
1979
1980 impl ty::query::TyCtxtAt<'tcx> {
1981     /// Computes the layout of a type. Note that this implicitly
1982     /// executes in "reveal all" mode.
1983     #[inline]
1984     pub fn layout_of(
1985         self,
1986         param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
1987     ) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
1988         let cx = LayoutCx { tcx: self.at(self.span), param_env: param_env_and_ty.param_env };
1989         cx.layout_of(param_env_and_ty.value)
1990     }
1991 }
1992
1993 impl<'tcx, C> TyAndLayoutMethods<'tcx, C> for Ty<'tcx>
1994 where
1995     C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout: MaybeResult<TyAndLayout<'tcx>>>
1996         + HasTyCtxt<'tcx>
1997         + HasParamEnv<'tcx>,
1998 {
1999     fn for_variant(
2000         this: TyAndLayout<'tcx>,
2001         cx: &C,
2002         variant_index: VariantIdx,
2003     ) -> TyAndLayout<'tcx> {
2004         let layout = match this.variants {
2005             Variants::Single { index }
2006                 // If all variants but one are uninhabited, the variant layout is the enum layout.
2007                 if index == variant_index &&
2008                 // Don't confuse variants of uninhabited enums with the enum itself.
2009                 // For more details see https://github.com/rust-lang/rust/issues/69763.
2010                 this.fields != FieldsShape::Primitive =>
2011             {
2012                 this.layout
2013             }
2014
2015             Variants::Single { index } => {
2016                 // Deny calling for_variant more than once for non-Single enums.
2017                 if let Ok(original_layout) = cx.layout_of(this.ty).to_result() {
2018                     assert_eq!(original_layout.variants, Variants::Single { index });
2019                 }
2020
2021                 let fields = match this.ty.kind {
2022                     ty::Adt(def, _) if def.variants.is_empty() =>
2023                         bug!("for_variant called on zero-variant enum"),
2024                     ty::Adt(def, _) => def.variants[variant_index].fields.len(),
2025                     _ => bug!(),
2026                 };
2027                 let tcx = cx.tcx();
2028                 tcx.intern_layout(Layout {
2029                     variants: Variants::Single { index: variant_index },
2030                     fields: match NonZeroUsize::new(fields) {
2031                         Some(fields) => FieldsShape::Union(fields),
2032                         None => FieldsShape::Arbitrary { offsets: vec![], memory_index: vec![] },
2033                     },
2034                     abi: Abi::Uninhabited,
2035                     largest_niche: None,
2036                     align: tcx.data_layout.i8_align,
2037                     size: Size::ZERO,
2038                 })
2039             }
2040
2041             Variants::Multiple { ref variants, .. } => &variants[variant_index],
2042         };
2043
2044         assert_eq!(layout.variants, Variants::Single { index: variant_index });
2045
2046         TyAndLayout { ty: this.ty, layout }
2047     }
2048
2049     fn field(this: TyAndLayout<'tcx>, cx: &C, i: usize) -> C::TyAndLayout {
2050         let tcx = cx.tcx();
2051         let tag_layout = |tag: &Scalar| -> C::TyAndLayout {
2052             let layout = Layout::scalar(cx, tag.clone());
2053             MaybeResult::from(Ok(TyAndLayout {
2054                 layout: tcx.intern_layout(layout),
2055                 ty: tag.value.to_ty(tcx),
2056             }))
2057         };
2058
2059         cx.layout_of(match this.ty.kind {
2060             ty::Bool
2061             | ty::Char
2062             | ty::Int(_)
2063             | ty::Uint(_)
2064             | ty::Float(_)
2065             | ty::FnPtr(_)
2066             | ty::Never
2067             | ty::FnDef(..)
2068             | ty::GeneratorWitness(..)
2069             | ty::Foreign(..)
2070             | ty::Dynamic(..) => bug!("TyAndLayout::field_type({:?}): not applicable", this),
2071
2072             // Potentially-fat pointers.
2073             ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
2074                 assert!(i < this.fields.count());
2075
2076                 // Reuse the fat `*T` type as its own thin pointer data field.
2077                 // This provides information about, e.g., DST struct pointees
2078                 // (which may have no non-DST form), and will work as long
2079                 // as the `Abi` or `FieldsShape` is checked by users.
2080                 if i == 0 {
2081                     let nil = tcx.mk_unit();
2082                     let ptr_ty = if this.ty.is_unsafe_ptr() {
2083                         tcx.mk_mut_ptr(nil)
2084                     } else {
2085                         tcx.mk_mut_ref(tcx.lifetimes.re_static, nil)
2086                     };
2087                     return MaybeResult::from(cx.layout_of(ptr_ty).to_result().map(
2088                         |mut ptr_layout| {
2089                             ptr_layout.ty = this.ty;
2090                             ptr_layout
2091                         },
2092                     ));
2093                 }
2094
2095                 match tcx.struct_tail_erasing_lifetimes(pointee, cx.param_env()).kind {
2096                     ty::Slice(_) | ty::Str => tcx.types.usize,
2097                     ty::Dynamic(_, _) => {
2098                         tcx.mk_imm_ref(tcx.lifetimes.re_static, tcx.mk_array(tcx.types.usize, 3))
2099                         /* FIXME: use actual fn pointers
2100                         Warning: naively computing the number of entries in the
2101                         vtable by counting the methods on the trait + methods on
2102                         all parent traits does not work, because some methods can
2103                         be not object safe and thus excluded from the vtable.
2104                         Increase this counter if you tried to implement this but
2105                         failed to do it without duplicating a lot of code from
2106                         other places in the compiler: 2
2107                         tcx.mk_tup(&[
2108                             tcx.mk_array(tcx.types.usize, 3),
2109                             tcx.mk_array(Option<fn()>),
2110                         ])
2111                         */
2112                     }
2113                     _ => bug!("TyAndLayout::field_type({:?}): not applicable", this),
2114                 }
2115             }
2116
2117             // Arrays and slices.
2118             ty::Array(element, _) | ty::Slice(element) => element,
2119             ty::Str => tcx.types.u8,
2120
2121             // Tuples, generators and closures.
2122             ty::Closure(_, ref substs) => substs.as_closure().upvar_tys().nth(i).unwrap(),
2123
2124             ty::Generator(def_id, ref substs, _) => match this.variants {
2125                 Variants::Single { index } => substs
2126                     .as_generator()
2127                     .state_tys(def_id, tcx)
2128                     .nth(index.as_usize())
2129                     .unwrap()
2130                     .nth(i)
2131                     .unwrap(),
2132                 Variants::Multiple { ref tag, tag_field, .. } => {
2133                     if i == tag_field {
2134                         return tag_layout(tag);
2135                     }
2136                     substs.as_generator().prefix_tys().nth(i).unwrap()
2137                 }
2138             },
2139
2140             ty::Tuple(tys) => tys[i].expect_ty(),
2141
2142             // SIMD vector types.
2143             ty::Adt(def, ..) if def.repr.simd() => this.ty.simd_type(tcx),
2144
2145             // ADTs.
2146             ty::Adt(def, substs) => {
2147                 match this.variants {
2148                     Variants::Single { index } => def.variants[index].fields[i].ty(tcx, substs),
2149
2150                     // Discriminant field for enums (where applicable).
2151                     Variants::Multiple { ref tag, .. } => {
2152                         assert_eq!(i, 0);
2153                         return tag_layout(tag);
2154                     }
2155                 }
2156             }
2157
2158             ty::Projection(_)
2159             | ty::Bound(..)
2160             | ty::Placeholder(..)
2161             | ty::Opaque(..)
2162             | ty::Param(_)
2163             | ty::Infer(_)
2164             | ty::Error(_) => bug!("TyAndLayout::field_type: unexpected type `{}`", this.ty),
2165         })
2166     }
2167
2168     fn pointee_info_at(this: TyAndLayout<'tcx>, cx: &C, offset: Size) -> Option<PointeeInfo> {
2169         let addr_space_of_ty = |ty: Ty<'tcx>| {
2170             if ty.is_fn() { cx.data_layout().instruction_address_space } else { AddressSpace::DATA }
2171         };
2172
2173         let pointee_info = match this.ty.kind {
2174             ty::RawPtr(mt) if offset.bytes() == 0 => {
2175                 cx.layout_of(mt.ty).to_result().ok().map(|layout| PointeeInfo {
2176                     size: layout.size,
2177                     align: layout.align.abi,
2178                     safe: None,
2179                     address_space: addr_space_of_ty(mt.ty),
2180                 })
2181             }
2182             ty::FnPtr(fn_sig) if offset.bytes() == 0 => {
2183                 cx.layout_of(cx.tcx().mk_fn_ptr(fn_sig)).to_result().ok().map(|layout| {
2184                     PointeeInfo {
2185                         size: layout.size,
2186                         align: layout.align.abi,
2187                         safe: None,
2188                         address_space: cx.data_layout().instruction_address_space,
2189                     }
2190                 })
2191             }
2192             ty::Ref(_, ty, mt) if offset.bytes() == 0 => {
2193                 let address_space = addr_space_of_ty(ty);
2194                 let tcx = cx.tcx();
2195                 let is_freeze = ty.is_freeze(tcx.at(DUMMY_SP), cx.param_env());
2196                 let kind = match mt {
2197                     hir::Mutability::Not => {
2198                         if is_freeze {
2199                             PointerKind::Frozen
2200                         } else {
2201                             PointerKind::Shared
2202                         }
2203                     }
2204                     hir::Mutability::Mut => {
2205                         // Previously we would only emit noalias annotations for LLVM >= 6 or in
2206                         // panic=abort mode. That was deemed right, as prior versions had many bugs
2207                         // in conjunction with unwinding, but later versions didn’t seem to have
2208                         // said issues. See issue #31681.
2209                         //
2210                         // Alas, later on we encountered a case where noalias would generate wrong
2211                         // code altogether even with recent versions of LLVM in *safe* code with no
2212                         // unwinding involved. See #54462.
2213                         //
2214                         // For now, do not enable mutable_noalias by default at all, while the
2215                         // issue is being figured out.
2216                         if tcx.sess.opts.debugging_opts.mutable_noalias {
2217                             PointerKind::UniqueBorrowed
2218                         } else {
2219                             PointerKind::Shared
2220                         }
2221                     }
2222                 };
2223
2224                 cx.layout_of(ty).to_result().ok().map(|layout| PointeeInfo {
2225                     size: layout.size,
2226                     align: layout.align.abi,
2227                     safe: Some(kind),
2228                     address_space,
2229                 })
2230             }
2231
2232             _ => {
2233                 let mut data_variant = match this.variants {
2234                     // Within the discriminant field, only the niche itself is
2235                     // always initialized, so we only check for a pointer at its
2236                     // offset.
2237                     //
2238                     // If the niche is a pointer, it's either valid (according
2239                     // to its type), or null (which the niche field's scalar
2240                     // validity range encodes).  This allows using
2241                     // `dereferenceable_or_null` for e.g., `Option<&T>`, and
2242                     // this will continue to work as long as we don't start
2243                     // using more niches than just null (e.g., the first page of
2244                     // the address space, or unaligned pointers).
2245                     Variants::Multiple {
2246                         tag_encoding: TagEncoding::Niche { dataful_variant, .. },
2247                         tag_field,
2248                         ..
2249                     } if this.fields.offset(tag_field) == offset => {
2250                         Some(this.for_variant(cx, dataful_variant))
2251                     }
2252                     _ => Some(this),
2253                 };
2254
2255                 if let Some(variant) = data_variant {
2256                     // We're not interested in any unions.
2257                     if let FieldsShape::Union(_) = variant.fields {
2258                         data_variant = None;
2259                     }
2260                 }
2261
2262                 let mut result = None;
2263
2264                 if let Some(variant) = data_variant {
2265                     let ptr_end = offset + Pointer.size(cx);
2266                     for i in 0..variant.fields.count() {
2267                         let field_start = variant.fields.offset(i);
2268                         if field_start <= offset {
2269                             let field = variant.field(cx, i);
2270                             result = field.to_result().ok().and_then(|field| {
2271                                 if ptr_end <= field_start + field.size {
2272                                     // We found the right field, look inside it.
2273                                     let field_info =
2274                                         field.pointee_info_at(cx, offset - field_start);
2275                                     field_info
2276                                 } else {
2277                                     None
2278                                 }
2279                             });
2280                             if result.is_some() {
2281                                 break;
2282                             }
2283                         }
2284                     }
2285                 }
2286
2287                 // FIXME(eddyb) This should be for `ptr::Unique<T>`, not `Box<T>`.
2288                 if let Some(ref mut pointee) = result {
2289                     if let ty::Adt(def, _) = this.ty.kind {
2290                         if def.is_box() && offset.bytes() == 0 {
2291                             pointee.safe = Some(PointerKind::UniqueOwned);
2292                         }
2293                     }
2294                 }
2295
2296                 result
2297             }
2298         };
2299
2300         debug!(
2301             "pointee_info_at (offset={:?}, type kind: {:?}) => {:?}",
2302             offset, this.ty.kind, pointee_info
2303         );
2304
2305         pointee_info
2306     }
2307 }
2308
2309 impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for LayoutError<'tcx> {
2310     fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
2311         use crate::ty::layout::LayoutError::*;
2312         mem::discriminant(self).hash_stable(hcx, hasher);
2313
2314         match *self {
2315             Unknown(t) | SizeOverflow(t) => t.hash_stable(hcx, hasher),
2316         }
2317     }
2318 }
2319
2320 impl<'tcx> ty::Instance<'tcx> {
2321     // NOTE(eddyb) this is private to avoid using it from outside of
2322     // `FnAbi::of_instance` - any other uses are either too high-level
2323     // for `Instance` (e.g. typeck would use `Ty::fn_sig` instead),
2324     // or should go through `FnAbi` instead, to avoid losing any
2325     // adjustments `FnAbi::of_instance` might be performing.
2326     fn fn_sig_for_fn_abi(&self, tcx: TyCtxt<'tcx>) -> ty::PolyFnSig<'tcx> {
2327         // FIXME(davidtwco,eddyb): A `ParamEnv` should be passed through to this function.
2328         let ty = self.ty(tcx, ty::ParamEnv::reveal_all());
2329         match ty.kind {
2330             ty::FnDef(..) => {
2331                 // HACK(davidtwco,eddyb): This is a workaround for polymorphization considering
2332                 // parameters unused if they show up in the signature, but not in the `mir::Body`
2333                 // (i.e. due to being inside a projection that got normalized, see
2334                 // `src/test/ui/polymorphization/normalized_sig_types.rs`), and codegen not keeping
2335                 // track of a polymorphization `ParamEnv` to allow normalizing later.
2336                 let mut sig = match ty.kind {
2337                     ty::FnDef(def_id, substs) => tcx
2338                         .normalize_erasing_regions(tcx.param_env(def_id), tcx.fn_sig(def_id))
2339                         .subst(tcx, substs),
2340                     _ => unreachable!(),
2341                 };
2342
2343                 if let ty::InstanceDef::VtableShim(..) = self.def {
2344                     // Modify `fn(self, ...)` to `fn(self: *mut Self, ...)`.
2345                     sig = sig.map_bound(|mut sig| {
2346                         let mut inputs_and_output = sig.inputs_and_output.to_vec();
2347                         inputs_and_output[0] = tcx.mk_mut_ptr(inputs_and_output[0]);
2348                         sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);
2349                         sig
2350                     });
2351                 }
2352                 sig
2353             }
2354             ty::Closure(def_id, substs) => {
2355                 let sig = substs.as_closure().sig();
2356
2357                 let env_ty = tcx.closure_env_ty(def_id, substs).unwrap();
2358                 sig.map_bound(|sig| {
2359                     tcx.mk_fn_sig(
2360                         iter::once(env_ty.skip_binder()).chain(sig.inputs().iter().cloned()),
2361                         sig.output(),
2362                         sig.c_variadic,
2363                         sig.unsafety,
2364                         sig.abi,
2365                     )
2366                 })
2367             }
2368             ty::Generator(_, substs, _) => {
2369                 let sig = substs.as_generator().poly_sig();
2370
2371                 let env_region = ty::ReLateBound(ty::INNERMOST, ty::BrEnv);
2372                 let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty);
2373
2374                 let pin_did = tcx.require_lang_item(PinTypeLangItem, None);
2375                 let pin_adt_ref = tcx.adt_def(pin_did);
2376                 let pin_substs = tcx.intern_substs(&[env_ty.into()]);
2377                 let env_ty = tcx.mk_adt(pin_adt_ref, pin_substs);
2378
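                     // Overall shape (editorial sketch):
                     // `fn(Pin<&mut G>, ResumeTy) -> GeneratorState<YieldTy, ReturnTy>`.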
2379                 sig.map_bound(|sig| {
2380                     let state_did = tcx.require_lang_item(GeneratorStateLangItem, None);
2381                     let state_adt_ref = tcx.adt_def(state_did);
2382                     let state_substs =
2383                         tcx.intern_substs(&[sig.yield_ty.into(), sig.return_ty.into()]);
2384                     let ret_ty = tcx.mk_adt(state_adt_ref, state_substs);
2385
2386                     tcx.mk_fn_sig(
2387                         [env_ty, sig.resume_ty].iter(),
2388                         &ret_ty,
2389                         false,
2390                         hir::Unsafety::Normal,
2391                         rustc_target::spec::abi::Abi::Rust,
2392                     )
2393                 })
2394             }
2395             _ => bug!("unexpected type {:?} in Instance::fn_sig", ty),
2396         }
2397     }
2398 }
2399
2400 pub trait FnAbiExt<'tcx, C>
2401 where
2402     C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout = TyAndLayout<'tcx>>
2403         + HasDataLayout
2404         + HasTargetSpec
2405         + HasTyCtxt<'tcx>
2406         + HasParamEnv<'tcx>,
2407 {
2408     /// Compute a `FnAbi` suitable for indirect calls, i.e. to `fn` pointers.
2409     ///
2410     /// NB: this doesn't handle virtual calls - those should use `FnAbi::of_instance`
2411     /// instead, where the instance is an `InstanceDef::Virtual`.
2412     fn of_fn_ptr(cx: &C, sig: ty::PolyFnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self;
2413
2414     /// Compute a `FnAbi` suitable for declaring/defining an `fn` instance, and for
2415     /// direct calls to an `fn`.
2416     ///
2417     /// NB: that includes virtual calls, which are represented by "direct calls"
2418     /// to an `InstanceDef::Virtual` instance (of `<dyn Trait as Trait>::fn`).
2419     fn of_instance(cx: &C, instance: ty::Instance<'tcx>, extra_args: &[Ty<'tcx>]) -> Self;
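         // (Editorial note: callers typically pass `extra_args = &[]`; it is
         // only non-empty for "rust-call" untupling and C-variadic call sites.)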
2420
2421     fn new_internal(
2422         cx: &C,
2423         sig: ty::PolyFnSig<'tcx>,
2424         extra_args: &[Ty<'tcx>],
2425         caller_location: Option<Ty<'tcx>>,
2426         codegen_fn_attr_flags: CodegenFnAttrFlags,
2427         mk_arg_type: impl Fn(Ty<'tcx>, Option<usize>) -> ArgAbi<'tcx, Ty<'tcx>>,
2428     ) -> Self;
2429     fn adjust_for_abi(&mut self, cx: &C, abi: SpecAbi);
2430 }
2431
2432 fn fn_can_unwind(
2433     panic_strategy: PanicStrategy,
2434     codegen_fn_attr_flags: CodegenFnAttrFlags,
2435     call_conv: Conv,
2436 ) -> bool {
2437     if panic_strategy != PanicStrategy::Unwind {
2438         // In panic=abort mode we assume nothing can unwind anywhere, so
2439         // optimize based on this!
2440         false
2441     } else if codegen_fn_attr_flags.contains(CodegenFnAttrFlags::UNWIND) {
2442         // If a specific #[unwind] attribute is present, use that.
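             // (Editorial note: at this point in time that is the unstable
             // `#[unwind(allowed)]` form.)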
2443         true
2444     } else if codegen_fn_attr_flags.contains(CodegenFnAttrFlags::RUSTC_ALLOCATOR_NOUNWIND) {
2445         // Special attribute for allocator functions, which can't unwind.
2446         false
2447     } else {
2448         if call_conv == Conv::Rust {
2449             // Any Rust method (or `extern "Rust" fn` or `extern
2450             // "rust-call" fn`) is explicitly allowed to unwind
2451             // (unless it has a no-unwind attribute, handled above).
2452             true
2453         } else {
2454             // Anything else is either:
2455             //
2456             //  1. A foreign item using a non-Rust ABI (like `extern "C" { fn foo(); }`), or
2457             //
2458             //  2. A Rust item using a non-Rust ABI (like `extern "C" fn foo() { ... }`).
2459             //
2460             // Foreign items (case 1) are assumed to not unwind; it is
2461             // UB otherwise. (At least for now; see also
2462             // rust-lang/rust#63909 and Rust RFC 2753.)
2463             //
2464             // Items defined in Rust with non-Rust ABIs (case 2) are also
2465             // not supposed to unwind. Whether this should be enforced
2466             // (versus stating it is UB) and *how* it would be enforced
2467             // is currently under discussion; see rust-lang/rust#58794.
2468             //
2469             // In either case, we mark the item as explicitly nounwind.
2470             false
2471         }
2472     }
2473 }
2474
2475 impl<'tcx, C> FnAbiExt<'tcx, C> for call::FnAbi<'tcx, Ty<'tcx>>
2476 where
2477     C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout = TyAndLayout<'tcx>>
2478         + HasDataLayout
2479         + HasTargetSpec
2480         + HasTyCtxt<'tcx>
2481         + HasParamEnv<'tcx>,
2482 {
2483     fn of_fn_ptr(cx: &C, sig: ty::PolyFnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
2484         // Assume that fn pointers may always unwind
2485         let codegen_fn_attr_flags = CodegenFnAttrFlags::UNWIND;
2486
2487         call::FnAbi::new_internal(cx, sig, extra_args, None, codegen_fn_attr_flags, |ty, _| {
2488             ArgAbi::new(cx.layout_of(ty))
2489         })
2490     }
2491
2492     fn of_instance(cx: &C, instance: ty::Instance<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
2493         let sig = instance.fn_sig_for_fn_abi(cx.tcx());
2494
2495         let caller_location = if instance.def.requires_caller_location(cx.tcx()) {
2496             Some(cx.tcx().caller_location_ty())
2497         } else {
2498             None
2499         };
2500
2501         let attrs = cx.tcx().codegen_fn_attrs(instance.def_id()).flags;
2502
2503         call::FnAbi::new_internal(cx, sig, extra_args, caller_location, attrs, |ty, arg_idx| {
2504             let mut layout = cx.layout_of(ty);
2505             // Don't pass the vtable, it's not an argument of the virtual fn.
2506             // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
2507             // or `&/&mut dyn Trait` because this is special-cased elsewhere in codegen.
2508             if let (ty::InstanceDef::Virtual(..), Some(0)) = (&instance.def, arg_idx) {
2509                 let fat_pointer_ty = if layout.is_unsized() {
2510                     // unsized `self` is passed as a pointer to `self`
2511                     // FIXME (mikeyhew) change this to use &own if it is ever added to the language
2512                     cx.tcx().mk_mut_ptr(layout.ty)
2513                 } else {
2514                     match layout.abi {
2515                         Abi::ScalarPair(..) => (),
2516                         _ => bug!("receiver type has unsupported layout: {:?}", layout),
2517                     }
2518
2519                     // In the case of Rc<Self>, we need to explicitly pass a *mut RcBox<Self>
2520                     // with a Scalar (not ScalarPair) ABI. This is a hack that is understood
2521                     // elsewhere in the compiler as a method on a `dyn Trait`.
2522                     // To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes until we
2523                     // get a built-in pointer type
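                         // Descent sketch (editorial): `Rc<dyn Trait>` ->
                         // `NonNull<RcBox<dyn Trait>>` -> `*const RcBox<dyn Trait>`,
                         // at which point the loop below stops.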
2524                     let mut fat_pointer_layout = layout;
2525                     'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr()
2526                         && !fat_pointer_layout.ty.is_region_ptr()
2527                     {
2528                         for i in 0..fat_pointer_layout.fields.count() {
2529                             let field_layout = fat_pointer_layout.field(cx, i);
2530
2531                             if !field_layout.is_zst() {
2532                                 fat_pointer_layout = field_layout;
2533                                 continue 'descend_newtypes;
2534                             }
2535                         }
2536
2537                         bug!("receiver has no non-zero-sized fields {:?}", fat_pointer_layout);
2538                     }
2539
2540                     fat_pointer_layout.ty
2541                 };
2542
2543                 // we now have a type like `*mut RcBox<dyn Trait>`
2544                 // change its layout to that of `*mut ()`, a thin pointer, but keep the same type
2545                 // this is understood as a special case elsewhere in the compiler
2546                 let unit_pointer_ty = cx.tcx().mk_mut_ptr(cx.tcx().mk_unit());
2547                 layout = cx.layout_of(unit_pointer_ty);
2548                 layout.ty = fat_pointer_ty;
2549             }
2550             ArgAbi::new(layout)
2551         })
2552     }
2553
2554     fn new_internal(
2555         cx: &C,
2556         sig: ty::PolyFnSig<'tcx>,
2557         extra_args: &[Ty<'tcx>],
2558         caller_location: Option<Ty<'tcx>>,
2559         codegen_fn_attr_flags: CodegenFnAttrFlags,
2560         mk_arg_type: impl Fn(Ty<'tcx>, Option<usize>) -> ArgAbi<'tcx, Ty<'tcx>>,
2561     ) -> Self {
2562         debug!("FnAbi::new_internal({:?}, {:?})", sig, extra_args);
2563
2564         let sig = cx.tcx().normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig);
2565
2566         use rustc_target::spec::abi::Abi::*;
2567         let conv = match cx.tcx().sess.target.target.adjust_abi(sig.abi) {
2568             RustIntrinsic | PlatformIntrinsic | Rust | RustCall => Conv::Rust,
2569
2570             // It's the ABI's job to select this, not ours.
2571             System => bug!("system abi should be selected elsewhere"),
2572             EfiApi => bug!("eficall abi should be selected elsewhere"),
2573
2574             Stdcall => Conv::X86Stdcall,
2575             Fastcall => Conv::X86Fastcall,
2576             Vectorcall => Conv::X86VectorCall,
2577             Thiscall => Conv::X86ThisCall,
2578             C => Conv::C,
2579             Unadjusted => Conv::C,
2580             Win64 => Conv::X86_64Win64,
2581             SysV64 => Conv::X86_64SysV,
2582             Aapcs => Conv::ArmAapcs,
2583             PtxKernel => Conv::PtxKernel,
2584             Msp430Interrupt => Conv::Msp430Intr,
2585             X86Interrupt => Conv::X86Intr,
2586             AmdGpuKernel => Conv::AmdGpuKernel,
2587             AvrInterrupt => Conv::AvrInterrupt,
2588             AvrNonBlockingInterrupt => Conv::AvrNonBlockingInterrupt,
2589
2590             // These ABI constants ought to be more specific...
2591             Cdecl => Conv::C,
2592         };
2593
2594         let mut inputs = sig.inputs();
2595         let extra_args = if sig.abi == RustCall {
2596             assert!(!sig.c_variadic && extra_args.is_empty());
2597
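                 // Untupling sketch (editorial): for
                 // `extern "rust-call" fn(self, (u32, u64))`, the final tuple input
                 // is split off so the ABI sees the inputs `[self, u32, u64]`.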
2598             if let Some(input) = sig.inputs().last() {
2599                 if let ty::Tuple(tupled_arguments) = input.kind {
2600                     inputs = &sig.inputs()[0..sig.inputs().len() - 1];
2601                     tupled_arguments.iter().map(|k| k.expect_ty()).collect()
2602                 } else {
2603                     bug!(
2604                         "argument to function with \"rust-call\" ABI \
2605                             is not a tuple"
2606                     );
2607                 }
2608             } else {
2609                 bug!(
2610                     "function with \"rust-call\" ABI \
2611                         has no tupled argument"
2612                 );
2613             }
2614         } else {
2615             assert!(sig.c_variadic || extra_args.is_empty());
2616             extra_args.to_vec()
2617         };
2618
2619         let target = &cx.tcx().sess.target.target;
2620         let target_env_gnu_like = matches!(&target.target_env[..], "gnu" | "musl");
2621         let win_x64_gnu =
2622             target.target_os == "windows" && target.arch == "x86_64" && target.target_env == "gnu";
2623         let linux_s390x_gnu_like =
2624             target.target_os == "linux" && target.arch == "s390x" && target_env_gnu_like;
2625         let linux_sparc64_gnu_like =
2626             target.target_os == "linux" && target.arch == "sparc64" && target_env_gnu_like;
2627         let linux_powerpc_gnu_like =
2628             target.target_os == "linux" && target.arch == "powerpc" && target_env_gnu_like;
2629         let rust_abi = matches!(
2630             sig.abi,
2631             RustIntrinsic | PlatformIntrinsic | Rust | RustCall
2632         );
2633
2634         // Handle safe Rust thin and fat pointers.
2635         let adjust_for_rust_scalar = |attrs: &mut ArgAttributes,
2636                                       scalar: &Scalar,
2637                                       layout: TyAndLayout<'tcx>,
2638                                       offset: Size,
2639                                       is_return: bool| {
2640             // Booleans are always an i1 that needs to be zero-extended.
2641             if scalar.is_bool() {
2642                 attrs.set(ArgAttribute::ZExt);
2643                 return;
2644             }
2645
2646             // Only pointer types are handled below.
2647             if scalar.value != Pointer {
2648                 return;
2649             }
2650
2651             if *scalar.valid_range.start() < *scalar.valid_range.end()
2652                 && *scalar.valid_range.start() > 0
2653             {
2654                 attrs.set(ArgAttribute::NonNull);
2655             }
2656
2657             if let Some(pointee) = layout.pointee_info_at(cx, offset) {
2658                 if let Some(kind) = pointee.safe {
2659                     attrs.pointee_align = Some(pointee.align);
2660
2661                     // `Box` pointers (`UniqueOwned`) are not necessarily dereferenceable
2662                     // for the entire duration of the function as they can be deallocated
2663                     // at any time. Set their valid size to 0.
2664                     attrs.pointee_size = match kind {
2665                         PointerKind::UniqueOwned => Size::ZERO,
2666                         _ => pointee.size,
2667                     };
2668
2669                     // `Box` pointer parameters never alias because ownership is transferred.
2670                     // `&mut` pointer parameters never alias other parameters
2671                     // or mutable global data.
2672                     //
2673                     // `&T` where `T` contains no `UnsafeCell<U>` is immutable,
2674                     // and can be marked as both `readonly` and `noalias`, as
2675                     // LLVM's definition of `noalias` is based solely on memory
2676                     // dependencies rather than pointer equality
2677                     let no_alias = match kind {
2678                         PointerKind::Shared => false,
2679                         PointerKind::UniqueOwned => true,
2680                         PointerKind::Frozen | PointerKind::UniqueBorrowed => !is_return,
2681                     };
2682                     if no_alias {
2683                         attrs.set(ArgAttribute::NoAlias);
2684                     }
2685
2686                     if kind == PointerKind::Frozen && !is_return {
2687                         attrs.set(ArgAttribute::ReadOnly);
2688                     }
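                         // Net effect (editorial sketch): `&T` with no
                         // `UnsafeCell` gets `noalias` + `readonly` as an argument,
                         // `&mut T` gets `noalias`, and `Box<T>` gets `noalias`
                         // with a zero pointee size.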
2689                 }
2690             }
2691         };
2692
2693         let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| {
2694             let is_return = arg_idx.is_none();
2695             let mut arg = mk_arg_type(ty, arg_idx);
2696             if arg.layout.is_zst() {
2697                 // For some forsaken reason, x86_64-pc-windows-gnu
2698                 // doesn't ignore zero-sized struct arguments.
2699                 // The same is true for {s390x,sparc64,powerpc}-unknown-linux-{gnu,musl}.
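                     // E.g. (editorial sketch): a zero-sized `struct Empty;`
                     // argument is ignored here for Rust-ABI fns and for returns,
                     // but kept for non-Rust ABIs on the targets listed above.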
2700                 if is_return
2701                     || rust_abi
2702                     || (!win_x64_gnu
2703                         && !linux_s390x_gnu_like
2704                         && !linux_sparc64_gnu_like
2705                         && !linux_powerpc_gnu_like)
2706                 {
2707                     arg.mode = PassMode::Ignore;
2708                 }
2709             }
2710
2711             // FIXME(eddyb) other ABIs don't have logic for scalar pairs.
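                 // Pair sketch (editorial): for `&[u8]` on a 64-bit target, `a` is
                 // the data pointer at offset 0 and `b` the length at offset 8,
                 // matching the offset computed below for `b`'s attributes.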
2712             if !is_return && rust_abi {
2713                 if let Abi::ScalarPair(ref a, ref b) = arg.layout.abi {
2714                     let mut a_attrs = ArgAttributes::new();
2715                     let mut b_attrs = ArgAttributes::new();
2716                     adjust_for_rust_scalar(&mut a_attrs, a, arg.layout, Size::ZERO, false);
2717                     adjust_for_rust_scalar(
2718                         &mut b_attrs,
2719                         b,
2720                         arg.layout,
2721                         a.value.size(cx).align_to(b.value.align(cx).abi),
2722                         false,
2723                     );
2724                     arg.mode = PassMode::Pair(a_attrs, b_attrs);
2725                     return arg;
2726                 }
2727             }
2728
2729             if let Abi::Scalar(ref scalar) = arg.layout.abi {
2730                 if let PassMode::Direct(ref mut attrs) = arg.mode {
2731                     adjust_for_rust_scalar(attrs, scalar, arg.layout, Size::ZERO, is_return);
2732                 }
2733             }
2734
2735             arg
2736         };
2737
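             // Argument order (editorial sketch): declared inputs, then any
             // untupled "rust-call" extras, then, for `#[track_caller]` fns, a
             // trailing `&'static Location<'static>` argument.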
2738         let mut fn_abi = FnAbi {
2739             ret: arg_of(sig.output(), None),
2740             args: inputs
2741                 .iter()
2742                 .cloned()
2743                 .chain(extra_args)
2744                 .chain(caller_location)
2745                 .enumerate()
2746                 .map(|(i, ty)| arg_of(ty, Some(i)))
2747                 .collect(),
2748             c_variadic: sig.c_variadic,
2749             fixed_count: inputs.len(),
2750             conv,
2751             can_unwind: fn_can_unwind(cx.tcx().sess.panic_strategy(), codegen_fn_attr_flags, conv),
2752         };
2753         fn_abi.adjust_for_abi(cx, sig.abi);
2754         fn_abi
2755     }
2756
2757     fn adjust_for_abi(&mut self, cx: &C, abi: SpecAbi) {
2758         if abi == SpecAbi::Unadjusted {
2759             return;
2760         }
2761
2762         if abi == SpecAbi::Rust
2763             || abi == SpecAbi::RustCall
2764             || abi == SpecAbi::RustIntrinsic
2765             || abi == SpecAbi::PlatformIntrinsic
2766         {
2767             let fixup = |arg: &mut ArgAbi<'tcx, Ty<'tcx>>| {
2768                 if arg.is_ignore() {
2769                     return;
2770                 }
2771
2772                 match arg.layout.abi {
2773                     Abi::Aggregate { .. } => {}
2774
2775                     // This is a fun case! The gist of what this is doing is
2776                     // that we want callers and callees to always agree on the
2777                     // ABI of how they pass SIMD arguments. If we were to *not*
2778                     // make these arguments indirect then they'd be immediates
2779                     // in LLVM, which means that they'd use whatever the
2780                     // appropriate ABI is for the callee and the caller. That
2781                     // means, for example, if the caller doesn't have AVX
2782                     // enabled but the callee does, then passing an AVX argument
2783                     // across this boundary would cause corrupt data to show up.
2784                     //
2785                     // This problem is fixed by unconditionally passing SIMD
2786                     // arguments through memory between callers and callees
2787                     // which should get them all to agree on ABI regardless of
2788                     // target feature sets. Some more information about this
2789                     // issue can be found in #44367.
2790                     //
2791                     // Note that the platform intrinsic ABI is exempt here as
2792                     // that's how we connect up to LLVM and it's unstable
2793                     // anyway; we control all calls to it in libstd.
2794                     Abi::Vector { .. }
2795                         if abi != SpecAbi::PlatformIntrinsic
2796                             && cx.tcx().sess.target.target.options.simd_types_indirect =>
2797                     {
2798                         arg.make_indirect();
2799                         return;
2800                     }
2801
2802                     _ => return,
2803                 }
2804
2805                 let size = arg.layout.size;
2806                 if arg.layout.is_unsized() || size > Pointer.size(cx) {
2807                     arg.make_indirect();
2808                 } else {
2809                     // We want to pass small aggregates as immediates, but using
2810                     // an LLVM aggregate type for this leads to bad optimizations,
2811                     // so we pick an appropriately sized integer type instead.
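                         // E.g. (editorial sketch): a 3-byte `struct S([u8; 3])`
                         // is cast to a 3-byte integer `Reg` here.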
2812                     arg.cast_to(Reg { kind: RegKind::Integer, size });
2813                 }
2814             };
2815             fixup(&mut self.ret);
2816             for arg in &mut self.args {
2817                 fixup(arg);
2818             }
2819             if let PassMode::Indirect(ref mut attrs, _) = self.ret.mode {
2820                 attrs.set(ArgAttribute::StructRet);
2821             }
2822             return;
2823         }
2824
2825         if let Err(msg) = self.adjust_for_cabi(cx, abi) {
2826             cx.tcx().sess.fatal(&msg);
2827         }
2828     }
2829 }