// compiler/rustc_middle/src/ty/layout.rs
use crate::ich::StableHashingContext;
use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use crate::mir::{GeneratorLayout, GeneratorSavedLocal};
use crate::ty::subst::Subst;
use crate::ty::{self, subst::SubstsRef, ReprOptions, Ty, TyCtxt, TypeFoldable};

use rustc_ast as ast;
use rustc_attr as attr;
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_hir as hir;
use rustc_hir::lang_items::LangItem;
use rustc_index::bit_set::BitSet;
use rustc_index::vec::{Idx, IndexVec};
use rustc_session::{DataTypeKind, FieldInfo, SizeKind, VariantInfo};
use rustc_span::symbol::{Ident, Symbol};
use rustc_span::DUMMY_SP;
use rustc_target::abi::call::{
    ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, Conv, FnAbi, PassMode, Reg, RegKind,
};
use rustc_target::abi::*;
use rustc_target::spec::{abi::Abi as SpecAbi, HasTargetSpec, PanicStrategy};

use std::cmp;
use std::fmt;
use std::iter;
use std::mem;
use std::num::NonZeroUsize;
use std::ops::Bound;

pub trait IntegerExt {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx>;
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
    fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer;
    fn from_uint_ty<C: HasDataLayout>(cx: &C, uty: ty::UintTy) -> Integer;
    fn repr_discr<'tcx>(
        tcx: TyCtxt<'tcx>,
        ty: Ty<'tcx>,
        repr: &ReprOptions,
        min: i128,
        max: i128,
    ) -> (Integer, bool);
}

impl IntegerExt for Integer {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx> {
        match (*self, signed) {
            (I8, false) => tcx.types.u8,
            (I16, false) => tcx.types.u16,
            (I32, false) => tcx.types.u32,
            (I64, false) => tcx.types.u64,
            (I128, false) => tcx.types.u128,
            (I8, true) => tcx.types.i8,
            (I16, true) => tcx.types.i16,
            (I32, true) => tcx.types.i32,
            (I64, true) => tcx.types.i64,
            (I128, true) => tcx.types.i128,
        }
    }

    /// Gets the Integer type from an attr::IntType.
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
        let dl = cx.data_layout();

        match ity {
            attr::SignedInt(ast::IntTy::I8) | attr::UnsignedInt(ast::UintTy::U8) => I8,
            attr::SignedInt(ast::IntTy::I16) | attr::UnsignedInt(ast::UintTy::U16) => I16,
            attr::SignedInt(ast::IntTy::I32) | attr::UnsignedInt(ast::UintTy::U32) => I32,
            attr::SignedInt(ast::IntTy::I64) | attr::UnsignedInt(ast::UintTy::U64) => I64,
            attr::SignedInt(ast::IntTy::I128) | attr::UnsignedInt(ast::UintTy::U128) => I128,
            attr::SignedInt(ast::IntTy::Isize) | attr::UnsignedInt(ast::UintTy::Usize) => {
                dl.ptr_sized_integer()
            }
        }
    }

    fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer {
        match ity {
            ty::IntTy::I8 => I8,
            ty::IntTy::I16 => I16,
            ty::IntTy::I32 => I32,
            ty::IntTy::I64 => I64,
            ty::IntTy::I128 => I128,
            ty::IntTy::Isize => cx.data_layout().ptr_sized_integer(),
        }
    }
    fn from_uint_ty<C: HasDataLayout>(cx: &C, ity: ty::UintTy) -> Integer {
        match ity {
            ty::UintTy::U8 => I8,
            ty::UintTy::U16 => I16,
            ty::UintTy::U32 => I32,
            ty::UintTy::U64 => I64,
            ty::UintTy::U128 => I128,
            ty::UintTy::Usize => cx.data_layout().ptr_sized_integer(),
        }
    }

    /// Finds the appropriate Integer type and signedness for the given
    /// signed discriminant range and `#[repr]` attribute.
    /// N.B.: `u128` values above `i128::MAX` will be treated as signed, but
    /// that shouldn't affect anything, other than maybe debuginfo.
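    ///
    /// For example (illustrative): a discriminant range of `0..=300` with no
    /// `#[repr]` hint needs the unsigned side of `I16`, so this returns
    /// `(I16, false)`, while a range of `-1..=0` returns `(I8, true)`.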
    fn repr_discr<'tcx>(
        tcx: TyCtxt<'tcx>,
        ty: Ty<'tcx>,
        repr: &ReprOptions,
        min: i128,
        max: i128,
    ) -> (Integer, bool) {
        // Theoretically, negative values could be larger in unsigned representation
        // than the unsigned representation of the signed minimum. However, if there
        // are any negative values, the only valid unsigned representation is u128
        // which can fit all i128 values, so the result remains unaffected.
        let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
        let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));

        let mut min_from_extern = None;
        let min_default = I8;

        if let Some(ity) = repr.int {
            let discr = Integer::from_attr(&tcx, ity);
            let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
            if discr < fit {
                bug!(
                    "Integer::repr_discr: `#[repr]` hint too small for \
                      discriminant range of enum `{}`",
                    ty
                )
            }
            return (discr, ity.is_signed());
        }

        if repr.c() {
            match &tcx.sess.target.arch[..] {
                "hexagon" => min_from_extern = Some(I8),
                // WARNING: the ARM EABI has two variants; the one corresponding
                // to `at_least == I32` appears to be used on Linux and NetBSD,
                // but some systems may use the variant corresponding to no
                // lower bound. However, we don't run on those yet...?
                "arm" => min_from_extern = Some(I32),
                _ => min_from_extern = Some(I32),
            }
        }

        let at_least = min_from_extern.unwrap_or(min_default);

        // If there are no negative values, we can use the unsigned fit.
        if min >= 0 {
            (cmp::max(unsigned_fit, at_least), false)
        } else {
            (cmp::max(signed_fit, at_least), true)
        }
    }
}

pub trait PrimitiveExt {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
    fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
}

impl PrimitiveExt for Primitive {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
        match *self {
            Int(i, signed) => i.to_ty(tcx, signed),
            F32 => tcx.types.f32,
            F64 => tcx.types.f64,
            Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
        }
    }

    /// Return an *integer* type matching this primitive.
    /// Useful in particular when dealing with enum discriminants.
    fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
        match *self {
            Int(i, signed) => i.to_ty(tcx, signed),
            Pointer => tcx.types.usize,
            F32 | F64 => bug!("floats do not have an int type"),
        }
    }
}

/// The first half of a fat pointer.
///
/// - For a trait object, this is the address of the box.
/// - For a slice, this is the base address.
pub const FAT_PTR_ADDR: usize = 0;

/// The second half of a fat pointer.
///
/// - For a trait object, this is the address of the vtable.
/// - For a slice, this is the length.
pub const FAT_PTR_EXTRA: usize = 1;
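// For instance, a `&[u8]` is laid out as such a pair: field `FAT_PTR_ADDR`
// holds the data pointer and field `FAT_PTR_EXTRA` holds the element count.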

/// The maximum supported number of lanes in a SIMD vector.
///
/// This value is selected based on backend support:
/// * LLVM does not appear to have a vector width limit.
/// * Cranelift stores the base-2 log of the lane count in a 4-bit integer.
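///
/// The limit is therefore `1 << 0xF`, i.e. 2^15 = 32768 lanes.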
pub const MAX_SIMD_LANES: u64 = 1 << 0xF;

#[derive(Copy, Clone, Debug, TyEncodable, TyDecodable)]
pub enum LayoutError<'tcx> {
    Unknown(Ty<'tcx>),
    SizeOverflow(Ty<'tcx>),
}

impl<'tcx> fmt::Display for LayoutError<'tcx> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match *self {
            LayoutError::Unknown(ty) => write!(f, "the type `{}` has an unknown layout", ty),
            LayoutError::SizeOverflow(ty) => {
                write!(f, "values of the type `{}` are too big for the current architecture", ty)
            }
        }
    }
}

fn layout_raw<'tcx>(
    tcx: TyCtxt<'tcx>,
    query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
) -> Result<&'tcx Layout, LayoutError<'tcx>> {
    ty::tls::with_related_context(tcx, move |icx| {
        let (param_env, ty) = query.into_parts();

        if !tcx.sess.recursion_limit().value_within_limit(icx.layout_depth) {
            tcx.sess.fatal(&format!("overflow representing the type `{}`", ty));
        }

        // Update the ImplicitCtxt to increase the layout_depth
        let icx = ty::tls::ImplicitCtxt { layout_depth: icx.layout_depth + 1, ..icx.clone() };

        ty::tls::enter_context(&icx, |_| {
            let cx = LayoutCx { tcx, param_env };
            let layout = cx.layout_raw_uncached(ty);
            // Type-level uninhabitedness should always imply ABI uninhabitedness.
            if let Ok(layout) = layout {
                if tcx.conservative_is_privately_uninhabited(param_env.and(ty)) {
                    assert!(layout.abi.is_uninhabited());
                }
            }
            layout
        })
    })
}

pub fn provide(providers: &mut ty::query::Providers) {
    *providers = ty::query::Providers { layout_raw, ..*providers };
}

pub struct LayoutCx<'tcx, C> {
    pub tcx: C,
    pub param_env: ty::ParamEnv<'tcx>,
}

#[derive(Copy, Clone, Debug)]
enum StructKind {
    /// A tuple, closure, or univariant which cannot be coerced to unsized.
    AlwaysSized,
    /// A univariant, the last field of which may be coerced to unsized.
    MaybeUnsized,
    /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
    Prefixed(Size, Align),
}

// Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
// This is used to go between `memory_index` (source field order to memory order)
// and `inverse_memory_index` (memory order to source field order).
// See also `FieldsShape::Arbitrary::memory_index` for more details.
// FIXME(eddyb) build a better abstraction for permutations, if possible.
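// For example (illustrative): `invert_mapping(&[2, 0, 1])` returns `[1, 2, 0]`,
// since `map[0] == 2` means `inverse[2]` must be `0`, and so on.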
fn invert_mapping(map: &[u32]) -> Vec<u32> {
    let mut inverse = vec![0; map.len()];
    for i in 0..map.len() {
        inverse[map[i] as usize] = i as u32;
    }
    inverse
}

impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
    fn scalar_pair(&self, a: Scalar, b: Scalar) -> Layout {
        let dl = self.data_layout();
        let b_align = b.value.align(dl);
        let align = a.value.align(dl).max(b_align).max(dl.aggregate_align);
        let b_offset = a.value.size(dl).align_to(b_align.abi);
        let size = (b_offset + b.value.size(dl)).align_to(align.abi);
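        // For example (illustrative): pairing an `I8` with an `I32` on a target
        // where `I32` is 4-byte aligned gives `b_offset == 4` and `size == 8`.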

        // HACK(nox): We iter on `b` and then `a` because `max_by_key`
        // returns the last maximum.
        let largest_niche = Niche::from_scalar(dl, b_offset, b.clone())
            .into_iter()
            .chain(Niche::from_scalar(dl, Size::ZERO, a.clone()))
            .max_by_key(|niche| niche.available(dl));

        Layout {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Arbitrary {
                offsets: vec![Size::ZERO, b_offset],
                memory_index: vec![0, 1],
            },
            abi: Abi::ScalarPair(a, b),
            largest_niche,
            align,
            size,
        }
    }

    fn univariant_uninterned(
        &self,
        ty: Ty<'tcx>,
        fields: &[TyAndLayout<'_>],
        repr: &ReprOptions,
        kind: StructKind,
    ) -> Result<Layout, LayoutError<'tcx>> {
        let dl = self.data_layout();
        let pack = repr.pack;
        if pack.is_some() && repr.align.is_some() {
            bug!("struct cannot be packed and aligned");
        }

        let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };

        let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();

        let optimize = !repr.inhibit_struct_field_reordering_opt();
        if optimize {
            let end =
                if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
            let optimizing = &mut inverse_memory_index[..end];
            let field_align = |f: &TyAndLayout<'_>| {
                if let Some(pack) = pack { f.align.abi.min(pack) } else { f.align.abi }
            };
            match kind {
                StructKind::AlwaysSized | StructKind::MaybeUnsized => {
                    optimizing.sort_by_key(|&x| {
                        // Place ZSTs first to avoid "interesting offsets",
                        // especially with only one or two non-ZST fields.
                        let f = &fields[x as usize];
                        (!f.is_zst(), cmp::Reverse(field_align(f)))
                    });
                }
                StructKind::Prefixed(..) => {
                    // Sort in ascending alignment so that the layout stays optimal
                    // regardless of the prefix.
                    optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
                }
            }
        }
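        // For example (illustrative): with fields `(u8, u32, u8)`, sorting by
        // decreasing alignment yields memory order `(u32, u8, u8)`, shrinking
        // the struct from 12 bytes to 8 on a typical target.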

        // inverse_memory_index holds field indices by increasing memory offset.
        // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
        // We now write field offsets to the corresponding offset slot;
        // field 5 with offset 0 puts 0 in offsets[5].
        // At the bottom of this function, we invert `inverse_memory_index` to
        // produce `memory_index` (see `invert_mapping`).

        let mut sized = true;
        let mut offsets = vec![Size::ZERO; fields.len()];
        let mut offset = Size::ZERO;
        let mut largest_niche = None;
        let mut largest_niche_available = 0;

        if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
            let prefix_align =
                if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
            align = align.max(AbiAndPrefAlign::new(prefix_align));
            offset = prefix_size.align_to(prefix_align);
        }

        for &i in &inverse_memory_index {
            let field = fields[i as usize];
            if !sized {
                bug!("univariant: field #{} of `{}` comes after unsized field", offsets.len(), ty);
            }

            if field.is_unsized() {
                sized = false;
            }

            // Invariant: offset < dl.obj_size_bound() <= 1<<61
            let field_align = if let Some(pack) = pack {
                field.align.min(AbiAndPrefAlign::new(pack))
            } else {
                field.align
            };
            offset = offset.align_to(field_align.abi);
            align = align.max(field_align);

            debug!("univariant offset: {:?} field: {:#?}", offset, field);
            offsets[i as usize] = offset;

            if !repr.hide_niche() {
                if let Some(mut niche) = field.largest_niche.clone() {
                    let available = niche.available(dl);
                    if available > largest_niche_available {
                        largest_niche_available = available;
                        niche.offset += offset;
                        largest_niche = Some(niche);
                    }
                }
            }
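            // (A niche is the range of unused values in a scalar: e.g. a `bool`
            // uses only 0 and 1 of its 256 byte patterns, leaving 254 spare
            // values in which an enclosing enum can encode its tag; this is
            // why `Option<bool>` is still one byte.)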

            offset = offset.checked_add(field.size, dl).ok_or(LayoutError::SizeOverflow(ty))?;
        }

        if let Some(repr_align) = repr.align {
            align = align.max(AbiAndPrefAlign::new(repr_align));
        }

        debug!("univariant min_size: {:?}", offset);
        let min_size = offset;

        // As stated above, inverse_memory_index holds field indices by increasing offset.
        // This makes it an already-sorted view of the offsets vec.
        // To invert it, consider:
        // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
        // Field 5 would be the first element, so memory_index is i:
        // Note: if we didn't optimize, it's already right.

        let memory_index =
            if optimize { invert_mapping(&inverse_memory_index) } else { inverse_memory_index };

        let size = min_size.align_to(align.abi);
        let mut abi = Abi::Aggregate { sized };

        // Unpack newtype ABIs and find scalar pairs.
        if sized && size.bytes() > 0 {
            // All other fields must be ZSTs.
            let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());

            match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
                // We have exactly one non-ZST field.
                (Some((i, field)), None, None) => {
                    // Field fills the struct and it has a scalar or scalar pair ABI.
                    if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size
                    {
                        match field.abi {
                            // For plain scalars, or vectors of them, we can't unpack
                            // newtypes for `#[repr(C)]`, as that affects C ABIs.
                            Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
                                abi = field.abi.clone();
                            }
                            // But scalar pairs are Rust-specific and get
                            // treated as aggregates by C ABIs anyway.
                            Abi::ScalarPair(..) => {
                                abi = field.abi.clone();
                            }
                            _ => {}
                        }
                    }
                }

                // Two non-ZST fields, and they're both scalars.
                (
                    Some((i, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(ref a), .. }, .. })),
                    Some((j, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(ref b), .. }, .. })),
                    None,
                ) => {
                    // Order by the memory placement, not source order.
                    let ((i, a), (j, b)) =
                        if offsets[i] < offsets[j] { ((i, a), (j, b)) } else { ((j, b), (i, a)) };
                    let pair = self.scalar_pair(a.clone(), b.clone());
                    let pair_offsets = match pair.fields {
                        FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
                            assert_eq!(memory_index, &[0, 1]);
                            offsets
                        }
                        _ => bug!(),
                    };
                    if offsets[i] == pair_offsets[0]
                        && offsets[j] == pair_offsets[1]
                        && align == pair.align
                        && size == pair.size
                    {
                        // We can use `ScalarPair` only when it matches our
                        // already computed layout (including `#[repr(C)]`).
                        abi = pair.abi;
                    }
                }

                _ => {}
            }
        }

        if sized && fields.iter().any(|f| f.abi.is_uninhabited()) {
            abi = Abi::Uninhabited;
        }

        Ok(Layout {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Arbitrary { offsets, memory_index },
            abi,
            largest_niche,
            align,
            size,
        })
    }

    fn layout_raw_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx Layout, LayoutError<'tcx>> {
        let tcx = self.tcx;
        let param_env = self.param_env;
        let dl = self.data_layout();
        let scalar_unit = |value: Primitive| {
            let bits = value.size(dl).bits();
            assert!(bits <= 128);
            Scalar { value, valid_range: 0..=(!0 >> (128 - bits)) }
        };
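        // For example (illustrative): for `I8`, `bits == 8`, so the valid range
        // is `0..=0xFF`, i.e. every bit pattern of the byte is allowed.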
        let scalar = |value: Primitive| tcx.intern_layout(Layout::scalar(self, scalar_unit(value)));

        let univariant = |fields: &[TyAndLayout<'_>], repr: &ReprOptions, kind| {
            Ok(tcx.intern_layout(self.univariant_uninterned(ty, fields, repr, kind)?))
        };
        debug_assert!(!ty.has_infer_types_or_consts());

        Ok(match *ty.kind() {
            // Basic scalars.
            ty::Bool => tcx.intern_layout(Layout::scalar(
                self,
                Scalar { value: Int(I8, false), valid_range: 0..=1 },
            )),
            ty::Char => tcx.intern_layout(Layout::scalar(
                self,
                Scalar { value: Int(I32, false), valid_range: 0..=0x10FFFF },
            )),
            ty::Int(ity) => scalar(Int(Integer::from_int_ty(dl, ity), true)),
            ty::Uint(ity) => scalar(Int(Integer::from_uint_ty(dl, ity), false)),
            ty::Float(fty) => scalar(match fty {
                ty::FloatTy::F32 => F32,
                ty::FloatTy::F64 => F64,
            }),
            ty::FnPtr(_) => {
                let mut ptr = scalar_unit(Pointer);
                ptr.valid_range = 1..=*ptr.valid_range.end();
                tcx.intern_layout(Layout::scalar(self, ptr))
            }

            // The never type.
            ty::Never => tcx.intern_layout(Layout {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldsShape::Primitive,
                abi: Abi::Uninhabited,
                largest_niche: None,
                align: dl.i8_align,
                size: Size::ZERO,
            }),

            // Potentially-wide pointers.
            ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                let mut data_ptr = scalar_unit(Pointer);
                if !ty.is_unsafe_ptr() {
                    data_ptr.valid_range = 1..=*data_ptr.valid_range.end();
                }

                let pointee = tcx.normalize_erasing_regions(param_env, pointee);
                if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
                    return Ok(tcx.intern_layout(Layout::scalar(self, data_ptr)));
                }

                let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
                let metadata = match unsized_part.kind() {
                    ty::Foreign(..) => {
                        return Ok(tcx.intern_layout(Layout::scalar(self, data_ptr)));
                    }
                    ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)),
                    ty::Dynamic(..) => {
                        let mut vtable = scalar_unit(Pointer);
                        vtable.valid_range = 1..=*vtable.valid_range.end();
                        vtable
                    }
                    _ => return Err(LayoutError::Unknown(unsized_part)),
                };

                // Effectively a (ptr, meta) tuple.
                tcx.intern_layout(self.scalar_pair(data_ptr, metadata))
            }

            // Arrays and slices.
            ty::Array(element, mut count) => {
                if count.has_projections() {
                    count = tcx.normalize_erasing_regions(param_env, count);
                    if count.has_projections() {
                        return Err(LayoutError::Unknown(ty));
                    }
                }

                let count = count.try_eval_usize(tcx, param_env).ok_or(LayoutError::Unknown(ty))?;
                let element = self.layout_of(element)?;
                let size =
                    element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;

                let abi =
                    if count != 0 && tcx.conservative_is_privately_uninhabited(param_env.and(ty)) {
                        Abi::Uninhabited
                    } else {
                        Abi::Aggregate { sized: true }
                    };

                let largest_niche = if count != 0 { element.largest_niche.clone() } else { None };

                tcx.intern_layout(Layout {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldsShape::Array { stride: element.size, count },
                    abi,
                    largest_niche,
                    align: element.align,
                    size,
                })
            }
            ty::Slice(element) => {
                let element = self.layout_of(element)?;
                tcx.intern_layout(Layout {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldsShape::Array { stride: element.size, count: 0 },
                    abi: Abi::Aggregate { sized: false },
                    largest_niche: None,
                    align: element.align,
                    size: Size::ZERO,
                })
            }
            ty::Str => tcx.intern_layout(Layout {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
                abi: Abi::Aggregate { sized: false },
                largest_niche: None,
                align: dl.i8_align,
                size: Size::ZERO,
            }),

            // Odd unit types.
            ty::FnDef(..) => univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?,
            ty::Dynamic(..) | ty::Foreign(..) => {
                let mut unit = self.univariant_uninterned(
                    ty,
                    &[],
                    &ReprOptions::default(),
                    StructKind::AlwaysSized,
                )?;
                match unit.abi {
                    Abi::Aggregate { ref mut sized } => *sized = false,
                    _ => bug!(),
                }
                tcx.intern_layout(unit)
            }

            ty::Generator(def_id, substs, _) => self.generator_layout(ty, def_id, substs)?,

            ty::Closure(_, ref substs) => {
                let tys = substs.as_closure().upvar_tys();
                univariant(
                    &tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    StructKind::AlwaysSized,
                )?
            }

            ty::Tuple(tys) => {
                let kind =
                    if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };

                univariant(
                    &tys.iter()
                        .map(|k| self.layout_of(k.expect_ty()))
                        .collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    kind,
                )?
            }

            // SIMD vector types.
            ty::Adt(def, substs) if def.repr.simd() => {
                // Supported SIMD vectors are homogeneous ADTs with at least one field:
                //
                // * #[repr(simd)] struct S(T, T, T, T);
                // * #[repr(simd)] struct S { x: T, y: T, z: T, w: T }
                // * #[repr(simd)] struct S([T; 4])
                //
                // where T is a primitive scalar (integer/float/pointer).

                // SIMD vectors with zero fields are not supported.
                // (should be caught by typeck)
                if def.non_enum_variant().fields.is_empty() {
                    tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
                }

                // Type of the first ADT field:
                let f0_ty = def.non_enum_variant().fields[0].ty(tcx, substs);

                // Heterogeneous SIMD vectors are not supported:
                // (should be caught by typeck)
                for fi in &def.non_enum_variant().fields {
                    if fi.ty(tcx, substs) != f0_ty {
                        tcx.sess.fatal(&format!("monomorphising heterogeneous SIMD type `{}`", ty));
                    }
                }

                // The element type and number of elements of the SIMD vector
                // are obtained from:
                //
                // * the element type and length of the single array field, if
                // the first field is of array type, or
                //
                // * the homogeneous field type and the number of fields.
                let (e_ty, e_len, is_array) = if let ty::Array(e_ty, _) = f0_ty.kind() {
                    // First ADT field is an array:

                    // SIMD vectors with multiple array fields are not supported:
                    // (should be caught by typeck)
                    if def.non_enum_variant().fields.len() != 1 {
                        tcx.sess.fatal(&format!(
                            "monomorphising SIMD type `{}` with more than one array field",
                            ty
                        ));
                    }

                    // Extract the number of elements from the layout of the array field:
                    let len = if let Ok(TyAndLayout {
                        layout: Layout { fields: FieldsShape::Array { count, .. }, .. },
                        ..
                    }) = self.layout_of(f0_ty)
                    {
                        count
                    } else {
                        return Err(LayoutError::Unknown(ty));
                    };

                    (*e_ty, *len, true)
                } else {
                    // First ADT field is not an array:
                    (f0_ty, def.non_enum_variant().fields.len() as _, false)
                };

                // SIMD vectors of zero length are not supported.
                // Additionally, lengths are capped at `MAX_SIMD_LANES` (2^15) as a
                // fixed maximum that backends must support.
                //
                // Can't be caught in typeck if the array length is generic.
                if e_len == 0 {
                    tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
                } else if !e_len.is_power_of_two() {
                    tcx.sess.fatal(&format!(
                        "monomorphising SIMD type `{}` of non-power-of-two length",
                        ty
                    ));
                } else if e_len > MAX_SIMD_LANES {
                    tcx.sess.fatal(&format!(
                        "monomorphising SIMD type `{}` of length greater than {}",
                        ty, MAX_SIMD_LANES,
                    ));
                }

                // Compute the ABI of the element type:
                let e_ly = self.layout_of(e_ty)?;
                let e_abi = if let Abi::Scalar(ref scalar) = e_ly.abi {
                    scalar.clone()
                } else {
                    // This error isn't caught in typeck, e.g., if
                    // the element type of the vector is generic.
                    tcx.sess.fatal(&format!(
                        "monomorphising SIMD type `{}` with a non-primitive-scalar \
                        (integer/float/pointer) element type `{}`",
                        ty, e_ty
                    ))
                };

                // Compute the size and alignment of the vector:
                let size = e_ly.size.checked_mul(e_len, dl).ok_or(LayoutError::SizeOverflow(ty))?;
                let align = dl.vector_align(size);
                let size = size.align_to(align.abi);

                // Compute the placement of the vector fields:
                let fields = if is_array {
                    FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] }
                } else {
                    FieldsShape::Array { stride: e_ly.size, count: e_len }
                };

                tcx.intern_layout(Layout {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields,
                    abi: Abi::Vector { element: e_abi, count: e_len },
                    largest_niche: e_ly.largest_niche.clone(),
                    size,
                    align,
                })
            }

            // ADTs.
            ty::Adt(def, substs) => {
                // Cache the field layouts.
                let variants = def
                    .variants
                    .iter()
                    .map(|v| {
                        v.fields
                            .iter()
                            .map(|field| self.layout_of(field.ty(tcx, substs)))
                            .collect::<Result<Vec<_>, _>>()
                    })
                    .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                if def.is_union() {
                    if def.repr.pack.is_some() && def.repr.align.is_some() {
                        bug!("union cannot be packed and aligned");
                    }

                    let mut align =
                        if def.repr.pack.is_some() { dl.i8_align } else { dl.aggregate_align };

                    if let Some(repr_align) = def.repr.align {
                        align = align.max(AbiAndPrefAlign::new(repr_align));
                    }

                    let optimize = !def.repr.inhibit_union_abi_opt();
                    let mut size = Size::ZERO;
                    let mut abi = Abi::Aggregate { sized: true };
                    let index = VariantIdx::new(0);
                    for field in &variants[index] {
                        assert!(!field.is_unsized());
                        align = align.max(field.align);

                        // If all non-ZST fields have the same ABI, forward this ABI
                        if optimize && !field.is_zst() {
                            // Normalize scalar_unit to the maximal valid range
                            let field_abi = match &field.abi {
                                Abi::Scalar(x) => Abi::Scalar(scalar_unit(x.value)),
                                Abi::ScalarPair(x, y) => {
                                    Abi::ScalarPair(scalar_unit(x.value), scalar_unit(y.value))
                                }
                                Abi::Vector { element: x, count } => {
                                    Abi::Vector { element: scalar_unit(x.value), count: *count }
                                }
                                Abi::Uninhabited | Abi::Aggregate { .. } => {
                                    Abi::Aggregate { sized: true }
                                }
                            };

                            if size == Size::ZERO {
                                // first non-ZST field: initialize `abi`
                                abi = field_abi;
                            } else if abi != field_abi {
                                // different fields have different ABI: reset to Aggregate
                                abi = Abi::Aggregate { sized: true };
                            }
                        }

                        size = cmp::max(size, field.size);
                    }

                    if let Some(pack) = def.repr.pack {
                        align = align.min(AbiAndPrefAlign::new(pack));
                    }

                    return Ok(tcx.intern_layout(Layout {
                        variants: Variants::Single { index },
                        fields: FieldsShape::Union(
                            NonZeroUsize::new(variants[index].len())
                                .ok_or(LayoutError::Unknown(ty))?,
                        ),
                        abi,
                        largest_niche: None,
                        align,
                        size: size.align_to(align.abi),
                    }));
                }

                // A variant is absent if it's uninhabited and only has ZST fields.
                // Present uninhabited variants only require space for their fields,
                // but *not* an encoding of the discriminant (e.g., a tag value).
                // See issue #49298 for more details on the need to leave space
                // for non-ZST uninhabited data (mostly partial initialization).
                let absent = |fields: &[TyAndLayout<'_>]| {
                    let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
                    let is_zst = fields.iter().all(|f| f.is_zst());
                    uninhabited && is_zst
                };
                let (present_first, present_second) = {
                    let mut present_variants = variants
                        .iter_enumerated()
                        .filter_map(|(i, v)| if absent(v) { None } else { Some(i) });
                    (present_variants.next(), present_variants.next())
                };
                let present_first = match present_first {
                    Some(present_first) => present_first,
                    // Uninhabited because it has no variants, or only absent ones.
                    None if def.is_enum() => return tcx.layout_raw(param_env.and(tcx.types.never)),
                    // If it's a struct, still compute a layout so that we can still compute the
                    // field offsets.
                    None => VariantIdx::new(0),
                };

                let is_struct = !def.is_enum() ||
                    // Only one variant is present.
                    (present_second.is_none() &&
                    // Representation optimizations are allowed.
                    !def.repr.inhibit_enum_layout_opt());
                if is_struct {
                    // Struct, or univariant enum equivalent to a struct.
                    // (Typechecking will reject discriminant-sizing attrs.)

                    let v = present_first;
                    let kind = if def.is_enum() || variants[v].is_empty() {
                        StructKind::AlwaysSized
                    } else {
                        let param_env = tcx.param_env(def.did);
                        let last_field = def.variants[v].fields.last().unwrap();
                        let always_sized =
                            tcx.type_of(last_field.did).is_sized(tcx.at(DUMMY_SP), param_env);
                        if !always_sized {
                            StructKind::MaybeUnsized
                        } else {
                            StructKind::AlwaysSized
                        }
                    };

                    let mut st = self.univariant_uninterned(ty, &variants[v], &def.repr, kind)?;
                    st.variants = Variants::Single { index: v };
                    let (start, end) = self.tcx.layout_scalar_valid_range(def.did);
                    match st.abi {
                        Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
                            // the asserts ensure that we are not using the
                            // `#[rustc_layout_scalar_valid_range(n)]`
                            // attribute to widen the range of anything as that would probably
                            // result in UB somewhere
                            // FIXME(eddyb) the asserts are probably not needed,
                            // as larger validity ranges would result in missed
                            // optimizations, *not* wrongly assuming the inner
                            // value is valid. e.g. unions enlarge validity ranges,
                            // because the values may be uninitialized.
                            if let Bound::Included(start) = start {
                                // FIXME(eddyb) this might be incorrect - it doesn't
                                // account for wrap-around (end < start) ranges.
                                assert!(*scalar.valid_range.start() <= start);
                                scalar.valid_range = start..=*scalar.valid_range.end();
                            }
                            if let Bound::Included(end) = end {
                                // FIXME(eddyb) this might be incorrect - it doesn't
                                // account for wrap-around (end < start) ranges.
                                assert!(*scalar.valid_range.end() >= end);
                                scalar.valid_range = *scalar.valid_range.start()..=end;
                            }

                            // Update `largest_niche` if we have introduced a larger niche.
                            let niche = if def.repr.hide_niche() {
                                None
                            } else {
                                Niche::from_scalar(dl, Size::ZERO, scalar.clone())
                            };
                            if let Some(niche) = niche {
                                match &st.largest_niche {
                                    Some(largest_niche) => {
                                        // Replace the existing niche even if they're equal,
                                        // because this one is at a lower offset.
                                        if largest_niche.available(dl) <= niche.available(dl) {
                                            st.largest_niche = Some(niche);
                                        }
                                    }
                                    None => st.largest_niche = Some(niche),
                                }
                            }
                        }
                        _ => assert!(
                            start == Bound::Unbounded && end == Bound::Unbounded,
                            "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
                            def,
                            st,
                        ),
                    }

                    return Ok(tcx.intern_layout(st));
                }

                // At this point, we have handled all unions and
                // structs. (We have also handled univariant enums
                // that allow representation optimization.)
                assert!(def.is_enum());

                // The current code for niche-filling relies on variant indices
                // instead of actual discriminants, so dataful enums with
                // explicit discriminants (RFC #2363) would misbehave.
                let no_explicit_discriminants = def
                    .variants
                    .iter_enumerated()
                    .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i.as_u32()));

                let mut niche_filling_layout = None;

                // Niche-filling enum optimization.
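                // For example, `Option<&T>` needs no separate tag: `None` is
                // encoded as the null value in the pointer's niche, so the
                // whole enum stays pointer-sized.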
                if !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants {
                    let mut dataful_variant = None;
                    let mut niche_variants = VariantIdx::MAX..=VariantIdx::new(0);

                    // Find one non-ZST variant.
                    'variants: for (v, fields) in variants.iter_enumerated() {
                        if absent(fields) {
                            continue 'variants;
                        }
                        for f in fields {
                            if !f.is_zst() {
                                if dataful_variant.is_none() {
                                    dataful_variant = Some(v);
                                    continue 'variants;
                                } else {
                                    dataful_variant = None;
                                    break 'variants;
                                }
                            }
                        }
                        niche_variants = *niche_variants.start().min(&v)..=v;
                    }

                    if niche_variants.start() > niche_variants.end() {
                        dataful_variant = None;
                    }

                    if let Some(i) = dataful_variant {
                        let count = (niche_variants.end().as_u32()
                            - niche_variants.start().as_u32()
                            + 1) as u128;

                        // Find the field with the largest niche
                        let niche_candidate = variants[i]
                            .iter()
                            .enumerate()
                            .filter_map(|(j, &field)| Some((j, field.largest_niche.as_ref()?)))
                            .max_by_key(|(_, niche)| niche.available(dl));

                        if let Some((field_index, niche, (niche_start, niche_scalar))) =
                            niche_candidate.and_then(|(field_index, niche)| {
                                Some((field_index, niche, niche.reserve(self, count)?))
                            })
                        {
                            let mut align = dl.aggregate_align;
                            let st = variants
                                .iter_enumerated()
                                .map(|(j, v)| {
                                    let mut st = self.univariant_uninterned(
                                        ty,
                                        v,
                                        &def.repr,
                                        StructKind::AlwaysSized,
                                    )?;
                                    st.variants = Variants::Single { index: j };

                                    align = align.max(st.align);

                                    Ok(st)
                                })
                                .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                            let offset = st[i].fields.offset(field_index) + niche.offset;
                            let size = st[i].size;

                            let abi = if st.iter().all(|v| v.abi.is_uninhabited()) {
                                Abi::Uninhabited
                            } else {
                                match st[i].abi {
                                    Abi::Scalar(_) => Abi::Scalar(niche_scalar.clone()),
                                    Abi::ScalarPair(ref first, ref second) => {
                                        // We need to use scalar_unit to reset the
                                        // valid range to the maximal one for that
                                        // primitive, because only the niche is
                                        // guaranteed to be initialised, not the
                                        // other primitive.
                                        if offset.bytes() == 0 {
                                            Abi::ScalarPair(
                                                niche_scalar.clone(),
                                                scalar_unit(second.value),
                                            )
                                        } else {
                                            Abi::ScalarPair(
                                                scalar_unit(first.value),
                                                niche_scalar.clone(),
                                            )
                                        }
                                    }
                                    _ => Abi::Aggregate { sized: true },
                                }
                            };

                            let largest_niche =
                                Niche::from_scalar(dl, offset, niche_scalar.clone());

                            niche_filling_layout = Some(Layout {
                                variants: Variants::Multiple {
                                    tag: niche_scalar,
                                    tag_encoding: TagEncoding::Niche {
                                        dataful_variant: i,
                                        niche_variants,
                                        niche_start,
                                    },
                                    tag_field: 0,
                                    variants: st,
                                },
                                fields: FieldsShape::Arbitrary {
                                    offsets: vec![offset],
                                    memory_index: vec![0],
                                },
                                abi,
                                largest_niche,
                                size,
                                align,
                            });
                        }
                    }
                }

                let (mut min, mut max) = (i128::MAX, i128::MIN);
                let discr_type = def.repr.discr_type();
                let bits = Integer::from_attr(self, discr_type).size().bits();
                for (i, discr) in def.discriminants(tcx) {
                    if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
                        continue;
                    }
                    let mut x = discr.val as i128;
                    if discr_type.is_signed() {
                        // sign extend the raw representation to be an i128
                        x = (x << (128 - bits)) >> (128 - bits);
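                        // (For example, with `bits == 8` a raw value of 0xFF
                        // comes out as -1 after this arithmetic shift round-trip.)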
                    }
                    if x < min {
                        min = x;
                    }
                    if x > max {
                        max = x;
                    }
                }
                // We might have no inhabited variants, so pretend there's at least one.
                if (min, max) == (i128::MAX, i128::MIN) {
                    min = 0;
                    max = 0;
                }
                assert!(min <= max, "discriminant range is {}...{}", min, max);
                let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max);

                let mut align = dl.aggregate_align;
                let mut size = Size::ZERO;

                // We're interested in the smallest alignment, so start large.
                let mut start_align = Align::from_bytes(256).unwrap();
                assert_eq!(Integer::for_align(dl, start_align), None);

                // repr(C) on an enum tells us to make a (tag, union) layout,
                // so we need to grow the prefix alignment to be at least
                // the alignment of the union. (This value is used both for
                // determining the alignment of the overall enum and for
                // determining the alignment of the payload after the tag.)
                let mut prefix_align = min_ity.align(dl).abi;
                if def.repr.c() {
                    for fields in &variants {
                        for field in fields {
                            prefix_align = prefix_align.max(field.align.abi);
                        }
                    }
                }

                // Create the set of structs that represent each variant.
                let mut layout_variants = variants
                    .iter_enumerated()
                    .map(|(i, field_layouts)| {
                        let mut st = self.univariant_uninterned(
                            ty,
                            &field_layouts,
                            &def.repr,
                            StructKind::Prefixed(min_ity.size(), prefix_align),
                        )?;
                        st.variants = Variants::Single { index: i };
                        // Find the first field we can't move later
                        // to make room for a larger discriminant.
                        for field in
                            st.fields.index_by_increasing_offset().map(|j| field_layouts[j])
                        {
                            if !field.is_zst() || field.align.abi.bytes() != 1 {
                                start_align = start_align.min(field.align.abi);
                                break;
                            }
                        }
                        size = cmp::max(size, st.size);
                        align = align.max(st.align);
                        Ok(st)
                    })
                    .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                // Align the maximum variant size to the largest alignment.
                size = size.align_to(align.abi);

                if size.bytes() >= dl.obj_size_bound() {
                    return Err(LayoutError::SizeOverflow(ty));
                }

                let typeck_ity = Integer::from_attr(dl, def.repr.discr_type());
                if typeck_ity < min_ity {
1187                     // It is a bug if layout decided on a larger discriminant size than
1188                     // typeck did at this point (based on the values the discriminant
1189                     // can take on), because the discriminant will be loaded and then
1190                     // stored into a variable of the type computed by typeck. Consider
1191                     // such a case (a bug): typeck decided on a byte-sized discriminant,
1192                     // but layout thinks we need 16 bits to store all the discriminant
1193                     // values. Then, in codegen, storing this 16-bit discriminant into
1194                     // an 8-bit temporary would discard some of the space necessary to
1195                     // represent it (or layout is wrong in thinking it needs 16 bits).
1196                     bug!(
1197                         "layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
1198                         min_ity,
1199                         typeck_ity
1200                     );
1201                     // However, it is fine to make the discr type larger (as an optimisation)
1202                     // after this point – we’ll just truncate the value we load in codegen.
1203                 }
1204
1205                 // Check to see if we should use a different type for the
1206                 // discriminant. We can safely use a type with the same size
1207                 // as the alignment of the first field of each variant.
1208                 // We increase the size of the discriminant to avoid LLVM copying
1209                 // padding when it doesn't need to; copying that padding normally
1210                 // causes unaligned loads/stores and excessive memcpy/memset
1211                 // operations. By using a bigger integer size, LLVM can be sure
1212                 // about the tag's contents and won't be so conservative.
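                // For instance (illustrative): in `enum E { A(u32), B(u32) }` the tag
                // needs only one byte, but every payload is 4-byte aligned. Widening
                // the tag from i8 to i32 turns the three padding bytes after it into
                // defined tag bytes, so LLVM can move the prefix with one aligned
                // 4-byte access instead of working around undef padding.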
1213
1214                 // Use the initial field alignment
1215                 let mut ity = if def.repr.c() || def.repr.int.is_some() {
1216                     min_ity
1217                 } else {
1218                     Integer::for_align(dl, start_align).unwrap_or(min_ity)
1219                 };
1220
1221                 // If the integer derived from the alignment is not larger than the
1222                 // minimum discriminant size, keep the minimum; otherwise widen.
1223                 if ity <= min_ity {
1224                     ity = min_ity;
1225                 } else {
1226                     // Patch up the variants' first few fields.
1227                     let old_ity_size = min_ity.size();
1228                     let new_ity_size = ity.size();
1229                     for variant in &mut layout_variants {
1230                         match variant.fields {
1231                             FieldsShape::Arbitrary { ref mut offsets, .. } => {
1232                                 for i in offsets {
1233                                     if *i <= old_ity_size {
1234                                         assert_eq!(*i, old_ity_size);
1235                                         *i = new_ity_size;
1236                                     }
1237                                 }
1238                                 // We might be making the struct larger.
1239                                 if variant.size <= old_ity_size {
1240                                     variant.size = new_ity_size;
1241                                 }
1242                             }
1243                             _ => bug!(),
1244                         }
1245                     }
1246                 }
1247
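                // Truncate `min`/`max` to the tag's width. Worked example
                // (illustrative): for a 1-byte tag, `tag_mask` is 0xff; with
                // discriminants -1..=0 it yields the wrapping valid range
                // 0xff..=0x00.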
1248                 let tag_mask = !0u128 >> (128 - ity.size().bits());
1249                 let tag = Scalar {
1250                     value: Int(ity, signed),
1251                     valid_range: (min as u128 & tag_mask)..=(max as u128 & tag_mask),
1252                 };
1253                 let mut abi = Abi::Aggregate { sized: true };
1254                 if tag.value.size(dl) == size {
1255                     abi = Abi::Scalar(tag.clone());
1256                 } else {
1257                     // Try to use a ScalarPair for all tagged enums.
1258                     let mut common_prim = None;
1259                     for (field_layouts, layout_variant) in variants.iter().zip(&layout_variants) {
1260                         let offsets = match layout_variant.fields {
1261                             FieldsShape::Arbitrary { ref offsets, .. } => offsets,
1262                             _ => bug!(),
1263                         };
1264                         let mut fields =
1265                             field_layouts.iter().zip(offsets).filter(|p| !p.0.is_zst());
1266                         let (field, offset) = match (fields.next(), fields.next()) {
1267                             (None, None) => continue,
1268                             (Some(pair), None) => pair,
1269                             _ => {
1270                                 common_prim = None;
1271                                 break;
1272                             }
1273                         };
1274                         let prim = match field.abi {
1275                             Abi::Scalar(ref scalar) => scalar.value,
1276                             _ => {
1277                                 common_prim = None;
1278                                 break;
1279                             }
1280                         };
1281                         if let Some(pair) = common_prim {
1282                             // This is pretty conservative. We could go fancier
1283                             // by conflating things like i32 and u32, or even
1284                             // realising that (u8, u8) could just cohabit with
1285                             // u16 or even u32.
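                            // E.g. (illustrative): `enum E { A(u32), B(u32) }`
                            // has one non-ZST field per variant, all `u32` at
                            // one common offset, so the whole enum can become
                            // `ScalarPair(tag, u32)` instead of an `Aggregate`.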
1286                             if pair != (prim, offset) {
1287                                 common_prim = None;
1288                                 break;
1289                             }
1290                         } else {
1291                             common_prim = Some((prim, offset));
1292                         }
1293                     }
1294                     if let Some((prim, offset)) = common_prim {
1295                         let pair = self.scalar_pair(tag.clone(), scalar_unit(prim));
1296                         let pair_offsets = match pair.fields {
1297                             FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
1298                                 assert_eq!(memory_index, &[0, 1]);
1299                                 offsets
1300                             }
1301                             _ => bug!(),
1302                         };
1303                         if pair_offsets[0] == Size::ZERO
1304                             && pair_offsets[1] == *offset
1305                             && align == pair.align
1306                             && size == pair.size
1307                         {
1308                             // We can use `ScalarPair` only when it matches our
1309                             // already computed layout (including `#[repr(C)]`).
1310                             abi = pair.abi;
1311                         }
1312                     }
1313                 }
1314
1315                 if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
1316                     abi = Abi::Uninhabited;
1317                 }
1318
1319                 let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag.clone());
1320
1321                 let tagged_layout = Layout {
1322                     variants: Variants::Multiple {
1323                         tag,
1324                         tag_encoding: TagEncoding::Direct,
1325                         tag_field: 0,
1326                         variants: layout_variants,
1327                     },
1328                     fields: FieldsShape::Arbitrary {
1329                         offsets: vec![Size::ZERO],
1330                         memory_index: vec![0],
1331                     },
1332                     largest_niche,
1333                     abi,
1334                     align,
1335                     size,
1336                 };
1337
1338                 let best_layout = match (tagged_layout, niche_filling_layout) {
1339                     (tagged_layout, Some(niche_filling_layout)) => {
1340                         // Pick the smaller layout; otherwise,
1341                         // pick the layout with the larger niche; otherwise,
1342                         // pick tagged as it has simpler codegen.
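                        // E.g. (illustrative): an 8-byte tagged layout beats a
                        // 16-byte niche-filling one outright; at equal size the
                        // larger niche wins via `cmp::Reverse`, and on a full tie
                        // `cmp::min_by_key` returns its first argument, i.e. the
                        // tagged layout.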
1343                         cmp::min_by_key(tagged_layout, niche_filling_layout, |layout| {
1344                             let niche_size =
1345                                 layout.largest_niche.as_ref().map_or(0, |n| n.available(dl));
1346                             (layout.size, cmp::Reverse(niche_size))
1347                         })
1348                     }
1349                     (tagged_layout, None) => tagged_layout,
1350                 };
1351
1352                 tcx.intern_layout(best_layout)
1353             }
1354
1355             // Types with no meaningful known layout.
1356             ty::Projection(_) | ty::Opaque(..) => {
1357                 let normalized = tcx.normalize_erasing_regions(param_env, ty);
1358                 if ty == normalized {
1359                     return Err(LayoutError::Unknown(ty));
1360                 }
1361                 tcx.layout_raw(param_env.and(normalized))?
1362             }
1363
1364             ty::Placeholder(..) | ty::GeneratorWitness(..) | ty::Infer(_) => {
1365                 bug!("Layout::compute: unexpected type `{}`", ty)
1366             }
1367
1368             ty::Bound(..) | ty::Param(_) | ty::Error(_) => {
1369                 return Err(LayoutError::Unknown(ty));
1370             }
1371         })
1372     }
1373 }
1374
1375 /// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
1376 #[derive(Clone, Debug, PartialEq)]
1377 enum SavedLocalEligibility {
1378     Unassigned,
1379     Assigned(VariantIdx),
1380     // FIXME: Use newtype_index so we aren't wasting bytes
1381     Ineligible(Option<u32>),
1382 }
1383
1384 // When laying out generators, we divide our saved local fields into two
1385 // categories: overlap-eligible and overlap-ineligible.
1386 //
1387 // Those fields which are ineligible for overlap go in a "prefix" at the
1388 // beginning of the layout, and always have space reserved for them.
1389 //
1390 // Overlap-eligible fields are only assigned to one variant, so we lay
1391 // those fields out for each variant and put them right after the
1392 // prefix.
1393 //
1394 // Finally, in the layout details, we point to the fields from the
1395 // variants they are assigned to. It is possible for some fields to be
1396 // included in multiple variants. No field ever "moves around" in the
1397 // layout; its offset is always the same.
1398 //
1399 // Also included in the layout are the upvars and the discriminant.
1400 // These are included as fields on the "outer" layout; they are not part
1401 // of any variant.
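//
// A sketch of the resulting shape (illustrative): for a generator with one
// upvar `u`, a local `a` live across two yield points, and locals `b` and `c`
// each live across only one, the layout is roughly
//
//     [ u, tag, a (prefix, fixed offsets) | b or c (per variant, may overlap) ]
//
// `b` and `c` may share offsets because they are assigned to different
// variants, while `u`, the tag, and the promoted `a` keep fixed offsets.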
1402 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
1403     /// Compute the eligibility and assignment of each local.
1404     fn generator_saved_local_eligibility(
1405         &self,
1406         info: &GeneratorLayout<'tcx>,
1407     ) -> (BitSet<GeneratorSavedLocal>, IndexVec<GeneratorSavedLocal, SavedLocalEligibility>) {
1408         use SavedLocalEligibility::*;
1409
1410         let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> =
1411             IndexVec::from_elem_n(Unassigned, info.field_tys.len());
1412
1413         // The saved locals not eligible for overlap. These will get
1414         // "promoted" to the prefix of our generator.
1415         let mut ineligible_locals = BitSet::new_empty(info.field_tys.len());
1416
1417         // Figure out which of our saved locals are fields in only
1418         // one variant. The rest are deemed ineligible for overlap.
1419         for (variant_index, fields) in info.variant_fields.iter_enumerated() {
1420             for local in fields {
1421                 match assignments[*local] {
1422                     Unassigned => {
1423                         assignments[*local] = Assigned(variant_index);
1424                     }
1425                     Assigned(idx) => {
1426                         // We've already seen this local at another suspension
1427                         // point, so it is no longer a candidate.
1428                         trace!(
1429                             "removing local {:?} in >1 variant ({:?}, {:?})",
1430                             local,
1431                             variant_index,
1432                             idx
1433                         );
1434                         ineligible_locals.insert(*local);
1435                         assignments[*local] = Ineligible(None);
1436                     }
1437                     Ineligible(_) => {}
1438                 }
1439             }
1440         }
1441
1442         // Next, check every pair of eligible locals to see if they
1443         // conflict.
1444         for local_a in info.storage_conflicts.rows() {
1445             let conflicts_a = info.storage_conflicts.count(local_a);
1446             if ineligible_locals.contains(local_a) {
1447                 continue;
1448             }
1449
1450             for local_b in info.storage_conflicts.iter(local_a) {
1451                 // local_a and local_b are storage-live at the same time, therefore they
1452                 // cannot overlap in the generator layout. The only way to guarantee
1453                 // this is if they are in the same variant, or one is ineligible
1454                 // (which means it is stored in every variant).
1455                 if ineligible_locals.contains(local_b)
1456                     || assignments[local_a] == assignments[local_b]
1457                 {
1458                     continue;
1459                 }
1460
1461                 // If they conflict, we will choose one to make ineligible.
1462                 // This is not always optimal; it's just a greedy heuristic that
1463                 // seems to produce good results most of the time.
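                // E.g. (illustrative): if `a` conflicts with both `b` and `c`,
                // while `b` and `c` do not conflict with each other, we remove
                // `a` (2 conflicts vs. 1), leaving `b` and `c` free to overlap;
                // that is better than making both `b` and `c` ineligible.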
1464                 let conflicts_b = info.storage_conflicts.count(local_b);
1465                 let (remove, other) =
1466                     if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
1467                 ineligible_locals.insert(remove);
1468                 assignments[remove] = Ineligible(None);
1469                 trace!("removing local {:?} due to conflict with {:?}", remove, other);
1470             }
1471         }
1472
1473         // Count the number of variants in use. If only one variant is in use,
1474         // it is impossible to overlap any locals in our layout. In this case it's
1475         // always better to make the remaining locals ineligible, so we can
1476         // lay them out with the other locals in the prefix and eliminate
1477         // unnecessary padding bytes.
1478         {
1479             let mut used_variants = BitSet::new_empty(info.variant_fields.len());
1480             for assignment in &assignments {
1481                 if let Assigned(idx) = assignment {
1482                     used_variants.insert(*idx);
1483                 }
1484             }
1485             if used_variants.count() < 2 {
1486                 for assignment in assignments.iter_mut() {
1487                     *assignment = Ineligible(None);
1488                 }
1489                 ineligible_locals.insert_all();
1490             }
1491         }
1492
1493         // Write down the order of our locals that will be promoted to the prefix.
1494         {
1495             for (idx, local) in ineligible_locals.iter().enumerate() {
1496                 assignments[local] = Ineligible(Some(idx as u32));
1497             }
1498         }
1499         debug!("generator saved local assignments: {:?}", assignments);
1500
1501         (ineligible_locals, assignments)
1502     }
1503
1504     /// Compute the full generator layout.
1505     fn generator_layout(
1506         &self,
1507         ty: Ty<'tcx>,
1508         def_id: hir::def_id::DefId,
1509         substs: SubstsRef<'tcx>,
1510     ) -> Result<&'tcx Layout, LayoutError<'tcx>> {
1511         use SavedLocalEligibility::*;
1512         let tcx = self.tcx;
1513         let subst_field = |ty: Ty<'tcx>| ty.subst(tcx, substs);
1514
1515         let info = match tcx.generator_layout(def_id) {
1516             None => return Err(LayoutError::Unknown(ty)),
1517             Some(info) => info,
1518         };
1519         let (ineligible_locals, assignments) = self.generator_saved_local_eligibility(&info);
1520
1521         // Build a prefix layout, including "promoting" all ineligible
1522         // locals as part of the prefix. We compute the layout of all of
1523         // these fields at once to get optimal packing.
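        // E.g. (illustrative): with two upvars and one ineligible local, the
        // prefix fields end up as `[upvar0, upvar1, tag, MaybeUninit<local>]`,
        // with `tag_index == 2`.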
1524         let tag_index = substs.as_generator().prefix_tys().count();
1525
1526         // `info.variant_fields` already accounts for the reserved variants, so no need to add them.
1527         let max_discr = (info.variant_fields.len() - 1) as u128;
1528         let discr_int = Integer::fit_unsigned(max_discr);
1529         let discr_int_ty = discr_int.to_ty(tcx, false);
1530         let tag = Scalar { value: Primitive::Int(discr_int, false), valid_range: 0..=max_discr };
1531         let tag_layout = self.tcx.intern_layout(Layout::scalar(self, tag.clone()));
1532         let tag_layout = TyAndLayout { ty: discr_int_ty, layout: tag_layout };
1533
1534         let promoted_layouts = ineligible_locals
1535             .iter()
1536             .map(|local| subst_field(info.field_tys[local]))
1537             .map(|ty| tcx.mk_maybe_uninit(ty))
1538             .map(|ty| self.layout_of(ty));
1539         let prefix_layouts = substs
1540             .as_generator()
1541             .prefix_tys()
1542             .map(|ty| self.layout_of(ty))
1543             .chain(iter::once(Ok(tag_layout)))
1544             .chain(promoted_layouts)
1545             .collect::<Result<Vec<_>, _>>()?;
1546         let prefix = self.univariant_uninterned(
1547             ty,
1548             &prefix_layouts,
1549             &ReprOptions::default(),
1550             StructKind::AlwaysSized,
1551         )?;
1552
1553         let (prefix_size, prefix_align) = (prefix.size, prefix.align);
1554
1555         // Split the prefix layout into the "outer" fields (upvars and
1556         // discriminant) and the "promoted" fields. Promoted fields will
1557         // get included in each variant that requested them in
1558         // GeneratorLayout.
1559         debug!("prefix = {:#?}", prefix);
1560         let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
1561             FieldsShape::Arbitrary { mut offsets, memory_index } => {
1562                 let mut inverse_memory_index = invert_mapping(&memory_index);
1563
1564                 // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
1565                 // "outer" and "promoted" fields respectively.
1566                 let b_start = (tag_index + 1) as u32;
1567                 let offsets_b = offsets.split_off(b_start as usize);
1568                 let offsets_a = offsets;
1569
1570                 // Disentangle the "a" and "b" components of `inverse_memory_index`
1571                 // by preserving the order but keeping only one disjoint "half" each.
1572                 // FIXME(eddyb) build a better abstraction for permutations, if possible.
1573                 let inverse_memory_index_b: Vec<_> =
1574                     inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
1575                 inverse_memory_index.retain(|&i| i < b_start);
1576                 let inverse_memory_index_a = inverse_memory_index;
1577
1578                 // Since `inverse_memory_index_{a,b}` each only refer to their
1579                 // respective fields, they can be safely inverted.
1580                 let memory_index_a = invert_mapping(&inverse_memory_index_a);
1581                 let memory_index_b = invert_mapping(&inverse_memory_index_b);
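                // Worked example (illustrative): with `memory_index = [2, 0, 3, 1]`
                // and `b_start = 2`, `inverse_memory_index` is `[1, 3, 0, 2]`;
                // the "b" half keeps entries >= 2 rebased to `[1, 0]`, the "a"
                // half keeps entries < 2 as `[1, 0]`, and inverting each half
                // gives `memory_index_a = [1, 0]` and `memory_index_b = [1, 0]`.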
1582
1583                 let outer_fields =
1584                     FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
1585                 (outer_fields, offsets_b, memory_index_b)
1586             }
1587             _ => bug!(),
1588         };
1589
1590         let mut size = prefix.size;
1591         let mut align = prefix.align;
1592         let variants = info
1593             .variant_fields
1594             .iter_enumerated()
1595             .map(|(index, variant_fields)| {
1596                 // Only include overlap-eligible fields when we compute our variant layout.
1597                 let variant_only_tys = variant_fields
1598                     .iter()
1599                     .filter(|local| match assignments[**local] {
1600                         Unassigned => bug!(),
1601                         Assigned(v) if v == index => true,
1602                         Assigned(_) => bug!("assignment does not match variant"),
1603                         Ineligible(_) => false,
1604                     })
1605                     .map(|local| subst_field(info.field_tys[*local]));
1606
1607                 let mut variant = self.univariant_uninterned(
1608                     ty,
1609                     &variant_only_tys
1610                         .map(|ty| self.layout_of(ty))
1611                         .collect::<Result<Vec<_>, _>>()?,
1612                     &ReprOptions::default(),
1613                     StructKind::Prefixed(prefix_size, prefix_align.abi),
1614                 )?;
1615                 variant.variants = Variants::Single { index };
1616
1617                 let (offsets, memory_index) = match variant.fields {
1618                     FieldsShape::Arbitrary { offsets, memory_index } => (offsets, memory_index),
1619                     _ => bug!(),
1620                 };
1621
1622                 // Now, stitch the promoted and variant-only fields back together in
1623                 // the order they are mentioned by our GeneratorLayout.
1624                 // Because we only use some subset (that can differ between variants)
1625                 // of the promoted fields, we can't just pick those elements of the
1626                 // `promoted_memory_index` (as we'd end up with gaps).
1627                 // So instead, we build an "inverse memory_index", as if all of the
1628                 // promoted fields were being used, but leave the elements not in the
1629                 // subset as `INVALID_FIELD_IDX`, which we can filter out later to
1630                 // obtain a valid (bijective) mapping.
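                // E.g. (illustrative): with two promoted fields and one
                // variant-only field, the inverse starts as `[!0, !0, !0]`;
                // filling the used slots might give `[!0, 1, 0]`, and after
                // `retain` and inversion the combined `memory_index` is a
                // bijection over just the fields this variant actually uses.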
1631                 const INVALID_FIELD_IDX: u32 = !0;
1632                 let mut combined_inverse_memory_index =
1633                     vec![INVALID_FIELD_IDX; promoted_memory_index.len() + memory_index.len()];
1634                 let mut offsets_and_memory_index = offsets.into_iter().zip(memory_index);
1635                 let combined_offsets = variant_fields
1636                     .iter()
1637                     .enumerate()
1638                     .map(|(i, local)| {
1639                         let (offset, memory_index) = match assignments[*local] {
1640                             Unassigned => bug!(),
1641                             Assigned(_) => {
1642                                 let (offset, memory_index) =
1643                                     offsets_and_memory_index.next().unwrap();
1644                                 (offset, promoted_memory_index.len() as u32 + memory_index)
1645                             }
1646                             Ineligible(field_idx) => {
1647                                 let field_idx = field_idx.unwrap() as usize;
1648                                 (promoted_offsets[field_idx], promoted_memory_index[field_idx])
1649                             }
1650                         };
1651                         combined_inverse_memory_index[memory_index as usize] = i as u32;
1652                         offset
1653                     })
1654                     .collect();
1655
1656                 // Remove the unused slots and invert the mapping to obtain the
1657                 // combined `memory_index` (also see previous comment).
1658                 combined_inverse_memory_index.retain(|&i| i != INVALID_FIELD_IDX);
1659                 let combined_memory_index = invert_mapping(&combined_inverse_memory_index);
1660
1661                 variant.fields = FieldsShape::Arbitrary {
1662                     offsets: combined_offsets,
1663                     memory_index: combined_memory_index,
1664                 };
1665
1666                 size = size.max(variant.size);
1667                 align = align.max(variant.align);
1668                 Ok(variant)
1669             })
1670             .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1671
1672         size = size.align_to(align.abi);
1673
1674         let abi = if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi.is_uninhabited())
1675         {
1676             Abi::Uninhabited
1677         } else {
1678             Abi::Aggregate { sized: true }
1679         };
1680
1681         let layout = tcx.intern_layout(Layout {
1682             variants: Variants::Multiple {
1683                 tag,
1684                 tag_encoding: TagEncoding::Direct,
1685                 tag_field: tag_index,
1686                 variants,
1687             },
1688             fields: outer_fields,
1689             abi,
1690             largest_niche: prefix.largest_niche,
1691             size,
1692             align,
1693         });
1694         debug!("generator layout ({:?}): {:#?}", ty, layout);
1695         Ok(layout)
1696     }
1697
1698     /// This is invoked by the `layout_raw` query to record the final
1699     /// layout of each type.
1700     #[inline(always)]
1701     fn record_layout_for_printing(&self, layout: TyAndLayout<'tcx>) {
1702         // If we are running with `-Zprint-type-sizes`, maybe record layouts
1703         // for dumping later.
1704         if self.tcx.sess.opts.debugging_opts.print_type_sizes {
1705             self.record_layout_for_printing_outlined(layout)
1706         }
1707     }
1708
1709     fn record_layout_for_printing_outlined(&self, layout: TyAndLayout<'tcx>) {
1710         // Ignore layouts computed in non-empty parameter environments, and
1711         // non-monomorphic layouts, as the user only wants to see the layouts
1712         // resulting from the final codegen session.
1713         if layout.ty.has_param_types_or_consts() || !self.param_env.caller_bounds().is_empty() {
1714             return;
1715         }
1716
1717         // (delay format until we actually need it)
1718         let record = |kind, packed, opt_discr_size, variants| {
1719             let type_desc = format!("{:?}", layout.ty);
1720             self.tcx.sess.code_stats.record_type_size(
1721                 kind,
1722                 type_desc,
1723                 layout.align.abi,
1724                 layout.size,
1725                 packed,
1726                 opt_discr_size,
1727                 variants,
1728             );
1729         };
1730
1731         let adt_def = match *layout.ty.kind() {
1732             ty::Adt(ref adt_def, _) => {
1733                 debug!("print-type-size t: `{:?}` process adt", layout.ty);
1734                 adt_def
1735             }
1736
1737             ty::Closure(..) => {
1738                 debug!("print-type-size t: `{:?}` record closure", layout.ty);
1739                 record(DataTypeKind::Closure, false, None, vec![]);
1740                 return;
1741             }
1742
1743             _ => {
1744                 debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
1745                 return;
1746             }
1747         };
1748
1749         let adt_kind = adt_def.adt_kind();
1750         let adt_packed = adt_def.repr.pack.is_some();
1751
1752         let build_variant_info = |n: Option<Ident>, flds: &[Symbol], layout: TyAndLayout<'tcx>| {
1753             let mut min_size = Size::ZERO;
1754             let field_info: Vec<_> = flds
1755                 .iter()
1756                 .enumerate()
1757                 .map(|(i, &name)| match layout.field(self, i) {
1758                     Err(err) => {
1759                         bug!("no layout found for field {}: `{:?}`", name, err);
1760                     }
1761                     Ok(field_layout) => {
1762                         let offset = layout.fields.offset(i);
1763                         let field_end = offset + field_layout.size;
1764                         if min_size < field_end {
1765                             min_size = field_end;
1766                         }
1767                         FieldInfo {
1768                             name: name.to_string(),
1769                             offset: offset.bytes(),
1770                             size: field_layout.size.bytes(),
1771                             align: field_layout.align.abi.bytes(),
1772                         }
1773                     }
1774                 })
1775                 .collect();
1776
1777             VariantInfo {
1778                 name: n.map(|n| n.to_string()),
1779                 kind: if layout.is_unsized() { SizeKind::Min } else { SizeKind::Exact },
1780                 align: layout.align.abi.bytes(),
1781                 size: if min_size.bytes() == 0 { layout.size.bytes() } else { min_size.bytes() },
1782                 fields: field_info,
1783             }
1784         };
1785
1786         match layout.variants {
1787             Variants::Single { index } => {
1788                 debug!("print-type-size `{:#?}` variant {}", layout, adt_def.variants[index].ident);
1789                 if !adt_def.variants.is_empty() {
1790                     let variant_def = &adt_def.variants[index];
1791                     let fields: Vec<_> = variant_def.fields.iter().map(|f| f.ident.name).collect();
1792                     record(
1793                         adt_kind.into(),
1794                         adt_packed,
1795                         None,
1796                         vec![build_variant_info(Some(variant_def.ident), &fields, layout)],
1797                     );
1798                 } else {
1799                     // (This case arises for *empty* enums, so give it
1800                     // zero variants.)
1801                     record(adt_kind.into(), adt_packed, None, vec![]);
1802                 }
1803             }
1804
1805             Variants::Multiple { ref tag, ref tag_encoding, .. } => {
1806                 debug!(
1807                     "print-type-size `{:#?}` adt general variants def {}",
1808                     layout.ty,
1809                     adt_def.variants.len()
1810                 );
1811                 let variant_infos: Vec<_> = adt_def
1812                     .variants
1813                     .iter_enumerated()
1814                     .map(|(i, variant_def)| {
1815                         let fields: Vec<_> =
1816                             variant_def.fields.iter().map(|f| f.ident.name).collect();
1817                         build_variant_info(
1818                             Some(variant_def.ident),
1819                             &fields,
1820                             layout.for_variant(self, i),
1821                         )
1822                     })
1823                     .collect();
1824                 record(
1825                     adt_kind.into(),
1826                     adt_packed,
1827                     match tag_encoding {
1828                         TagEncoding::Direct => Some(tag.value.size(self)),
1829                         _ => None,
1830                     },
1831                     variant_infos,
1832                 );
1833             }
1834         }
1835     }
1836 }
1837
1838 /// Type size "skeleton", i.e., the only information determining a type's size.
1839 /// While this is conservative (aside from constant sizes, only pointers,
1840 /// newtypes thereof, and null-pointer-optimized enums are allowed), it is
1841 /// enough to statically check common use cases of transmute.
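///
/// For example (illustrative), `&T` for a type parameter `T` has no statically
/// known layout, but its skeleton is `Pointer { non_zero: true, tail: T }`,
/// which is enough to accept `transmute::<&T, Option<&T>>` via `same_size`.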
1842 #[derive(Copy, Clone, Debug)]
1843 pub enum SizeSkeleton<'tcx> {
1844     /// Any statically computable Layout.
1845     Known(Size),
1846
1847     /// A potentially-fat pointer.
1848     Pointer {
1849         /// If true, this pointer is never null.
1850         non_zero: bool,
1851         /// The type which determines the unsized metadata, if any,
1852         /// of this pointer. Either a type parameter or a projection
1853         /// depending on one, with regions erased.
1854         tail: Ty<'tcx>,
1855     },
1856 }
1857
1858 impl<'tcx> SizeSkeleton<'tcx> {
1859     pub fn compute(
1860         ty: Ty<'tcx>,
1861         tcx: TyCtxt<'tcx>,
1862         param_env: ty::ParamEnv<'tcx>,
1863     ) -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
1864         debug_assert!(!ty.has_infer_types_or_consts());
1865
1866         // First try computing a static layout.
1867         let err = match tcx.layout_of(param_env.and(ty)) {
1868             Ok(layout) => {
1869                 return Ok(SizeSkeleton::Known(layout.size));
1870             }
1871             Err(err) => err,
1872         };
1873
1874         match *ty.kind() {
1875             ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
1876                 let non_zero = !ty.is_unsafe_ptr();
1877                 let tail = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
1878                 match tail.kind() {
1879                     ty::Param(_) | ty::Projection(_) => {
1880                         debug_assert!(tail.has_param_types_or_consts());
1881                         Ok(SizeSkeleton::Pointer { non_zero, tail: tcx.erase_regions(tail) })
1882                     }
1883                     _ => bug!(
1884                         "SizeSkeleton::compute({}): layout errored ({}), yet \
1885                               tail `{}` is not a type parameter or a projection",
1886                         ty,
1887                         err,
1888                         tail
1889                     ),
1890                 }
1891             }
1892
1893             ty::Adt(def, substs) => {
1894                 // Only newtypes and enums w/ nullable pointer optimization.
1895                 if def.is_union() || def.variants.is_empty() || def.variants.len() > 2 {
1896                     return Err(err);
1897                 }
1898
1899                 // Get a zero-sized variant or a pointer newtype.
1900                 let zero_or_ptr_variant = |i| {
1901                     let i = VariantIdx::new(i);
1902                     let fields = def.variants[i]
1903                         .fields
1904                         .iter()
1905                         .map(|field| SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env));
1906                     let mut ptr = None;
1907                     for field in fields {
1908                         let field = field?;
1909                         match field {
1910                             SizeSkeleton::Known(size) => {
1911                                 if size.bytes() > 0 {
1912                                     return Err(err);
1913                                 }
1914                             }
1915                             SizeSkeleton::Pointer { .. } => {
1916                                 if ptr.is_some() {
1917                                     return Err(err);
1918                                 }
1919                                 ptr = Some(field);
1920                             }
1921                         }
1922                     }
1923                     Ok(ptr)
1924                 };
1925
1926                 let v0 = zero_or_ptr_variant(0)?;
1927                 // Newtype.
1928                 if def.variants.len() == 1 {
1929                     if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
1930                         return Ok(SizeSkeleton::Pointer {
1931                             non_zero: non_zero
1932                                 || match tcx.layout_scalar_valid_range(def.did) {
1933                                     (Bound::Included(start), Bound::Unbounded) => start > 0,
1934                                     (Bound::Included(start), Bound::Included(end)) => {
1935                                         0 < start && start < end
1936                                     }
1937                                     _ => false,
1938                                 },
1939                             tail,
1940                         });
1941                     } else {
1942                         return Err(err);
1943                     }
1944                 }
1945
1946                 let v1 = zero_or_ptr_variant(1)?;
1947                 // Nullable pointer enum optimization.
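                // E.g. (illustrative): `Option<&T>` has a zero-sized `None`
                // variant and a `Some` variant holding one non-zero pointer,
                // so its skeleton is `Pointer { non_zero: false, tail: T }`;
                // the null value is now used to encode `None`.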
1948                 match (v0, v1) {
1949                     (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None)
1950                     | (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
1951                         Ok(SizeSkeleton::Pointer { non_zero: false, tail })
1952                     }
1953                     _ => Err(err),
1954                 }
1955             }
1956
1957             ty::Projection(_) | ty::Opaque(..) => {
1958                 let normalized = tcx.normalize_erasing_regions(param_env, ty);
1959                 if ty == normalized {
1960                     Err(err)
1961                 } else {
1962                     SizeSkeleton::compute(normalized, tcx, param_env)
1963                 }
1964             }
1965
1966             _ => Err(err),
1967         }
1968     }
1969
1970     pub fn same_size(self, other: SizeSkeleton<'_>) -> bool {
1971         match (self, other) {
1972             (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
1973             (SizeSkeleton::Pointer { tail: a, .. }, SizeSkeleton::Pointer { tail: b, .. }) => {
1974                 a == b
1975             }
1976             _ => false,
1977         }
1978     }
1979 }
1980
1981 pub trait HasTyCtxt<'tcx>: HasDataLayout {
1982     fn tcx(&self) -> TyCtxt<'tcx>;
1983 }
1984
1985 pub trait HasParamEnv<'tcx> {
1986     fn param_env(&self) -> ty::ParamEnv<'tcx>;
1987 }
1988
1989 impl<'tcx> HasDataLayout for TyCtxt<'tcx> {
1990     fn data_layout(&self) -> &TargetDataLayout {
1991         &self.data_layout
1992     }
1993 }
1994
1995 impl<'tcx> HasTyCtxt<'tcx> for TyCtxt<'tcx> {
1996     fn tcx(&self) -> TyCtxt<'tcx> {
1997         *self
1998     }
1999 }
2000
2001 impl<'tcx, C> HasParamEnv<'tcx> for LayoutCx<'tcx, C> {
2002     fn param_env(&self) -> ty::ParamEnv<'tcx> {
2003         self.param_env
2004     }
2005 }
2006
2007 impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
2008     fn data_layout(&self) -> &TargetDataLayout {
2009         self.tcx.data_layout()
2010     }
2011 }
2012
2013 impl<'tcx, T: HasTyCtxt<'tcx>> HasTyCtxt<'tcx> for LayoutCx<'tcx, T> {
2014     fn tcx(&self) -> TyCtxt<'tcx> {
2015         self.tcx.tcx()
2016     }
2017 }
2018
2019 pub type TyAndLayout<'tcx> = rustc_target::abi::TyAndLayout<'tcx, Ty<'tcx>>;
2020
2021 impl<'tcx> LayoutOf for LayoutCx<'tcx, TyCtxt<'tcx>> {
2022     type Ty = Ty<'tcx>;
2023     type TyAndLayout = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
2024
2025     /// Computes the layout of a type. Note that this implicitly
2026     /// executes in "reveal all" mode.
2027     fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout {
2028         let param_env = self.param_env.with_reveal_all_normalized(self.tcx);
2029         let ty = self.tcx.normalize_erasing_regions(param_env, ty);
2030         let layout = self.tcx.layout_raw(param_env.and(ty))?;
2031         let layout = TyAndLayout { ty, layout };
2032
2033         // N.B., this recording is normally disabled; when enabled, it
2034         // can however trigger recursive invocations of `layout_of`.
2035         // Therefore, we execute it *after* the main query has
2036         // completed, to avoid problems around recursive structures
2037         // and the like. (Admittedly, I wasn't able to reproduce a problem
2038         // here, but it seems like the right thing to do. -nmatsakis)
2039         self.record_layout_for_printing(layout);
2040
2041         Ok(layout)
2042     }
2043 }
2044
2045 impl LayoutOf for LayoutCx<'tcx, ty::query::TyCtxtAt<'tcx>> {
2046     type Ty = Ty<'tcx>;
2047     type TyAndLayout = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
2048
2049     /// Computes the layout of a type. Note that this implicitly
2050     /// executes in "reveal all" mode.
2051     fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout {
2052         let param_env = self.param_env.with_reveal_all_normalized(*self.tcx);
2053         let ty = self.tcx.normalize_erasing_regions(param_env, ty);
2054         let layout = self.tcx.layout_raw(param_env.and(ty))?;
2055         let layout = TyAndLayout { ty, layout };
2056
2057         // N.B., this recording is normally disabled; when enabled, it
2058         // can however trigger recursive invocations of `layout_of`.
2059         // Therefore, we execute it *after* the main query has
2060         // completed, to avoid problems around recursive structures
2061         // and the like. (Admittedly, I wasn't able to reproduce a problem
2062         // here, but it seems like the right thing to do. -nmatsakis)
2063         let cx = LayoutCx { tcx: *self.tcx, param_env: self.param_env };
2064         cx.record_layout_for_printing(layout);
2065
2066         Ok(layout)
2067     }
2068 }
2069
2070 // Helper (inherent) `layout_of` methods to avoid pushing `LayoutCx` to users.
2071 impl TyCtxt<'tcx> {
2072     /// Computes the layout of a type. Note that this implicitly
2073     /// executes in "reveal all" mode.
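    ///
    /// A typical call looks like (illustrative):
    /// `let size = tcx.layout_of(param_env.and(ty))?.size;`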
2074     #[inline]
2075     pub fn layout_of(
2076         self,
2077         param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
2078     ) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
2079         let cx = LayoutCx { tcx: self, param_env: param_env_and_ty.param_env };
2080         cx.layout_of(param_env_and_ty.value)
2081     }
2082 }
2083
2084 impl ty::query::TyCtxtAt<'tcx> {
2085     /// Computes the layout of a type. Note that this implicitly
2086     /// executes in "reveal all" mode.
2087     #[inline]
2088     pub fn layout_of(
2089         self,
2090         param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
2091     ) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
2092         let cx = LayoutCx { tcx: self.at(self.span), param_env: param_env_and_ty.param_env };
2093         cx.layout_of(param_env_and_ty.value)
2094     }
2095 }
2096
2097 impl<'tcx, C> TyAndLayoutMethods<'tcx, C> for Ty<'tcx>
2098 where
2099     C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout: MaybeResult<TyAndLayout<'tcx>>>
2100         + HasTyCtxt<'tcx>
2101         + HasParamEnv<'tcx>,
2102 {
2103     fn for_variant(
2104         this: TyAndLayout<'tcx>,
2105         cx: &C,
2106         variant_index: VariantIdx,
2107     ) -> TyAndLayout<'tcx> {
2108         let layout = match this.variants {
2109             Variants::Single { index }
2110                 // If all variants but one are uninhabited, the variant layout is the enum layout.
2111                 if index == variant_index &&
2112                 // Don't confuse variants of uninhabited enums with the enum itself.
2113                 // For more details see https://github.com/rust-lang/rust/issues/69763.
2114                 this.fields != FieldsShape::Primitive =>
2115             {
2116                 this.layout
2117             }
2118
2119             Variants::Single { index } => {
2120                 // Deny calling for_variant more than once for non-Single enums.
2121                 if let Ok(original_layout) = cx.layout_of(this.ty).to_result() {
2122                     assert_eq!(original_layout.variants, Variants::Single { index });
2123                 }
2124
2125                 let fields = match this.ty.kind() {
2126                     ty::Adt(def, _) if def.variants.is_empty() =>
2127                         bug!("for_variant called on zero-variant enum"),
2128                     ty::Adt(def, _) => def.variants[variant_index].fields.len(),
2129                     _ => bug!(),
2130                 };
2131                 let tcx = cx.tcx();
2132                 tcx.intern_layout(Layout {
2133                     variants: Variants::Single { index: variant_index },
2134                     fields: match NonZeroUsize::new(fields) {
2135                         Some(fields) => FieldsShape::Union(fields),
2136                         None => FieldsShape::Arbitrary { offsets: vec![], memory_index: vec![] },
2137                     },
2138                     abi: Abi::Uninhabited,
2139                     largest_niche: None,
2140                     align: tcx.data_layout.i8_align,
2141                     size: Size::ZERO,
2142                 })
2143             }
2144
2145             Variants::Multiple { ref variants, .. } => &variants[variant_index],
2146         };
2147
2148         assert_eq!(layout.variants, Variants::Single { index: variant_index });
2149
2150         TyAndLayout { ty: this.ty, layout }
2151     }
2152
2153     fn field(this: TyAndLayout<'tcx>, cx: &C, i: usize) -> C::TyAndLayout {
2154         enum TyMaybeWithLayout<C: LayoutOf> {
2155             Ty(C::Ty),
2156             TyAndLayout(C::TyAndLayout),
2157         }
2158
2159         fn ty_and_layout_kind<
2160             C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout: MaybeResult<TyAndLayout<'tcx>>>
2161                 + HasTyCtxt<'tcx>
2162                 + HasParamEnv<'tcx>,
2163         >(
2164             this: TyAndLayout<'tcx>,
2165             cx: &C,
2166             i: usize,
2167             ty: C::Ty,
2168         ) -> TyMaybeWithLayout<C> {
2169             let tcx = cx.tcx();
2170             let tag_layout = |tag: &Scalar| -> C::TyAndLayout {
2171                 let layout = Layout::scalar(cx, tag.clone());
2172                 MaybeResult::from(Ok(TyAndLayout {
2173                     layout: tcx.intern_layout(layout),
2174                     ty: tag.value.to_ty(tcx),
2175                 }))
2176             };
2177
2178             match *ty.kind() {
2179                 ty::Bool
2180                 | ty::Char
2181                 | ty::Int(_)
2182                 | ty::Uint(_)
2183                 | ty::Float(_)
2184                 | ty::FnPtr(_)
2185                 | ty::Never
2186                 | ty::FnDef(..)
2187                 | ty::GeneratorWitness(..)
2188                 | ty::Foreign(..)
2189                 | ty::Dynamic(..) => bug!("TyAndLayout::field_type({:?}): not applicable", this),
2190
2191                 // Potentially-fat pointers.
2192                 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
2193                     assert!(i < this.fields.count());
2194
2195                     // Reuse the fat `*T` type as its own thin pointer data field.
2196                     // This provides information about, e.g., DST struct pointees
2197                     // (which may have no non-DST form), and will work as long
2198                     // as the `Abi` or `FieldsShape` is checked by users.
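                    // E.g. (illustrative): for `&[u8]`, field 0 is the (thin)
                    // data pointer and field 1 is the `usize` length; for
                    // `&dyn Trait`, field 1 is the vtable pointer, modeled
                    // below as a reference to a `[usize; 3]`.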
2199                     if i == 0 {
2200                         let nil = tcx.mk_unit();
2201                         let ptr_ty = if ty.is_unsafe_ptr() {
2202                             tcx.mk_mut_ptr(nil)
2203                         } else {
2204                             tcx.mk_mut_ref(tcx.lifetimes.re_static, nil)
2205                         };
2206                         return TyMaybeWithLayout::TyAndLayout(MaybeResult::from(
2207                             cx.layout_of(ptr_ty).to_result().map(|mut ptr_layout| {
2208                                 ptr_layout.ty = ty;
2209                                 ptr_layout
2210                             }),
2211                         ));
2212                     }
2213
2214                     match tcx.struct_tail_erasing_lifetimes(pointee, cx.param_env()).kind() {
2215                         ty::Slice(_) | ty::Str => TyMaybeWithLayout::Ty(tcx.types.usize),
2216                         ty::Dynamic(_, _) => {
2217                             TyMaybeWithLayout::Ty(tcx.mk_imm_ref(
2218                                 tcx.lifetimes.re_static,
2219                                 tcx.mk_array(tcx.types.usize, 3),
2220                             ))
2221                             /* FIXME: use actual fn pointers
2222                             Warning: naively computing the number of entries in the
2223                             vtable by counting the methods on the trait + methods on
2224                             all parent traits does not work, because some methods can
2225                             be not object safe and thus excluded from the vtable.
2226                             Increase this counter if you tried to implement this but
2227                             failed to do it without duplicating a lot of code from
2228                             other places in the compiler: 2
2229                             tcx.mk_tup(&[
2230                                 tcx.mk_array(tcx.types.usize, 3),
2231                                 tcx.mk_array(Option<fn()>),
2232                             ])
2233                             */
2234                         }
2235                         _ => bug!("TyAndLayout::field_type({:?}): not applicable", this),
2236                     }
2237                 }
2238
2239                 // Arrays and slices.
2240                 ty::Array(element, _) | ty::Slice(element) => TyMaybeWithLayout::Ty(element),
2241                 ty::Str => TyMaybeWithLayout::Ty(tcx.types.u8),
2242
2243                 // Tuples, generators and closures.
2244                 ty::Closure(_, ref substs) => {
2245                     ty_and_layout_kind(this, cx, i, substs.as_closure().tupled_upvars_ty())
2246                 }
2247
2248                 ty::Generator(def_id, ref substs, _) => match this.variants {
2249                     Variants::Single { index } => TyMaybeWithLayout::Ty(
2250                         substs
2251                             .as_generator()
2252                             .state_tys(def_id, tcx)
2253                             .nth(index.as_usize())
2254                             .unwrap()
2255                             .nth(i)
2256                             .unwrap(),
2257                     ),
2258                     Variants::Multiple { ref tag, tag_field, .. } => {
2259                         if i == tag_field {
2260                             return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
2261                         }
2262                         TyMaybeWithLayout::Ty(substs.as_generator().prefix_tys().nth(i).unwrap())
2263                     }
2264                 },
2265
2266                 ty::Tuple(tys) => TyMaybeWithLayout::Ty(tys[i].expect_ty()),
2267
2268                 // ADTs.
2269                 ty::Adt(def, substs) => {
2270                     match this.variants {
2271                         Variants::Single { index } => {
2272                             TyMaybeWithLayout::Ty(def.variants[index].fields[i].ty(tcx, substs))
2273                         }
2274
2275                         // Discriminant field for enums (where applicable).
2276                         Variants::Multiple { ref tag, .. } => {
2277                             assert_eq!(i, 0);
2278                             return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
2279                         }
2280                     }
2281                 }
2282
2283                 ty::Projection(_)
2284                 | ty::Bound(..)
2285                 | ty::Placeholder(..)
2286                 | ty::Opaque(..)
2287                 | ty::Param(_)
2288                 | ty::Infer(_)
2289                 | ty::Error(_) => bug!("TyAndLayout::field_type: unexpected type `{}`", this.ty),
2290             }
2291         }
2292
2293         cx.layout_of(match ty_and_layout_kind(this, cx, i, this.ty) {
2294             TyMaybeWithLayout::Ty(result) => result,
2295             TyMaybeWithLayout::TyAndLayout(result) => return result,
2296         })
2297     }
2298
2299     fn pointee_info_at(this: TyAndLayout<'tcx>, cx: &C, offset: Size) -> Option<PointeeInfo> {
2300         let addr_space_of_ty = |ty: Ty<'tcx>| {
2301             if ty.is_fn() { cx.data_layout().instruction_address_space } else { AddressSpace::DATA }
2302         };
2303
2304         let pointee_info = match *this.ty.kind() {
2305             ty::RawPtr(mt) if offset.bytes() == 0 => {
2306                 cx.layout_of(mt.ty).to_result().ok().map(|layout| PointeeInfo {
2307                     size: layout.size,
2308                     align: layout.align.abi,
2309                     safe: None,
2310                     address_space: addr_space_of_ty(mt.ty),
2311                 })
2312             }
2313             ty::FnPtr(fn_sig) if offset.bytes() == 0 => {
2314                 cx.layout_of(cx.tcx().mk_fn_ptr(fn_sig)).to_result().ok().map(|layout| {
2315                     PointeeInfo {
2316                         size: layout.size,
2317                         align: layout.align.abi,
2318                         safe: None,
2319                         address_space: cx.data_layout().instruction_address_space,
2320                     }
2321                 })
2322             }
2323             ty::Ref(_, ty, mt) if offset.bytes() == 0 => {
2324                 let address_space = addr_space_of_ty(ty);
2325                 let tcx = cx.tcx();
2326                 let is_freeze = ty.is_freeze(tcx.at(DUMMY_SP), cx.param_env());
2327                 let kind = match mt {
2328                     hir::Mutability::Not => {
2329                         if is_freeze {
2330                             PointerKind::Frozen
2331                         } else {
2332                             PointerKind::Shared
2333                         }
2334                     }
2335                     hir::Mutability::Mut => {
2336                         // Previously we would only emit noalias annotations for LLVM >= 6 or in
2337                         // panic=abort mode. That was deemed right, as prior versions had many bugs
2338                         // in conjunction with unwinding, but later versions didn’t seem to have
2339                         // said issues. See issue #31681.
2340                         //
2341                         // Alas, later on we encountered a case where noalias would generate wrong
2342                         // code altogether even with recent versions of LLVM in *safe* code with no
2343                         // unwinding involved. See #54462.
2344                         //
2345                         // For now, do not enable mutable_noalias by default at all, while the
2346                         // issue is being figured out.
2347                         if tcx.sess.opts.debugging_opts.mutable_noalias {
2348                             PointerKind::UniqueBorrowed
2349                         } else {
2350                             PointerKind::Shared
2351                         }
2352                     }
2353                 };
2354
2355                 cx.layout_of(ty).to_result().ok().map(|layout| PointeeInfo {
2356                     size: layout.size,
2357                     align: layout.align.abi,
2358                     safe: Some(kind),
2359                     address_space,
2360                 })
2361             }
2362
2363             _ => {
2364                 let mut data_variant = match this.variants {
2365                     // Within the discriminant field, only the niche itself is
2366                     // always initialized, so we only check for a pointer at its
2367                     // offset.
2368                     //
2369                     // If the niche is a pointer, it's either valid (according
2370                     // to its type), or null (which the niche field's scalar
2371                     // validity range encodes). This allows using
2372                     // `dereferenceable_or_null` for, e.g., `Option<&T>`, and
2373                     // this will continue to work as long as we don't start
2374                     // using more niches than just null (e.g., the first page of
2375                     // the address space, or unaligned pointers).
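                         // Concretely (an illustrative sketch): `Option<&u32>` is
                         // pointer-sized, `None` is the all-zero bit pattern, and
                         // the niche at offset 0 is thus either a valid `&u32` or
                         // null, which is exactly what `dereferenceable_or_null(4)`
                         // expresses to LLVM.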
2376                     Variants::Multiple {
2377                         tag_encoding: TagEncoding::Niche { dataful_variant, .. },
2378                         tag_field,
2379                         ..
2380                     } if this.fields.offset(tag_field) == offset => {
2381                         Some(this.for_variant(cx, dataful_variant))
2382                     }
2383                     _ => Some(this),
2384                 };
2385
2386                 if let Some(variant) = data_variant {
2387                     // We're not interested in any unions.
2388                     if let FieldsShape::Union(_) = variant.fields {
2389                         data_variant = None;
2390                     }
2391                 }
2392
2393                 let mut result = None;
2394
2395                 if let Some(variant) = data_variant {
2396                     let ptr_end = offset + Pointer.size(cx);
2397                     for i in 0..variant.fields.count() {
2398                         let field_start = variant.fields.offset(i);
2399                         if field_start <= offset {
2400                             let field = variant.field(cx, i);
2401                             result = field.to_result().ok().and_then(|field| {
2402                                 if ptr_end <= field_start + field.size {
2403                                     // We found the right field; recurse into it,
2404                                     // adjusting the offset to be relative to the
2405                                     // start of this field.
2406                                     field.pointee_info_at(cx, offset - field_start)
2407                                 } else {
2408                                     None
2409                                 }
2410                             });
2411                             if result.is_some() {
2412                                 break;
2413                             }
2414                         }
2415                     }
2416                 }
2417
2418                 // FIXME(eddyb) This should be for `ptr::Unique<T>`, not `Box<T>`.
2419                 if let Some(ref mut pointee) = result {
2420                     if let ty::Adt(def, _) = this.ty.kind() {
2421                         if def.is_box() && offset.bytes() == 0 {
2422                             pointee.safe = Some(PointerKind::UniqueOwned);
2423                         }
2424                     }
2425                 }
2426
2427                 result
2428             }
2429         };
2430
2431         debug!(
2432             "pointee_info_at (offset={:?}, type kind: {:?}) => {:?}",
2433             offset,
2434             this.ty.kind(),
2435             pointee_info
2436         );
2437
2438         pointee_info
2439     }
2440 }
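
     // Usage sketch: `adjust_for_rust_scalar` further down in this file queries
     // `pointee_info_at` for every scalar pointer in an argument's layout, along
     // these lines (illustrative):
     //
     //     if let Some(pointee) = layout.pointee_info_at(cx, offset) {
     //         // `pointee.size`/`pointee.align` feed the `dereferenceable` and
     //         // alignment attributes; `pointee.safe` drives `noalias`/`readonly`.
     //     }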
2441
2442 impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for LayoutError<'tcx> {
2443     fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
2444         use crate::ty::layout::LayoutError::*;
2445         mem::discriminant(self).hash_stable(hcx, hasher);
2446
2447         match *self {
2448             Unknown(t) | SizeOverflow(t) => t.hash_stable(hcx, hasher),
2449         }
2450     }
2451 }
2452
2453 impl<'tcx> ty::Instance<'tcx> {
2454     // NOTE(eddyb) this is private to avoid using it from outside of
2455     // `FnAbi::of_instance` - any other uses are either too high-level
2456     // for `Instance` (e.g. typeck would use `Ty::fn_sig` instead)
2457     // or should go through `FnAbi`, to avoid losing any adjustments
2458     // `FnAbi::of_instance` might be performing.
2459     fn fn_sig_for_fn_abi(&self, tcx: TyCtxt<'tcx>) -> ty::PolyFnSig<'tcx> {
2460         // FIXME(davidtwco,eddyb): A `ParamEnv` should be passed through to this function.
2461         let ty = self.ty(tcx, ty::ParamEnv::reveal_all());
2462         match *ty.kind() {
2463             ty::FnDef(..) => {
2464                 // HACK(davidtwco,eddyb): This is a workaround for polymorphization considering
2465                 // parameters unused if they show up in the signature, but not in the `mir::Body`
2466                 // (i.e. due to being inside a projection that got normalized, see
2467                 // `src/test/ui/polymorphization/normalized_sig_types.rs`), and codegen not keeping
2468                 // track of a polymorphization `ParamEnv` to allow normalizing later.
2469                 let mut sig = match *ty.kind() {
2470                     ty::FnDef(def_id, substs) => tcx
2471                         .normalize_erasing_regions(tcx.param_env(def_id), tcx.fn_sig(def_id))
2472                         .subst(tcx, substs),
2473                     _ => unreachable!(),
2474                 };
2475
2476                 if let ty::InstanceDef::VtableShim(..) = self.def {
2477                     // Modify `fn(self, ...)` to `fn(self: *mut Self, ...)`.
2478                     sig = sig.map_bound(|mut sig| {
2479                         let mut inputs_and_output = sig.inputs_and_output.to_vec();
2480                         inputs_and_output[0] = tcx.mk_mut_ptr(inputs_and_output[0]);
2481                         sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);
2482                         sig
2483                     });
2484                 }
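                     // E.g. (an illustrative sketch): a by-value trait method
                     // `fn consume(self)` thereby gets the shim signature
                     // `fn consume(self: *mut Self)`, which can be called
                     // through the vtable with a thin pointer.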
2485                 sig
2486             }
2487             ty::Closure(def_id, substs) => {
2488                 let sig = substs.as_closure().sig();
2489
2490                 let env_ty = tcx.closure_env_ty(def_id, substs).unwrap();
2491                 sig.map_bound(|sig| {
2492                     tcx.mk_fn_sig(
2493                         iter::once(env_ty.skip_binder()).chain(sig.inputs().iter().cloned()),
2494                         sig.output(),
2495                         sig.c_variadic,
2496                         sig.unsafety,
2497                         sig.abi,
2498                     )
2499                 })
2500             }
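                 // The arm above turns an `Fn` closure `|x: u32| -> bool` into,
                 // illustratively, `extern "rust-call" fn(&Closure, (u32,)) -> bool`:
                 // the captured environment becomes an explicit first argument
                 // ahead of the tupled parameters.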
2501             ty::Generator(_, substs, _) => {
2502                 let sig = substs.as_generator().poly_sig();
2503
2504                 let br = ty::BoundRegion { kind: ty::BrEnv };
2505                 let env_region = ty::ReLateBound(ty::INNERMOST, br);
2506                 let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty);
2507
2508                 let pin_did = tcx.require_lang_item(LangItem::Pin, None);
2509                 let pin_adt_ref = tcx.adt_def(pin_did);
2510                 let pin_substs = tcx.intern_substs(&[env_ty.into()]);
2511                 let env_ty = tcx.mk_adt(pin_adt_ref, pin_substs);
2512
2513                 sig.map_bound(|sig| {
2514                     let state_did = tcx.require_lang_item(LangItem::GeneratorState, None);
2515                     let state_adt_ref = tcx.adt_def(state_did);
2516                     let state_substs =
2517                         tcx.intern_substs(&[sig.yield_ty.into(), sig.return_ty.into()]);
2518                     let ret_ty = tcx.mk_adt(state_adt_ref, state_substs);
2519
2520                     tcx.mk_fn_sig(
2521                         [env_ty, sig.resume_ty].iter(),
2522                         &ret_ty,
2523                         false,
2524                         hir::Unsafety::Normal,
2525                         rustc_target::spec::abi::Abi::Rust,
2526                     )
2527                 })
2528             }
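                 // Schematically, the arm above produces (illustrative):
                 //
                 //     fn(Pin<&mut Generator>, ResumeTy) -> GeneratorState<YieldTy, ReturnTy>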
2529             _ => bug!("unexpected type {:?} in Instance::fn_sig", ty),
2530         }
2531     }
2532 }
2533
2534 pub trait FnAbiExt<'tcx, C>
2535 where
2536     C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout = TyAndLayout<'tcx>>
2537         + HasDataLayout
2538         + HasTargetSpec
2539         + HasTyCtxt<'tcx>
2540         + HasParamEnv<'tcx>,
2541 {
2542     /// Compute a `FnAbi` suitable for indirect calls, i.e. to `fn` pointers.
2543     ///
2544     /// NB: this doesn't handle virtual calls - those should use `FnAbi::of_instance`
2545     /// instead, where the instance is an `InstanceDef::Virtual`.
2546     fn of_fn_ptr(cx: &C, sig: ty::PolyFnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self;
2547
2548     /// Compute a `FnAbi` suitable for declaring/defining an `fn` instance, and for
2549     /// direct calls to an `fn`.
2550     ///
2551     /// NB: that includes virtual calls, which are represented by "direct calls"
2552     /// to an `InstanceDef::Virtual` instance (of `<dyn Trait as Trait>::fn`).
2553     fn of_instance(cx: &C, instance: ty::Instance<'tcx>, extra_args: &[Ty<'tcx>]) -> Self;
2554
2555     fn new_internal(
2556         cx: &C,
2557         sig: ty::PolyFnSig<'tcx>,
2558         extra_args: &[Ty<'tcx>],
2559         caller_location: Option<Ty<'tcx>>,
2560         codegen_fn_attr_flags: CodegenFnAttrFlags,
2561         make_self_ptr_thin: bool,
2562     ) -> Self;
2563     fn adjust_for_abi(&mut self, cx: &C, abi: SpecAbi);
2564 }
2565
2566 fn fn_can_unwind(
2567     panic_strategy: PanicStrategy,
2568     codegen_fn_attr_flags: CodegenFnAttrFlags,
2569     call_conv: Conv,
2570 ) -> bool {
2571     if panic_strategy != PanicStrategy::Unwind {
2572         // In panic=abort mode we assume nothing can unwind anywhere, so
2573         // optimize based on this!
2574         false
2575     } else if codegen_fn_attr_flags.contains(CodegenFnAttrFlags::UNWIND) {
2576         // If a specific #[unwind] attribute is present, use that.
2577         true
2578     } else if codegen_fn_attr_flags.contains(CodegenFnAttrFlags::RUSTC_ALLOCATOR_NOUNWIND) {
2579         // Special attribute for allocator functions, which can't unwind.
2580         false
2581     } else if call_conv == Conv::Rust {
2582         // Any Rust method (or `extern "Rust" fn` or `extern
2583         // "rust-call" fn`) is explicitly allowed to unwind
2584         // (unless it has a no-unwind attribute, which is
2585         // handled above).
2586         true
2587     } else {
2588         // Anything else is either:
2589         //
2590         //  1. A foreign item using a non-Rust ABI (like `extern "C" { fn foo(); }`), or
2591         //
2592         //  2. A Rust item using a non-Rust ABI (like `extern "C" fn foo() { ... }`).
2593         //
2594         // Foreign items (case 1) are assumed not to unwind; it is
2595         // UB otherwise. (At least for now; see also
2596         // rust-lang/rust#63909 and Rust RFC 2753.)
2597         //
2598         // Items defined in Rust with non-Rust ABIs (case 2) are also
2599         // not supposed to unwind. Whether this should be enforced
2600         // (versus stating that it is UB) and *how* it would be
2601         // enforced is currently under discussion; see
2602         // rust-lang/rust#58794.
2603         //
2604         // In either case, we mark the item as explicitly nounwind.
2605         false
2606     }
2607 }
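
     // E.g. (illustrative): under `-Cpanic=unwind`, a plain `fn f() {}`
     // (which uses `Conv::Rust`) is considered able to unwind, while
     // `extern "C" fn g() {}` is treated as nounwind; under `-Cpanic=abort`,
     // everything is nounwind.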
2608
2609 impl<'tcx, C> FnAbiExt<'tcx, C> for call::FnAbi<'tcx, Ty<'tcx>>
2610 where
2611     C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout = TyAndLayout<'tcx>>
2612         + HasDataLayout
2613         + HasTargetSpec
2614         + HasTyCtxt<'tcx>
2615         + HasParamEnv<'tcx>,
2616 {
2617     fn of_fn_ptr(cx: &C, sig: ty::PolyFnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
2618         // Assume that fn pointers may always unwind
2619         let codegen_fn_attr_flags = CodegenFnAttrFlags::UNWIND;
2620
2621         call::FnAbi::new_internal(cx, sig, extra_args, None, codegen_fn_attr_flags, false)
2622     }
2623
2624     fn of_instance(cx: &C, instance: ty::Instance<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
2625         let sig = instance.fn_sig_for_fn_abi(cx.tcx());
2626
2627         let caller_location = if instance.def.requires_caller_location(cx.tcx()) {
2628             Some(cx.tcx().caller_location_ty())
2629         } else {
2630             None
2631         };
2632
2633         let attrs = cx.tcx().codegen_fn_attrs(instance.def_id()).flags;
2634
2635         call::FnAbi::new_internal(
2636             cx,
2637             sig,
2638             extra_args,
2639             caller_location,
2640             attrs,
2641             matches!(instance.def, ty::InstanceDef::Virtual(..)),
2642         )
2643     }
2644
2645     fn new_internal(
2646         cx: &C,
2647         sig: ty::PolyFnSig<'tcx>,
2648         extra_args: &[Ty<'tcx>],
2649         caller_location: Option<Ty<'tcx>>,
2650         codegen_fn_attr_flags: CodegenFnAttrFlags,
2651         force_thin_self_ptr: bool,
2652     ) -> Self {
2653         debug!("FnAbi::new_internal({:?}, {:?})", sig, extra_args);
2654
2655         let sig = cx.tcx().normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), sig);
2656
2657         use rustc_target::spec::abi::Abi::*;
2658         let conv = match cx.tcx().sess.target.adjust_abi(sig.abi) {
2659             RustIntrinsic | PlatformIntrinsic | Rust | RustCall => Conv::Rust,
2660
2661             // It's the ABI's job to select this, not ours.
2662             System => bug!("system abi should be selected elsewhere"),
2663             EfiApi => bug!("eficall abi should be selected elsewhere"),
2664
2665             Stdcall => Conv::X86Stdcall,
2666             Fastcall => Conv::X86Fastcall,
2667             Vectorcall => Conv::X86VectorCall,
2668             Thiscall => Conv::X86ThisCall,
2669             C => Conv::C,
2670             Unadjusted => Conv::C,
2671             Win64 => Conv::X86_64Win64,
2672             SysV64 => Conv::X86_64SysV,
2673             Aapcs => Conv::ArmAapcs,
2674             CCmseNonSecureCall => Conv::CCmseNonSecureCall,
2675             PtxKernel => Conv::PtxKernel,
2676             Msp430Interrupt => Conv::Msp430Intr,
2677             X86Interrupt => Conv::X86Intr,
2678             AmdGpuKernel => Conv::AmdGpuKernel,
2679             AvrInterrupt => Conv::AvrInterrupt,
2680             AvrNonBlockingInterrupt => Conv::AvrNonBlockingInterrupt,
2681
2682             // These API constants ought to be more specific...
2683             Cdecl => Conv::C,
2684         };
2685
2686         let mut inputs = sig.inputs();
2687         let extra_args = if sig.abi == RustCall {
2688             assert!(!sig.c_variadic && extra_args.is_empty());
2689
2690             if let Some(input) = sig.inputs().last() {
2691                 if let ty::Tuple(tupled_arguments) = input.kind() {
2692                     inputs = &sig.inputs()[0..sig.inputs().len() - 1];
2693                     tupled_arguments.iter().map(|k| k.expect_ty()).collect()
2694                 } else {
2695                     bug!(
2696                         "argument to function with \"rust-call\" ABI \
2697                             is not a tuple"
2698                     );
2699                 }
2700             } else {
2701                 bug!(
2702                     "argument to function with \"rust-call\" ABI \
2703                         is not a tuple"
2704                 );
2705             }
2706         } else {
2707             assert!(sig.c_variadic || extra_args.is_empty());
2708             extra_args.to_vec()
2709         };
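             // E.g. (illustrative): the untupling above turns
             // `extern "rust-call" fn call(self, args: (u32, bool))` into
             // `inputs == [self]` plus `extra_args == [u32, bool]`, as if the
             // tuple elements were ordinary parameters.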
2710
2711         let target = &cx.tcx().sess.target;
2712         let target_env_gnu_like = matches!(&target.env[..], "gnu" | "musl");
2713         let win_x64_gnu = target.os == "windows" && target.arch == "x86_64" && target.env == "gnu";
2714         let linux_s390x_gnu_like =
2715             target.os == "linux" && target.arch == "s390x" && target_env_gnu_like;
2716         let linux_sparc64_gnu_like =
2717             target.os == "linux" && target.arch == "sparc64" && target_env_gnu_like;
2718         let linux_powerpc_gnu_like =
2719             target.os == "linux" && target.arch == "powerpc" && target_env_gnu_like;
2720         let rust_abi = matches!(sig.abi, RustIntrinsic | PlatformIntrinsic | Rust | RustCall);
2721
2722         // Handle safe Rust thin and fat pointers.
2723         let adjust_for_rust_scalar = |attrs: &mut ArgAttributes,
2724                                       scalar: &Scalar,
2725                                       layout: TyAndLayout<'tcx>,
2726                                       offset: Size,
2727                                       is_return: bool| {
2728             // Booleans are always an i1 that needs to be zero-extended.
2729             if scalar.is_bool() {
2730                 attrs.ext(ArgExtension::Zext);
2731                 return;
2732             }
2733
2734             // Only pointer types handled below.
2735             if scalar.value != Pointer {
2736                 return;
2737             }
2738
2739             if scalar.valid_range.start() < scalar.valid_range.end()
2740                 && *scalar.valid_range.start() > 0
2741             {
2742                 attrs.set(ArgAttribute::NonNull);
2743             }
2744
2745             if let Some(pointee) = layout.pointee_info_at(cx, offset) {
2746                 if let Some(kind) = pointee.safe {
2747                     attrs.pointee_align = Some(pointee.align);
2748
2749                     // `Box` pointers (`UniqueOwned`) are not necessarily dereferenceable
2750                     // for the entire duration of the function, as they can be deallocated
2751                     // at any time. Set their valid size to 0.
2752                     attrs.pointee_size = match kind {
2753                         PointerKind::UniqueOwned => Size::ZERO,
2754                         _ => pointee.size,
2755                     };
2756
2757                     // `Box` pointer parameters never alias because ownership is
2758                     // transferred, and `&mut` pointer parameters never alias other
2759                     // parameters or mutable global data.
2760                     //
2761                     // `&T` where `T` contains no `UnsafeCell<U>` is immutable,
2762                     // and can be marked as both `readonly` and `noalias`, as
2763                     // LLVM's definition of `noalias` is based solely on memory
2764                     // dependencies rather than pointer equality.
2765                     let no_alias = match kind {
2766                         PointerKind::Shared => false,
2767                         PointerKind::UniqueOwned => true,
2768                         PointerKind::Frozen | PointerKind::UniqueBorrowed => !is_return,
2769                     };
2770                     if no_alias {
2771                         attrs.set(ArgAttribute::NoAlias);
2772                     }
2773
2774                     if kind == PointerKind::Frozen && !is_return {
2775                         attrs.set(ArgAttribute::ReadOnly);
2776                     }
2777                 }
2778             }
2779         };
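             // Net effect of the closure above (an illustrative summary): an
             // argument `&T` with `T: Freeze` gets `noalias` and `readonly`
             // plus pointee alignment and a pointee size of `size_of::<T>()`;
             // `Box<T>` gets `noalias` but pointee size 0; `&Cell<T>` keeps
             // only the pointee alignment and size information.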
2780
2781         let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| {
2782             let is_return = arg_idx.is_none();
2783
2784             let layout = cx.layout_of(ty);
2785             let layout = if force_thin_self_ptr && arg_idx == Some(0) {
2786                 // Don't pass the vtable; it's not an argument of the virtual fn.
2787                 // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
2788                 // or `&/&mut dyn Trait`, because this is special-cased elsewhere in codegen.
2789                 make_thin_self_ptr(cx, layout)
2790             } else {
2791                 layout
2792             };
2793
2794             let mut arg = ArgAbi::new(cx, layout, |layout, scalar, offset| {
2795                 let mut attrs = ArgAttributes::new();
2796                 adjust_for_rust_scalar(&mut attrs, scalar, *layout, offset, is_return);
2797                 attrs
2798             });
2799
2800             if arg.layout.is_zst() {
2801                 // For some forsaken reason, x86_64-pc-windows-gnu
2802                 // doesn't ignore zero-sized struct arguments.
2803                 // The same is true for {s390x,sparc64,powerpc}-unknown-linux-{gnu,musl}.
2804                 if is_return
2805                     || rust_abi
2806                     || (!win_x64_gnu
2807                         && !linux_s390x_gnu_like
2808                         && !linux_sparc64_gnu_like
2809                         && !linux_powerpc_gnu_like)
2810                 {
2811                     arg.mode = PassMode::Ignore;
2812                 }
2813             }
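                 // E.g. (illustrative): in `fn f(_: (), x: u32)` the `()`
                 // argument is dropped entirely via `PassMode::Ignore`,
                 // except on the targets listed above when a non-Rust ABI
                 // is used.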
2814
2815             arg
2816         };
2817
2818         let mut fn_abi = FnAbi {
2819             ret: arg_of(sig.output(), None),
2820             args: inputs
2821                 .iter()
2822                 .cloned()
2823                 .chain(extra_args)
2824                 .chain(caller_location)
2825                 .enumerate()
2826                 .map(|(i, ty)| arg_of(ty, Some(i)))
2827                 .collect(),
2828             c_variadic: sig.c_variadic,
2829             fixed_count: inputs.len(),
2830             conv,
2831             can_unwind: fn_can_unwind(cx.tcx().sess.panic_strategy(), codegen_fn_attr_flags, conv),
2832         };
2833         fn_abi.adjust_for_abi(cx, sig.abi);
2834         debug!("FnAbi::new_internal = {:?}", fn_abi);
2835         fn_abi
2836     }
2837
2838     fn adjust_for_abi(&mut self, cx: &C, abi: SpecAbi) {
2839         if abi == SpecAbi::Unadjusted {
2840             return;
2841         }
2842
2843         if abi == SpecAbi::Rust
2844             || abi == SpecAbi::RustCall
2845             || abi == SpecAbi::RustIntrinsic
2846             || abi == SpecAbi::PlatformIntrinsic
2847         {
2848             let fixup = |arg: &mut ArgAbi<'tcx, Ty<'tcx>>| {
2849                 if arg.is_ignore() {
2850                     return;
2851                 }
2852
2853                 match arg.layout.abi {
2854                     Abi::Aggregate { .. } => {}
2855
2856                     // This is a fun case! The gist of what this is doing is
2857                     // that we want callers and callees to always agree on the
2858                     // ABI of how they pass SIMD arguments. If we were to *not*
2859                     // make these arguments indirect then they'd be immediates
2860                     // in LLVM, which means that they'd use whatever the
2861                     // appropriate ABI is for the callee and the caller. That
2862                     // means, for example, if the caller doesn't have AVX
2863                     // enabled but the callee does, then passing an AVX argument
2864                     // across this boundary would cause corrupt data to show up.
2865                     //
2866                     // This problem is fixed by unconditionally passing SIMD
2867                     // arguments through memory between callers and callees
2868                     // which should get them all to agree on ABI regardless of
2869                     // target feature sets. Some more information about this
2870                     // issue can be found in #44367.
2871                     //
2872                     // Note that the platform intrinsic ABI is exempt here, as
2873                     // that's how we connect up to LLVM; it's unstable anyway,
2874                     // and we control all calls to it in libstd.
2875                     Abi::Vector { .. }
2876                         if abi != SpecAbi::PlatformIntrinsic
2877                             && cx.tcx().sess.target.simd_types_indirect =>
2878                     {
2879                         arg.make_indirect();
2880                         return;
2881                     }
2882
2883                     _ => return,
2884                 }
2885
2886                 // Pass and return structures up to 2 pointers in size by value, matching `ScalarPair`.
2887                 // LLVM will usually pass these in 2 registers, which is more efficient than by-ref.
2888                 let max_by_val_size = Pointer.size(cx) * 2;
2889                 let size = arg.layout.size;
2890
2891                 if arg.layout.is_unsized() || size > max_by_val_size {
2892                     arg.make_indirect();
2893                 } else {
2894                     // We want to pass small aggregates as immediates, but using
2895                     // a LLVM aggregate type for this leads to bad optimizations,
2896                     // so we pick an appropriately sized integer type instead.
2897                     arg.cast_to(Reg { kind: RegKind::Integer, size });
2898                 }
2899             };
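                 // E.g. (illustrative, on a 64-bit target): the fixup above
                 // casts a 12-byte `struct S(u32, u32, u32)`, which is under
                 // the 16-byte limit, to a 12-byte integer register class,
                 // while a 24-byte struct is made indirect and passed by
                 // pointer.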
2900             fixup(&mut self.ret);
2901             for arg in &mut self.args {
2902                 fixup(arg);
2903             }
2904             return;
2905         }
2906
2907         if let Err(msg) = self.adjust_for_cabi(cx, abi) {
2908             cx.tcx().sess.fatal(&msg);
2909         }
2910     }
2911 }
2912
2913 fn make_thin_self_ptr<'tcx, C>(cx: &C, mut layout: TyAndLayout<'tcx>) -> TyAndLayout<'tcx>
2914 where
2915     C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout = TyAndLayout<'tcx>>
2916         + HasTyCtxt<'tcx>
2917         + HasParamEnv<'tcx>,
2918 {
2919     let fat_pointer_ty = if layout.is_unsized() {
2920         // unsized `self` is passed as a pointer to `self`
2921         // FIXME (mikeyhew) change this to use &own if it is ever added to the language
2922         cx.tcx().mk_mut_ptr(layout.ty)
2923     } else {
2924         match layout.abi {
2925             Abi::ScalarPair(..) => (),
2926             _ => bug!("receiver type has unsupported layout: {:?}", layout),
2927         }
2928
2929         // In the case of `Rc<Self>`, we need to explicitly pass a `*mut RcBox<Self>`
2930         // with a Scalar (not ScalarPair) ABI. This is a hack that is understood
2931         // elsewhere in the compiler as a method on a `dyn Trait`.
2932         // To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes until
2933         // we get a built-in pointer type.
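             // E.g. (illustrative): starting from `Rc<Self>`, the loop below
             // descends `Rc<Self>` -> `NonNull<RcBox<Self>>` ->
             // `*const RcBox<Self>` and stops at the first built-in pointer
             // type it reaches.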
2934         let mut fat_pointer_layout = layout;
2935         'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr()
2936             && !fat_pointer_layout.ty.is_region_ptr()
2937         {
2938             for i in 0..fat_pointer_layout.fields.count() {
2939                 let field_layout = fat_pointer_layout.field(cx, i);
2940
2941                 if !field_layout.is_zst() {
2942                     fat_pointer_layout = field_layout;
2943                     continue 'descend_newtypes;
2944                 }
2945             }
2946
2947             bug!("receiver has no non-zero-sized fields {:?}", fat_pointer_layout);
2948         }
2949
2950         fat_pointer_layout.ty
2951     };
2952
2953     // We now have a type like `*mut RcBox<dyn Trait>`. Change its layout to that
2954     // of `*mut ()`, a thin pointer, but keep the same type; this is understood
2955     // as a special case elsewhere in the compiler.
2956     let unit_pointer_ty = cx.tcx().mk_mut_ptr(cx.tcx().mk_unit());
2957     layout = cx.layout_of(unit_pointer_ty);
2958     layout.ty = fat_pointer_ty;
2959     layout
2960 }