use crate::ich::StableHashingContext;
use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use crate::mir::{GeneratorLayout, GeneratorSavedLocal};
use crate::ty::subst::Subst;
use crate::ty::{self, subst::SubstsRef, ReprOptions, Ty, TyCtxt, TypeFoldable};

use rustc_ast as ast;
use rustc_attr as attr;
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_hir as hir;
use rustc_hir::lang_items::LangItem;
use rustc_index::bit_set::BitSet;
use rustc_index::vec::{Idx, IndexVec};
use rustc_session::{DataTypeKind, FieldInfo, SizeKind, VariantInfo};
use rustc_span::symbol::{Ident, Symbol};
use rustc_span::DUMMY_SP;
use rustc_target::abi::call::{
    ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, Conv, FnAbi, PassMode, Reg, RegKind,
};
use rustc_target::abi::*;
use rustc_target::spec::{abi::Abi as SpecAbi, HasTargetSpec, PanicStrategy};

use std::cmp;
use std::fmt;
use std::iter;
use std::mem;
use std::num::NonZeroUsize;
use std::ops::Bound;

pub trait IntegerExt {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx>;
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
    fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer;
    fn from_uint_ty<C: HasDataLayout>(cx: &C, uty: ty::UintTy) -> Integer;
    fn repr_discr<'tcx>(
        tcx: TyCtxt<'tcx>,
        ty: Ty<'tcx>,
        repr: &ReprOptions,
        min: i128,
        max: i128,
    ) -> (Integer, bool);
}

impl IntegerExt for Integer {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx> {
        match (*self, signed) {
            (I8, false) => tcx.types.u8,
            (I16, false) => tcx.types.u16,
            (I32, false) => tcx.types.u32,
            (I64, false) => tcx.types.u64,
            (I128, false) => tcx.types.u128,
            (I8, true) => tcx.types.i8,
            (I16, true) => tcx.types.i16,
            (I32, true) => tcx.types.i32,
            (I64, true) => tcx.types.i64,
            (I128, true) => tcx.types.i128,
        }
    }

    /// Gets the Integer type from an attr::IntType.
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
        let dl = cx.data_layout();

        match ity {
            attr::SignedInt(ast::IntTy::I8) | attr::UnsignedInt(ast::UintTy::U8) => I8,
            attr::SignedInt(ast::IntTy::I16) | attr::UnsignedInt(ast::UintTy::U16) => I16,
            attr::SignedInt(ast::IntTy::I32) | attr::UnsignedInt(ast::UintTy::U32) => I32,
            attr::SignedInt(ast::IntTy::I64) | attr::UnsignedInt(ast::UintTy::U64) => I64,
            attr::SignedInt(ast::IntTy::I128) | attr::UnsignedInt(ast::UintTy::U128) => I128,
            attr::SignedInt(ast::IntTy::Isize) | attr::UnsignedInt(ast::UintTy::Usize) => {
                dl.ptr_sized_integer()
            }
        }
    }

    fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer {
        match ity {
            ty::IntTy::I8 => I8,
            ty::IntTy::I16 => I16,
            ty::IntTy::I32 => I32,
            ty::IntTy::I64 => I64,
            ty::IntTy::I128 => I128,
            ty::IntTy::Isize => cx.data_layout().ptr_sized_integer(),
        }
    }
    fn from_uint_ty<C: HasDataLayout>(cx: &C, ity: ty::UintTy) -> Integer {
        match ity {
            ty::UintTy::U8 => I8,
            ty::UintTy::U16 => I16,
            ty::UintTy::U32 => I32,
            ty::UintTy::U64 => I64,
            ty::UintTy::U128 => I128,
            ty::UintTy::Usize => cx.data_layout().ptr_sized_integer(),
        }
    }

    /// Finds the appropriate Integer type and signedness for the given
    /// signed discriminant range and `#[repr]` attribute.
    /// N.B.: `u128` values above `i128::MAX` will be treated as signed, but
    /// that shouldn't affect anything, other than maybe debuginfo.
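    ///
    /// For example, a discriminant range of `-5..=5` fits in a signed `I8`,
    /// and `0..=200` fits in an unsigned `I8`; with `#[repr(C)]`, the result
    /// is widened to at least `I32` on most targets (see `min_from_extern` below).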
    fn repr_discr<'tcx>(
        tcx: TyCtxt<'tcx>,
        ty: Ty<'tcx>,
        repr: &ReprOptions,
        min: i128,
        max: i128,
    ) -> (Integer, bool) {
        // Theoretically, negative values could be larger in unsigned representation
        // than the unsigned representation of the signed minimum. However, if there
        // are any negative values, the only valid unsigned representation is u128
        // which can fit all i128 values, so the result remains unaffected.
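        // (e.g. a negative `min` such as -1 casts to `u128::MAX`, forcing
        // `unsigned_fit` to I128, but any negative `min` selects the signed fit below.)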
        let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
        let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));

        let mut min_from_extern = None;
        let min_default = I8;

        if let Some(ity) = repr.int {
            let discr = Integer::from_attr(&tcx, ity);
            let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
            if discr < fit {
                bug!(
                    "Integer::repr_discr: `#[repr]` hint too small for \
                     discriminant range of enum `{}`",
                    ty
                )
            }
            return (discr, ity.is_signed());
        }

        if repr.c() {
            match &tcx.sess.target.arch[..] {
                "hexagon" => min_from_extern = Some(I8),
                // WARNING: the ARM EABI has two variants; the one corresponding
                // to `at_least == I32` appears to be used on Linux and NetBSD,
                // but some systems may use the variant corresponding to no
                // lower bound. However, we don't run on those yet...?
                "arm" => min_from_extern = Some(I32),
                _ => min_from_extern = Some(I32),
            }
        }

        let at_least = min_from_extern.unwrap_or(min_default);

        // If there are no negative values, we can use the unsigned fit.
        if min >= 0 {
            (cmp::max(unsigned_fit, at_least), false)
        } else {
            (cmp::max(signed_fit, at_least), true)
        }
    }
}

pub trait PrimitiveExt {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
    fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
}

impl PrimitiveExt for Primitive {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
        match *self {
            Int(i, signed) => i.to_ty(tcx, signed),
            F32 => tcx.types.f32,
            F64 => tcx.types.f64,
            Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
        }
    }

    /// Return an *integer* type matching this primitive.
    /// Useful in particular when dealing with enum discriminants.
    fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
        match *self {
            Int(i, signed) => i.to_ty(tcx, signed),
            Pointer => tcx.types.usize,
            F32 | F64 => bug!("floats do not have an int type"),
        }
    }
}

/// The first half of a fat pointer.
///
/// - For a trait object, this is the address of the box.
/// - For a slice, this is the base address.
pub const FAT_PTR_ADDR: usize = 0;

/// The second half of a fat pointer.
///
/// - For a trait object, this is the address of the vtable.
/// - For a slice, this is the length.
pub const FAT_PTR_EXTRA: usize = 1;

/// The maximum supported number of lanes in a SIMD vector.
///
/// This value is selected based on backend support:
/// * LLVM does not appear to have a vector width limit.
/// * Cranelift stores the base-2 log of the lane count in a 4 bit integer.
pub const MAX_SIMD_LANES: u64 = 1 << 0xF;

#[derive(Copy, Clone, Debug, TyEncodable, TyDecodable)]
pub enum LayoutError<'tcx> {
    Unknown(Ty<'tcx>),
    SizeOverflow(Ty<'tcx>),
}

impl<'tcx> fmt::Display for LayoutError<'tcx> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match *self {
            LayoutError::Unknown(ty) => write!(f, "the type `{}` has an unknown layout", ty),
            LayoutError::SizeOverflow(ty) => {
                write!(f, "values of the type `{}` are too big for the current architecture", ty)
            }
        }
    }
}

fn layout_raw<'tcx>(
    tcx: TyCtxt<'tcx>,
    query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
) -> Result<&'tcx Layout, LayoutError<'tcx>> {
    ty::tls::with_related_context(tcx, move |icx| {
        let (param_env, ty) = query.into_parts();

        if !tcx.sess.recursion_limit().value_within_limit(icx.layout_depth) {
            tcx.sess.fatal(&format!("overflow representing the type `{}`", ty));
        }

        // Update the ImplicitCtxt to increase the layout_depth
        let icx = ty::tls::ImplicitCtxt { layout_depth: icx.layout_depth + 1, ..icx.clone() };

        ty::tls::enter_context(&icx, |_| {
            let cx = LayoutCx { tcx, param_env };
            let layout = cx.layout_raw_uncached(ty);
            // Type-level uninhabitedness should always imply ABI uninhabitedness.
            if let Ok(layout) = layout {
                if tcx.conservative_is_privately_uninhabited(param_env.and(ty)) {
                    assert!(layout.abi.is_uninhabited());
                }
            }
            layout
        })
    })
}

pub fn provide(providers: &mut ty::query::Providers) {
    *providers = ty::query::Providers { layout_raw, ..*providers };
}

pub struct LayoutCx<'tcx, C> {
    pub tcx: C,
    pub param_env: ty::ParamEnv<'tcx>,
}

#[derive(Copy, Clone, Debug)]
enum StructKind {
    /// A tuple, closure, or univariant which cannot be coerced to unsized.
    AlwaysSized,
    /// A univariant, the last field of which may be coerced to unsized.
    MaybeUnsized,
    /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
    Prefixed(Size, Align),
}

// Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
// This is used to go between `memory_index` (source field order to memory order)
// and `inverse_memory_index` (memory order to source field order).
// See also `FieldsShape::Arbitrary::memory_index` for more details.
// FIXME(eddyb) build a better abstraction for permutations, if possible.
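// For example, `invert_mapping(&[2, 0, 1])` returns `[1, 2, 0]`: field 0 lives
// at memory position 2, so memory position 2 maps back to field 0.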
fn invert_mapping(map: &[u32]) -> Vec<u32> {
    let mut inverse = vec![0; map.len()];
    for i in 0..map.len() {
        inverse[map[i] as usize] = i as u32;
    }
    inverse
}

impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
    fn scalar_pair(&self, a: Scalar, b: Scalar) -> Layout {
        let dl = self.data_layout();
        let b_align = b.value.align(dl);
        let align = a.value.align(dl).max(b_align).max(dl.aggregate_align);
        let b_offset = a.value.size(dl).align_to(b_align.abi);
        let size = (b_offset + b.value.size(dl)).align_to(align.abi);
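        // e.g. for a pair of `I8` and `I64` scalars on a typical 64-bit target:
        // b_offset = 1 aligned up to 8 = 8, and size = (8 + 8).align_to(8) = 16.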

        // HACK(nox): We iterate on `b` and then `a` because `max_by_key`
        // returns the last maximum.
        let largest_niche = Niche::from_scalar(dl, b_offset, b.clone())
            .into_iter()
            .chain(Niche::from_scalar(dl, Size::ZERO, a.clone()))
            .max_by_key(|niche| niche.available(dl));

        Layout {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Arbitrary {
                offsets: vec![Size::ZERO, b_offset],
                memory_index: vec![0, 1],
            },
            abi: Abi::ScalarPair(a, b),
            largest_niche,
            align,
            size,
        }
    }

    fn univariant_uninterned(
        &self,
        ty: Ty<'tcx>,
        fields: &[TyAndLayout<'_>],
        repr: &ReprOptions,
        kind: StructKind,
    ) -> Result<Layout, LayoutError<'tcx>> {
        let dl = self.data_layout();
        let pack = repr.pack;
        if pack.is_some() && repr.align.is_some() {
            bug!("struct cannot be packed and aligned");
        }

        let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };

        let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();

        let optimize = !repr.inhibit_struct_field_reordering_opt();
        if optimize {
            let end =
                if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
            let optimizing = &mut inverse_memory_index[..end];
            let field_align = |f: &TyAndLayout<'_>| {
                if let Some(pack) = pack { f.align.abi.min(pack) } else { f.align.abi }
            };
            match kind {
                StructKind::AlwaysSized | StructKind::MaybeUnsized => {
                    optimizing.sort_by_key(|&x| {
                        // Place ZSTs first to avoid "interesting offsets",
                        // especially with only one or two non-ZST fields.
                        let f = &fields[x as usize];
                        (!f.is_zst(), cmp::Reverse(field_align(f)))
                    });
                }
                StructKind::Prefixed(..) => {
                    // Sort in ascending alignment so that the layout stays optimal
                    // regardless of the prefix.
                    optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
                }
            }
        }

        // inverse_memory_index holds field indices by increasing memory offset.
        // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
        // We now write field offsets to the corresponding offset slot;
        // field 5 with offset 0 puts 0 in offsets[5].
        // At the bottom of this function, we invert `inverse_memory_index` to
        // produce `memory_index` (see `invert_mapping`).
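        // e.g. for `struct S(u8, u64, u8)` on a typical 64-bit target with
        // reordering enabled, the sort above yields inverse_memory_index =
        // [1, 0, 2] (the u64 first), so the loop below produces
        // offsets = [8, 0, 9] and memory_index = [1, 0, 2].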

        let mut sized = true;
        let mut offsets = vec![Size::ZERO; fields.len()];
        let mut offset = Size::ZERO;
        let mut largest_niche = None;
        let mut largest_niche_available = 0;

        if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
            let prefix_align =
                if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
            align = align.max(AbiAndPrefAlign::new(prefix_align));
            offset = prefix_size.align_to(prefix_align);
        }

        for &i in &inverse_memory_index {
            let field = fields[i as usize];
            if !sized {
                bug!("univariant: field #{} of `{}` comes after unsized field", offsets.len(), ty);
            }

            if field.is_unsized() {
                sized = false;
            }

            // Invariant: offset < dl.obj_size_bound() <= 1<<61
            let field_align = if let Some(pack) = pack {
                field.align.min(AbiAndPrefAlign::new(pack))
            } else {
                field.align
            };
            offset = offset.align_to(field_align.abi);
            align = align.max(field_align);

            debug!("univariant offset: {:?} field: {:#?}", offset, field);
            offsets[i as usize] = offset;

            if !repr.hide_niche() {
                if let Some(mut niche) = field.largest_niche.clone() {
                    let available = niche.available(dl);
                    if available > largest_niche_available {
                        largest_niche_available = available;
                        niche.offset += offset;
                        largest_niche = Some(niche);
                    }
                }
            }

            offset = offset.checked_add(field.size, dl).ok_or(LayoutError::SizeOverflow(ty))?;
        }

        if let Some(repr_align) = repr.align {
            align = align.max(AbiAndPrefAlign::new(repr_align));
        }

        debug!("univariant min_size: {:?}", offset);
        let min_size = offset;

        // As stated above, inverse_memory_index holds field indices by increasing offset.
        // This makes it an already-sorted view of the offsets vec.
        // To invert it, consider: if field 5 has offset 0, then
        // inverse_memory_index[0] is 5, and memory_index[5] should be 0.
        // Note: if we didn't optimize, it's already right.

        let memory_index =
            if optimize { invert_mapping(&inverse_memory_index) } else { inverse_memory_index };

        let size = min_size.align_to(align.abi);
        let mut abi = Abi::Aggregate { sized };

        // Unpack newtype ABIs and find scalar pairs.
        if sized && size.bytes() > 0 {
            // All other fields must be ZSTs.
            let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());

            match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
                // We have exactly one non-ZST field.
                (Some((i, field)), None, None) => {
                    // Field fills the struct and it has a scalar or scalar pair ABI.
                    if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size
                    {
                        match field.abi {
                            // For plain scalars, or vectors of them, we can't unpack
                            // newtypes for `#[repr(C)]`, as that affects C ABIs.
                            Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
                                abi = field.abi.clone();
                            }
                            // But scalar pairs are Rust-specific and get
                            // treated as aggregates by C ABIs anyway.
                            Abi::ScalarPair(..) => {
                                abi = field.abi.clone();
                            }
                            _ => {}
                        }
                    }
                }

                // Two non-ZST fields, and they're both scalars.
                (
                    Some((i, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(ref a), .. }, .. })),
                    Some((j, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(ref b), .. }, .. })),
                    None,
                ) => {
                    // Order by the memory placement, not source order.
                    let ((i, a), (j, b)) =
                        if offsets[i] < offsets[j] { ((i, a), (j, b)) } else { ((j, b), (i, a)) };
                    let pair = self.scalar_pair(a.clone(), b.clone());
                    let pair_offsets = match pair.fields {
                        FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
                            assert_eq!(memory_index, &[0, 1]);
                            offsets
                        }
                        _ => bug!(),
                    };
                    if offsets[i] == pair_offsets[0]
                        && offsets[j] == pair_offsets[1]
                        && align == pair.align
                        && size == pair.size
                    {
                        // We can use `ScalarPair` only when it matches our
                        // already computed layout (including `#[repr(C)]`).
                        abi = pair.abi;
                    }
                }

                _ => {}
            }
        }

        if sized && fields.iter().any(|f| f.abi.is_uninhabited()) {
            abi = Abi::Uninhabited;
        }

        Ok(Layout {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Arbitrary { offsets, memory_index },
            abi,
            largest_niche,
            align,
            size,
        })
    }

    fn layout_raw_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx Layout, LayoutError<'tcx>> {
        let tcx = self.tcx;
        let param_env = self.param_env;
        let dl = self.data_layout();
        let scalar_unit = |value: Primitive| {
            let bits = value.size(dl).bits();
            assert!(bits <= 128);
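            // e.g. for `Int(I8, _)`, `!0 >> (128 - 8)` is 0xFF, so the valid
            // range is the full 0..=255.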
            Scalar { value, valid_range: 0..=(!0 >> (128 - bits)) }
        };
        let scalar = |value: Primitive| tcx.intern_layout(Layout::scalar(self, scalar_unit(value)));

        let univariant = |fields: &[TyAndLayout<'_>], repr: &ReprOptions, kind| {
            Ok(tcx.intern_layout(self.univariant_uninterned(ty, fields, repr, kind)?))
        };
        debug_assert!(!ty.has_infer_types_or_consts());

        Ok(match *ty.kind() {
            // Basic scalars.
            ty::Bool => tcx.intern_layout(Layout::scalar(
                self,
                Scalar { value: Int(I8, false), valid_range: 0..=1 },
            )),
            ty::Char => tcx.intern_layout(Layout::scalar(
                self,
                Scalar { value: Int(I32, false), valid_range: 0..=0x10FFFF },
            )),
            ty::Int(ity) => scalar(Int(Integer::from_int_ty(dl, ity), true)),
            ty::Uint(ity) => scalar(Int(Integer::from_uint_ty(dl, ity), false)),
            ty::Float(fty) => scalar(match fty {
                ty::FloatTy::F32 => F32,
                ty::FloatTy::F64 => F64,
            }),
            ty::FnPtr(_) => {
                let mut ptr = scalar_unit(Pointer);
                ptr.valid_range = 1..=*ptr.valid_range.end();
                tcx.intern_layout(Layout::scalar(self, ptr))
            }

            // The never type.
            ty::Never => tcx.intern_layout(Layout {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldsShape::Primitive,
                abi: Abi::Uninhabited,
                largest_niche: None,
                align: dl.i8_align,
                size: Size::ZERO,
            }),

            // Potentially-wide pointers.
            ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                let mut data_ptr = scalar_unit(Pointer);
                if !ty.is_unsafe_ptr() {
                    data_ptr.valid_range = 1..=*data_ptr.valid_range.end();
                }

                let pointee = tcx.normalize_erasing_regions(param_env, pointee);
                if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
                    return Ok(tcx.intern_layout(Layout::scalar(self, data_ptr)));
                }

                let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
                let metadata = match unsized_part.kind() {
                    ty::Foreign(..) => {
                        return Ok(tcx.intern_layout(Layout::scalar(self, data_ptr)));
                    }
                    ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)),
                    ty::Dynamic(..) => {
                        let mut vtable = scalar_unit(Pointer);
                        vtable.valid_range = 1..=*vtable.valid_range.end();
                        vtable
                    }
                    _ => return Err(LayoutError::Unknown(unsized_part)),
                };

                // Effectively a (ptr, meta) tuple.
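                // e.g. `&str` is (non-null data pointer, usize length), and
                // `&dyn Trait` is (non-null data pointer, non-null vtable pointer).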
                tcx.intern_layout(self.scalar_pair(data_ptr, metadata))
            }

            // Arrays and slices.
            ty::Array(element, mut count) => {
                if count.has_projections() {
                    count = tcx.normalize_erasing_regions(param_env, count);
                    if count.has_projections() {
                        return Err(LayoutError::Unknown(ty));
                    }
                }

                let count = count.try_eval_usize(tcx, param_env).ok_or(LayoutError::Unknown(ty))?;
                let element = self.layout_of(element)?;
                let size =
                    element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;

                let abi =
                    if count != 0 && tcx.conservative_is_privately_uninhabited(param_env.and(ty)) {
                        Abi::Uninhabited
                    } else {
                        Abi::Aggregate { sized: true }
                    };

                let largest_niche = if count != 0 { element.largest_niche.clone() } else { None };

                tcx.intern_layout(Layout {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldsShape::Array { stride: element.size, count },
                    abi,
                    largest_niche,
                    align: element.align,
                    size,
                })
            }
            ty::Slice(element) => {
                let element = self.layout_of(element)?;
                tcx.intern_layout(Layout {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldsShape::Array { stride: element.size, count: 0 },
                    abi: Abi::Aggregate { sized: false },
                    largest_niche: None,
                    align: element.align,
                    size: Size::ZERO,
                })
            }
            ty::Str => tcx.intern_layout(Layout {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
                abi: Abi::Aggregate { sized: false },
                largest_niche: None,
                align: dl.i8_align,
                size: Size::ZERO,
            }),

            // Odd unit types.
            ty::FnDef(..) => univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?,
            ty::Dynamic(..) | ty::Foreign(..) => {
                let mut unit = self.univariant_uninterned(
                    ty,
                    &[],
                    &ReprOptions::default(),
                    StructKind::AlwaysSized,
                )?;
                match unit.abi {
                    Abi::Aggregate { ref mut sized } => *sized = false,
                    _ => bug!(),
                }
                tcx.intern_layout(unit)
            }

            ty::Generator(def_id, substs, _) => self.generator_layout(ty, def_id, substs)?,

            ty::Closure(_, ref substs) => {
                let tys = substs.as_closure().upvar_tys();
                univariant(
                    &tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    StructKind::AlwaysSized,
                )?
            }

            ty::Tuple(tys) => {
                let kind =
                    if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };

                univariant(
                    &tys.iter()
                        .map(|k| self.layout_of(k.expect_ty()))
                        .collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    kind,
                )?
            }

            // SIMD vector types.
            ty::Adt(def, substs) if def.repr.simd() => {
                // Supported SIMD vectors are homogeneous ADTs with at least one field:
                //
                // * #[repr(simd)] struct S(T, T, T, T);
                // * #[repr(simd)] struct S { x: T, y: T, z: T, w: T }
                // * #[repr(simd)] struct S([T; 4])
                //
                // where T is a primitive scalar (integer/float/pointer).

                // SIMD vectors with zero fields are not supported.
                // (should be caught by typeck)
                if def.non_enum_variant().fields.is_empty() {
                    tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
                }

                // Type of the first ADT field:
                let f0_ty = def.non_enum_variant().fields[0].ty(tcx, substs);

                // Heterogeneous SIMD vectors are not supported:
                // (should be caught by typeck)
                for fi in &def.non_enum_variant().fields {
                    if fi.ty(tcx, substs) != f0_ty {
                        tcx.sess.fatal(&format!("monomorphising heterogeneous SIMD type `{}`", ty));
                    }
                }

                // The element type and number of elements of the SIMD vector
                // are obtained from:
                //
                // * the element type and length of the single array field, if
                // the first field is of array type, or
                //
                // * the homogeneous field type and the number of fields.
                let (e_ty, e_len, is_array) = if let ty::Array(e_ty, _) = f0_ty.kind() {
                    // First ADT field is an array:

                    // SIMD vectors with multiple array fields are not supported:
                    // (should be caught by typeck)
                    if def.non_enum_variant().fields.len() != 1 {
                        tcx.sess.fatal(&format!(
                            "monomorphising SIMD type `{}` with more than one array field",
                            ty
                        ));
                    }

                    // Extract the number of elements from the layout of the array field:
                    let len = if let Ok(TyAndLayout {
                        layout: Layout { fields: FieldsShape::Array { count, .. }, .. },
                        ..
                    }) = self.layout_of(f0_ty)
                    {
                        count
                    } else {
                        return Err(LayoutError::Unknown(ty));
                    };

                    (*e_ty, *len, true)
                } else {
                    // First ADT field is not an array:
                    (f0_ty, def.non_enum_variant().fields.len() as _, false)
                };

                // SIMD vectors of zero length are not supported.
                // Additionally, lengths are capped at 2^15 (`MAX_SIMD_LANES`), a
                // fixed maximum that backends must support.
                //
                // Can't be caught in typeck if the array length is generic.
                if e_len == 0 {
                    tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
                } else if e_len > MAX_SIMD_LANES {
                    tcx.sess.fatal(&format!(
                        "monomorphising SIMD type `{}` of length greater than {}",
                        ty, MAX_SIMD_LANES,
                    ));
                }

                // Compute the ABI of the element type:
                let e_ly = self.layout_of(e_ty)?;
                let e_abi = if let Abi::Scalar(ref scalar) = e_ly.abi {
                    scalar.clone()
                } else {
                    // This error isn't caught in typeck, e.g., if
                    // the element type of the vector is generic.
                    tcx.sess.fatal(&format!(
                        "monomorphising SIMD type `{}` with a non-primitive-scalar \
                        (integer/float/pointer) element type `{}`",
                        ty, e_ty
                    ))
                };

                // Compute the size and alignment of the vector:
                let size = e_ly.size.checked_mul(e_len, dl).ok_or(LayoutError::SizeOverflow(ty))?;
                let align = dl.vector_align(size);
                let size = size.align_to(align.abi);

                // Compute the placement of the vector fields:
                let fields = if is_array {
                    FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] }
                } else {
                    FieldsShape::Array { stride: e_ly.size, count: e_len }
                };

                tcx.intern_layout(Layout {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields,
                    abi: Abi::Vector { element: e_abi, count: e_len },
                    largest_niche: e_ly.largest_niche.clone(),
                    size,
                    align,
                })
            }

            // ADTs.
            ty::Adt(def, substs) => {
                // Cache the field layouts.
                let variants = def
                    .variants
                    .iter()
                    .map(|v| {
                        v.fields
                            .iter()
                            .map(|field| self.layout_of(field.ty(tcx, substs)))
                            .collect::<Result<Vec<_>, _>>()
                    })
                    .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                if def.is_union() {
                    if def.repr.pack.is_some() && def.repr.align.is_some() {
                        bug!("union cannot be packed and aligned");
                    }

                    let mut align =
                        if def.repr.pack.is_some() { dl.i8_align } else { dl.aggregate_align };

                    if let Some(repr_align) = def.repr.align {
                        align = align.max(AbiAndPrefAlign::new(repr_align));
                    }

                    let optimize = !def.repr.inhibit_union_abi_opt();
                    let mut size = Size::ZERO;
                    let mut abi = Abi::Aggregate { sized: true };
                    let index = VariantIdx::new(0);
                    for field in &variants[index] {
                        assert!(!field.is_unsized());
                        align = align.max(field.align);

                        // If all non-ZST fields have the same ABI, forward this ABI
                        if optimize && !field.is_zst() {
                            // Normalize scalar_unit to the maximal valid range
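                            // e.g. in `union U { a: u32, b: char }`, both fields
                            // normalize to a full-range `Int(I32, false)` scalar
                            // (the char's range restriction is dropped), so the
                            // union forwards `Abi::Scalar`.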
                            let field_abi = match &field.abi {
                                Abi::Scalar(x) => Abi::Scalar(scalar_unit(x.value)),
                                Abi::ScalarPair(x, y) => {
                                    Abi::ScalarPair(scalar_unit(x.value), scalar_unit(y.value))
                                }
                                Abi::Vector { element: x, count } => {
                                    Abi::Vector { element: scalar_unit(x.value), count: *count }
                                }
                                Abi::Uninhabited | Abi::Aggregate { .. } => {
                                    Abi::Aggregate { sized: true }
                                }
                            };

                            if size == Size::ZERO {
                                // first non-ZST field: initialize `abi`
                                abi = field_abi;
                            } else if abi != field_abi {
                                // different fields have different ABIs: reset to Aggregate
                                abi = Abi::Aggregate { sized: true };
                            }
                        }

                        size = cmp::max(size, field.size);
                    }

                    if let Some(pack) = def.repr.pack {
                        align = align.min(AbiAndPrefAlign::new(pack));
                    }

                    return Ok(tcx.intern_layout(Layout {
                        variants: Variants::Single { index },
                        fields: FieldsShape::Union(
                            NonZeroUsize::new(variants[index].len())
                                .ok_or(LayoutError::Unknown(ty))?,
                        ),
                        abi,
                        largest_niche: None,
                        align,
                        size: size.align_to(align.abi),
                    }));
                }

                // A variant is absent if it's uninhabited and only has ZST fields.
                // Present uninhabited variants only require space for their fields,
                // but *not* an encoding of the discriminant (e.g., a tag value).
                // See issue #49298 for more details on the need to leave space
                // for non-ZST uninhabited data (mostly partial initialization).
                let absent = |fields: &[TyAndLayout<'_>]| {
                    let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
                    let is_zst = fields.iter().all(|f| f.is_zst());
                    uninhabited && is_zst
                };
                let (present_first, present_second) = {
                    let mut present_variants = variants
                        .iter_enumerated()
                        .filter_map(|(i, v)| if absent(v) { None } else { Some(i) });
                    (present_variants.next(), present_variants.next())
                };
                let present_first = match present_first {
                    Some(present_first) => present_first,
                    // Uninhabited because it has no variants, or only absent ones.
                    None if def.is_enum() => return tcx.layout_raw(param_env.and(tcx.types.never)),
                    // If it's a struct, still compute a layout so that we can still compute the
                    // field offsets.
                    None => VariantIdx::new(0),
                };

                let is_struct = !def.is_enum() ||
                    // Only one variant is present.
                    (present_second.is_none() &&
                    // Representation optimizations are allowed.
                    !def.repr.inhibit_enum_layout_opt());
                if is_struct {
                    // Struct, or univariant enum equivalent to a struct.
                    // (Typechecking will reject discriminant-sizing attrs.)

                    let v = present_first;
                    let kind = if def.is_enum() || variants[v].is_empty() {
                        StructKind::AlwaysSized
                    } else {
                        let param_env = tcx.param_env(def.did);
                        let last_field = def.variants[v].fields.last().unwrap();
                        let always_sized =
                            tcx.type_of(last_field.did).is_sized(tcx.at(DUMMY_SP), param_env);
                        if !always_sized {
                            StructKind::MaybeUnsized
                        } else {
                            StructKind::AlwaysSized
                        }
                    };

                    let mut st = self.univariant_uninterned(ty, &variants[v], &def.repr, kind)?;
                    st.variants = Variants::Single { index: v };
                    let (start, end) = self.tcx.layout_scalar_valid_range(def.did);
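                    // e.g. `NonZeroU32` carries `#[rustc_layout_scalar_valid_range_start(1)]`,
                    // which narrows the scalar's valid range here to exclude 0.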
                    match st.abi {
                        Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
                            // the asserts ensure that we are not using the
                            // `#[rustc_layout_scalar_valid_range(n)]`
                            // attribute to widen the range of anything as that would probably
                            // result in UB somewhere
                            // FIXME(eddyb) the asserts are probably not needed,
                            // as larger validity ranges would result in missed
                            // optimizations, *not* wrongly assuming the inner
                            // value is valid. e.g. unions enlarge validity ranges,
                            // because the values may be uninitialized.
                            if let Bound::Included(start) = start {
                                // FIXME(eddyb) this might be incorrect - it doesn't
                                // account for wrap-around (end < start) ranges.
                                assert!(*scalar.valid_range.start() <= start);
                                scalar.valid_range = start..=*scalar.valid_range.end();
                            }
                            if let Bound::Included(end) = end {
                                // FIXME(eddyb) this might be incorrect - it doesn't
                                // account for wrap-around (end < start) ranges.
                                assert!(*scalar.valid_range.end() >= end);
                                scalar.valid_range = *scalar.valid_range.start()..=end;
                            }

                            // Update `largest_niche` if we have introduced a larger niche.
                            let niche = if def.repr.hide_niche() {
                                None
                            } else {
                                Niche::from_scalar(dl, Size::ZERO, scalar.clone())
                            };
                            if let Some(niche) = niche {
                                match &st.largest_niche {
                                    Some(largest_niche) => {
                                        // Replace the existing niche even if they're equal,
                                        // because this one is at a lower offset.
                                        if largest_niche.available(dl) <= niche.available(dl) {
                                            st.largest_niche = Some(niche);
                                        }
                                    }
                                    None => st.largest_niche = Some(niche),
                                }
                            }
                        }
                        _ => assert!(
                            start == Bound::Unbounded && end == Bound::Unbounded,
                            "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
                            def,
                            st,
                        ),
                    }

                    return Ok(tcx.intern_layout(st));
                }

                // At this point, we have handled all unions and
                // structs. (We have also handled univariant enums
                // that allow representation optimization.)
                assert!(def.is_enum());

                // The current code for niche-filling relies on variant indices
                // instead of actual discriminants, so dataful enums with
                // explicit discriminants (RFC #2363) would misbehave.
                let no_explicit_discriminants = def
                    .variants
                    .iter_enumerated()
                    .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i.as_u32()));

                let mut niche_filling_layout = None;

                // Niche-filling enum optimization.
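                // e.g. `Option<&T>` needs no separate tag: the reference's
                // forbidden value 0 (its niche) encodes the `None` variant.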
                if !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants {
                    let mut dataful_variant = None;
                    let mut niche_variants = VariantIdx::MAX..=VariantIdx::new(0);

                    // Find one non-ZST variant.
                    'variants: for (v, fields) in variants.iter_enumerated() {
                        if absent(fields) {
                            continue 'variants;
                        }
                        for f in fields {
                            if !f.is_zst() {
                                if dataful_variant.is_none() {
                                    dataful_variant = Some(v);
                                    continue 'variants;
                                } else {
                                    dataful_variant = None;
                                    break 'variants;
                                }
                            }
                        }
                        niche_variants = *niche_variants.start().min(&v)..=v;
                    }

                    if niche_variants.start() > niche_variants.end() {
                        dataful_variant = None;
                    }

                    if let Some(i) = dataful_variant {
                        let count = (niche_variants.end().as_u32()
                            - niche_variants.start().as_u32()
                            + 1) as u128;

                        // Find the field with the largest niche
                        let niche_candidate = variants[i]
                            .iter()
                            .enumerate()
                            .filter_map(|(j, &field)| Some((j, field.largest_niche.as_ref()?)))
                            .max_by_key(|(_, niche)| niche.available(dl));

                        if let Some((field_index, niche, (niche_start, niche_scalar))) =
                            niche_candidate.and_then(|(field_index, niche)| {
                                Some((field_index, niche, niche.reserve(self, count)?))
                            })
                        {
                            let mut align = dl.aggregate_align;
                            let st = variants
                                .iter_enumerated()
                                .map(|(j, v)| {
                                    let mut st = self.univariant_uninterned(
                                        ty,
                                        v,
                                        &def.repr,
                                        StructKind::AlwaysSized,
                                    )?;
                                    st.variants = Variants::Single { index: j };

                                    align = align.max(st.align);

                                    Ok(st)
                                })
                                .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                            let offset = st[i].fields.offset(field_index) + niche.offset;
                            let size = st[i].size;

                            let abi = if st.iter().all(|v| v.abi.is_uninhabited()) {
                                Abi::Uninhabited
                            } else {
                                match st[i].abi {
                                    Abi::Scalar(_) => Abi::Scalar(niche_scalar.clone()),
                                    Abi::ScalarPair(ref first, ref second) => {
                                        // We need to use scalar_unit to reset the
                                        // valid range to the maximal one for that
                                        // primitive, because only the niche is
                                        // guaranteed to be initialised, not the
                                        // other primitive.
                                        if offset.bytes() == 0 {
                                            Abi::ScalarPair(
                                                niche_scalar.clone(),
                                                scalar_unit(second.value),
                                            )
                                        } else {
                                            Abi::ScalarPair(
                                                scalar_unit(first.value),
                                                niche_scalar.clone(),
                                            )
                                        }
                                    }
                                    _ => Abi::Aggregate { sized: true },
                                }
                            };

                            let largest_niche =
                                Niche::from_scalar(dl, offset, niche_scalar.clone());

                            niche_filling_layout = Some(Layout {
                                variants: Variants::Multiple {
                                    tag: niche_scalar,
                                    tag_encoding: TagEncoding::Niche {
                                        dataful_variant: i,
                                        niche_variants,
                                        niche_start,
                                    },
                                    tag_field: 0,
                                    variants: st,
                                },
                                fields: FieldsShape::Arbitrary {
                                    offsets: vec![offset],
                                    memory_index: vec![0],
                                },
                                abi,
                                largest_niche,
                                size,
                                align,
                            });
                        }
                    }
                }

                let (mut min, mut max) = (i128::MAX, i128::MIN);
                let discr_type = def.repr.discr_type();
                let bits = Integer::from_attr(self, discr_type).size().bits();
                for (i, discr) in def.discriminants(tcx) {
                    if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
                        continue;
                    }
                    let mut x = discr.val as i128;
                    if discr_type.is_signed() {
                        // sign extend the raw representation to be an i128
                        x = (x << (128 - bits)) >> (128 - bits);
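                        // e.g. an i8 discriminant stored as 0xFF: shifting left by
                        // 120 and arithmetically back right by 120 yields -1.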
                    }
                    if x < min {
                        min = x;
                    }
                    if x > max {
                        max = x;
                    }
                }
                // We might have no inhabited variants, so pretend there's at least one.
                if (min, max) == (i128::MAX, i128::MIN) {
                    min = 0;
                    max = 0;
                }
                assert!(min <= max, "discriminant range is {}...{}", min, max);
                let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max);

                let mut align = dl.aggregate_align;
                let mut size = Size::ZERO;

                // We're interested in the smallest alignment, so start large.
                let mut start_align = Align::from_bytes(256).unwrap();
                assert_eq!(Integer::for_align(dl, start_align), None);

                // repr(C) on an enum tells us to make a (tag, union) layout,
                // so we need to grow the prefix alignment to be at least
                // the alignment of the union. (This value is used both for
                // determining the alignment of the overall enum, and for
                // determining the alignment of the payload after the tag.)
                let mut prefix_align = min_ity.align(dl).abi;
                if def.repr.c() {
                    for fields in &variants {
                        for field in fields {
                            prefix_align = prefix_align.max(field.align.abi);
                        }
                    }
                }

                // Create the set of structs that represent each variant.
                let mut layout_variants = variants
                    .iter_enumerated()
                    .map(|(i, field_layouts)| {
                        let mut st = self.univariant_uninterned(
                            ty,
                            &field_layouts,
                            &def.repr,
                            StructKind::Prefixed(min_ity.size(), prefix_align),
                        )?;
                        st.variants = Variants::Single { index: i };
                        // Find the first field we can't move later
                        // to make room for a larger discriminant.
                        for field in
                            st.fields.index_by_increasing_offset().map(|j| field_layouts[j])
                        {
                            if !field.is_zst() || field.align.abi.bytes() != 1 {
                                start_align = start_align.min(field.align.abi);
                                break;
                            }
                        }
                        size = cmp::max(size, st.size);
                        align = align.max(st.align);
                        Ok(st)
                    })
                    .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                // Align the maximum variant size to the largest alignment.
                size = size.align_to(align.abi);

                if size.bytes() >= dl.obj_size_bound() {
                    return Err(LayoutError::SizeOverflow(ty));
                }

                let typeck_ity = Integer::from_attr(dl, def.repr.discr_type());
                if typeck_ity < min_ity {
1182                     // It is a bug if layout decided on a larger discriminant size than
1183                     // typeck did at this point (based on the values the discriminant can
1184                     // take on), because the discriminant will be loaded and then stored
1185                     // into a variable of the type computed by typeck. Consider this buggy
1186                     // case: typeck decided on a byte-sized discriminant, but layout thinks
1187                     // we need 16 bits to store all the discriminant values. Then, in
1188                     // codegen, storing the 16-bit discriminant into the 8-bit temporary
1189                     // would discard some of the space needed to represent it (or layout
1190                     // is wrong in thinking it needs 16 bits).
1191                     bug!(
1192                         "layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
1193                         min_ity,
1194                         typeck_ity
1195                     );
1196                     // However, it is fine to make the discriminant type arbitrarily large
1197                     // (as an optimisation) after this point – we'll just truncate the value we load in codegen.
1198                 }
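                // For example (an assumed user type, not defined in this file):
                // with `#[repr(u8)] enum E { A = 0, B = 250 }`, typeck fixes the
                // discriminant type to `u8` (`typeck_ity = I8`); if layout ever
                // computed a `min_ity` of `I16` for such an enum, the `bug!`
                // above would fire.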
1199
1200                 // Check to see if we should use a different type for the
1201                 // discriminant. We can safely use a type with the same size
1202                 // as the alignment of the first field of each variant.
1203                 // We increase the size of the discriminant to avoid LLVM copying
1204                 // padding when it doesn't need to; copying padding normally causes
1205                 // unaligned loads/stores and excessive memcpy/memset operations. By using a
1206                 // bigger integer size, LLVM can be sure about its contents and
1207                 // won't be so conservative.
1208
1209                 // Use the initial field alignment to choose the discriminant type.
1210                 let mut ity = if def.repr.c() || def.repr.int.is_some() {
1211                     min_ity
1212                 } else {
1213                     Integer::for_align(dl, start_align).unwrap_or(min_ity)
1214                 };
1215
1216                 // If the alignment-derived integer is no larger than the minimal
1217                 // discriminant size, keep the minimal size as the final choice.
1218                 if ity <= min_ity {
1219                     ity = min_ity;
1220                 } else {
1221                     // Patch up the variants' first few fields.
1222                     let old_ity_size = min_ity.size();
1223                     let new_ity_size = ity.size();
1224                     for variant in &mut layout_variants {
1225                         match variant.fields {
1226                             FieldsShape::Arbitrary { ref mut offsets, .. } => {
1227                                 for i in offsets {
1228                                     if *i <= old_ity_size {
1229                                         assert_eq!(*i, old_ity_size);
1230                                         *i = new_ity_size;
1231                                     }
1232                                 }
1233                                 // We might be making the struct larger.
1234                                 if variant.size <= old_ity_size {
1235                                     variant.size = new_ity_size;
1236                                 }
1237                             }
1238                             _ => bug!(),
1239                         }
1240                     }
1241                 }
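                // Worked example (assumed user type): for `enum E { A, B(u32) }`,
                // the two discriminant values fit in `I8` (`min_ity`), but `B`'s
                // payload is 4-aligned, so `ity` can grow to `I32` to cover the
                // padding before the payload; the fieldless variant `A`, whose
                // size was just the old 1-byte tag, has its size bumped to the
                // new 4-byte tag by the patch-up above.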
1242
1243                 let tag_mask = !0u128 >> (128 - ity.size().bits());
1244                 let tag = Scalar {
1245                     value: Int(ity, signed),
1246                     valid_range: (min as u128 & tag_mask)..=(max as u128 & tag_mask),
1247                 };
1248                 let mut abi = Abi::Aggregate { sized: true };
1249                 if tag.value.size(dl) == size {
1250                     abi = Abi::Scalar(tag.clone());
1251                 } else {
1252                     // Try to use a ScalarPair for all tagged enums.
1253                     let mut common_prim = None;
1254                     for (field_layouts, layout_variant) in variants.iter().zip(&layout_variants) {
1255                         let offsets = match layout_variant.fields {
1256                             FieldsShape::Arbitrary { ref offsets, .. } => offsets,
1257                             _ => bug!(),
1258                         };
1259                         let mut fields =
1260                             field_layouts.iter().zip(offsets).filter(|p| !p.0.is_zst());
1261                         let (field, offset) = match (fields.next(), fields.next()) {
1262                             (None, None) => continue,
1263                             (Some(pair), None) => pair,
1264                             _ => {
1265                                 common_prim = None;
1266                                 break;
1267                             }
1268                         };
1269                         let prim = match field.abi {
1270                             Abi::Scalar(ref scalar) => scalar.value,
1271                             _ => {
1272                                 common_prim = None;
1273                                 break;
1274                             }
1275                         };
1276                         if let Some(pair) = common_prim {
1277                             // This is pretty conservative. We could go fancier
1278                             // by conflating things like i32 and u32, or even
1279                             // realising that (u8, u8) could just cohabit with
1280                             // u16 or even u32.
1281                             if pair != (prim, offset) {
1282                                 common_prim = None;
1283                                 break;
1284                             }
1285                         } else {
1286                             common_prim = Some((prim, offset));
1287                         }
1288                     }
1289                     if let Some((prim, offset)) = common_prim {
1290                         let pair = self.scalar_pair(tag.clone(), scalar_unit(prim));
1291                         let pair_offsets = match pair.fields {
1292                             FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
1293                                 assert_eq!(memory_index, &[0, 1]);
1294                                 offsets
1295                             }
1296                             _ => bug!(),
1297                         };
1298                         if pair_offsets[0] == Size::ZERO
1299                             && pair_offsets[1] == *offset
1300                             && align == pair.align
1301                             && size == pair.size
1302                         {
1303                             // We can use `ScalarPair` only when it matches our
1304                             // already computed layout (including `#[repr(C)]`).
1305                             abi = pair.abi;
1306                         }
1307                     }
1308                 }
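                // For instance (user-level illustration): in
                // `enum E { A(u32), B(u32) }`, every variant is a single `u32`
                // at the same offset after the tag, so `common_prim` holds and
                // the enum can be passed around as `ScalarPair(tag, u32)`
                // instead of as an opaque aggregate.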
1309
1310                 if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
1311                     abi = Abi::Uninhabited;
1312                 }
1313
1314                 let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag.clone());
1315
1316                 let tagged_layout = Layout {
1317                     variants: Variants::Multiple {
1318                         tag,
1319                         tag_encoding: TagEncoding::Direct,
1320                         tag_field: 0,
1321                         variants: layout_variants,
1322                     },
1323                     fields: FieldsShape::Arbitrary {
1324                         offsets: vec![Size::ZERO],
1325                         memory_index: vec![0],
1326                     },
1327                     largest_niche,
1328                     abi,
1329                     align,
1330                     size,
1331                 };
1332
1333                 let best_layout = match (tagged_layout, niche_filling_layout) {
1334                     (tagged_layout, Some(niche_filling_layout)) => {
1335                         // Pick the smaller layout; otherwise,
1336                         // pick the layout with the larger niche; otherwise,
1337                         // pick tagged as it has simpler codegen.
1338                         cmp::min_by_key(tagged_layout, niche_filling_layout, |layout| {
1339                             let niche_size =
1340                                 layout.largest_niche.as_ref().map_or(0, |n| n.available(dl));
1341                             (layout.size, cmp::Reverse(niche_size))
1342                         })
1343                     }
1344                     (tagged_layout, None) => tagged_layout,
1345                 };
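                // For example (illustrative): `Option<&u8>` admits a
                // pointer-sized niche-filling layout (the null value encodes
                // `None`), which beats the larger tagged candidate on size, so
                // `min_by_key` picks the niche-filling layout there.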
1346
1347                 tcx.intern_layout(best_layout)
1348             }
1349
1350             // Types with no meaningful known layout.
1351             ty::Projection(_) | ty::Opaque(..) => {
1352                 let normalized = tcx.normalize_erasing_regions(param_env, ty);
1353                 if ty == normalized {
1354                     return Err(LayoutError::Unknown(ty));
1355                 }
1356                 tcx.layout_raw(param_env.and(normalized))?
1357             }
1358
1359             ty::Placeholder(..) | ty::GeneratorWitness(..) | ty::Infer(_) => {
1360                 bug!("Layout::compute: unexpected type `{}`", ty)
1361             }
1362
1363             ty::Bound(..) | ty::Param(_) | ty::Error(_) => {
1364                 return Err(LayoutError::Unknown(ty));
1365             }
1366         })
1367     }
1368 }
1369
1370 /// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
1371 #[derive(Clone, Debug, PartialEq)]
1372 enum SavedLocalEligibility {
1373     Unassigned,
1374     Assigned(VariantIdx),
1375     // FIXME: Use newtype_index so we aren't wasting bytes
1376     Ineligible(Option<u32>),
1377 }
1378
1379 // When laying out generators, we divide our saved local fields into two
1380 // categories: overlap-eligible and overlap-ineligible.
1381 //
1382 // Those fields which are ineligible for overlap go in a "prefix" at the
1383 // beginning of the layout, and always have space reserved for them.
1384 //
1385 // Overlap-eligible fields are only assigned to one variant, so we lay
1386 // those fields out for each variant and put them right after the
1387 // prefix.
1388 //
1389 // Finally, in the layout details, we point to the fields from the
1390 // variants they are assigned to. It is possible for some fields to be
1391 // included in multiple variants. No field ever "moves around" in the
1392 // layout; its offset is always the same.
1393 //
1394 // Also included in the layout are the upvars and the discriminant.
1395 // These are included as fields on the "outer" layout; they are not part
1396 // of any variant.
1397 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
1398     /// Compute the eligibility and assignment of each local.
1399     fn generator_saved_local_eligibility(
1400         &self,
1401         info: &GeneratorLayout<'tcx>,
1402     ) -> (BitSet<GeneratorSavedLocal>, IndexVec<GeneratorSavedLocal, SavedLocalEligibility>) {
1403         use SavedLocalEligibility::*;
1404
1405         let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> =
1406             IndexVec::from_elem_n(Unassigned, info.field_tys.len());
1407
1408         // The saved locals not eligible for overlap. These will get
1409         // "promoted" to the prefix of our generator.
1410         let mut ineligible_locals = BitSet::new_empty(info.field_tys.len());
1411
1412         // Figure out which of our saved locals are fields in only
1413         // one variant. The rest are deemed ineligible for overlap.
1414         for (variant_index, fields) in info.variant_fields.iter_enumerated() {
1415             for local in fields {
1416                 match assignments[*local] {
1417                     Unassigned => {
1418                         assignments[*local] = Assigned(variant_index);
1419                     }
1420                     Assigned(idx) => {
1421                         // We've already seen this local at another suspension
1422                         // point, so it is no longer a candidate.
1423                         trace!(
1424                             "removing local {:?} in >1 variant ({:?}, {:?})",
1425                             local,
1426                             variant_index,
1427                             idx
1428                         );
1429                         ineligible_locals.insert(*local);
1430                         assignments[*local] = Ineligible(None);
1431                     }
1432                     Ineligible(_) => {}
1433                 }
1434             }
1435         }
1436
1437         // Next, check every pair of eligible locals to see if they
1438         // conflict.
1439         for local_a in info.storage_conflicts.rows() {
1440             let conflicts_a = info.storage_conflicts.count(local_a);
1441             if ineligible_locals.contains(local_a) {
1442                 continue;
1443             }
1444
1445             for local_b in info.storage_conflicts.iter(local_a) {
1446                 // `local_a` and `local_b` are storage-live at the same time, so they
1447                 // cannot overlap in the generator layout. That is already guaranteed
1448                 // when they are assigned to the same variant, or when one of them is
1449                 // ineligible (and therefore stored in every variant's prefix).
1450                 if ineligible_locals.contains(local_b)
1451                     || assignments[local_a] == assignments[local_b]
1452                 {
1453                     continue;
1454                 }
1455
1456                 // If they conflict, we will choose one to make ineligible.
1457                 // This is not always optimal; it's just a greedy heuristic that
1458                 // seems to produce good results most of the time.
1459                 let conflicts_b = info.storage_conflicts.count(local_b);
1460                 let (remove, other) =
1461                     if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
1462                 ineligible_locals.insert(remove);
1463                 assignments[remove] = Ineligible(None);
1464                 trace!("removing local {:?} due to conflict with {:?}", remove, other);
1465             }
1466         }
1467
1468                 // Count the number of variants in use. If fewer than two are used,
1469                 // it is impossible for any locals to overlap in our layout. In that
1470                 // case it is always better to make the remaining locals ineligible,
1471                 // so we can lay them out with the other locals in the prefix and
1472                 // eliminate unnecessary padding bytes.
1473         {
1474             let mut used_variants = BitSet::new_empty(info.variant_fields.len());
1475             for assignment in &assignments {
1476                 if let Assigned(idx) = assignment {
1477                     used_variants.insert(*idx);
1478                 }
1479             }
1480             if used_variants.count() < 2 {
1481                 for assignment in assignments.iter_mut() {
1482                     *assignment = Ineligible(None);
1483                 }
1484                 ineligible_locals.insert_all();
1485             }
1486         }
1487
1488         // Write down the order of our locals that will be promoted to the prefix.
1489         {
1490             for (idx, local) in ineligible_locals.iter().enumerate() {
1491                 assignments[local] = Ineligible(Some(idx as u32));
1492             }
1493         }
1494         debug!("generator saved local assignments: {:?}", assignments);
1495
1496         (ineligible_locals, assignments)
1497     }
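    // A hedged illustration (assumed nightly user code, since generators are
    // unstable): in
    //
    //     let g = || {
    //         let a = [0u8; 32];
    //         yield;
    //         drop(a);
    //         let b = 1u32;
    //         yield;
    //         drop(b);
    //     };
    //
    // `a` and `b` are each live across exactly one suspension point and never
    // at the same time, so both stay `Assigned` and may share storage; a local
    // live across both yields would be seen in two variants and demoted to
    // `Ineligible`, landing in the prefix.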
1498
1499     /// Compute the full generator layout.
1500     fn generator_layout(
1501         &self,
1502         ty: Ty<'tcx>,
1503         def_id: hir::def_id::DefId,
1504         substs: SubstsRef<'tcx>,
1505     ) -> Result<&'tcx Layout, LayoutError<'tcx>> {
1506         use SavedLocalEligibility::*;
1507         let tcx = self.tcx;
1508         let subst_field = |ty: Ty<'tcx>| ty.subst(tcx, substs);
1509
1510         let info = match tcx.generator_layout(def_id) {
1511             None => return Err(LayoutError::Unknown(ty)),
1512             Some(info) => info,
1513         };
1514         let (ineligible_locals, assignments) = self.generator_saved_local_eligibility(&info);
1515
1516         // Build a prefix layout, including "promoting" all ineligible
1517         // locals as part of the prefix. We compute the layout of all of
1518         // these fields at once to get optimal packing.
1519         let tag_index = substs.as_generator().prefix_tys().count();
1520
1521         // `info.variant_fields` already accounts for the reserved variants, so no need to add them.
1522         let max_discr = (info.variant_fields.len() - 1) as u128;
1523         let discr_int = Integer::fit_unsigned(max_discr);
1524         let discr_int_ty = discr_int.to_ty(tcx, false);
1525         let tag = Scalar { value: Primitive::Int(discr_int, false), valid_range: 0..=max_discr };
1526         let tag_layout = self.tcx.intern_layout(Layout::scalar(self, tag.clone()));
1527         let tag_layout = TyAndLayout { ty: discr_int_ty, layout: tag_layout };
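        // E.g. (informal): a generator with a single `yield` has the three
        // reserved variants (unresumed, returned, panicked) plus one suspend
        // state, so `info.variant_fields.len()` is 4, `max_discr` is 3, and
        // the tag fits in `I8`.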
1528
1529         let promoted_layouts = ineligible_locals
1530             .iter()
1531             .map(|local| subst_field(info.field_tys[local]))
1532             .map(|ty| tcx.mk_maybe_uninit(ty))
1533             .map(|ty| self.layout_of(ty));
1534         let prefix_layouts = substs
1535             .as_generator()
1536             .prefix_tys()
1537             .map(|ty| self.layout_of(ty))
1538             .chain(iter::once(Ok(tag_layout)))
1539             .chain(promoted_layouts)
1540             .collect::<Result<Vec<_>, _>>()?;
1541         let prefix = self.univariant_uninterned(
1542             ty,
1543             &prefix_layouts,
1544             &ReprOptions::default(),
1545             StructKind::AlwaysSized,
1546         )?;
1547
1548         let (prefix_size, prefix_align) = (prefix.size, prefix.align);
1549
1550         // Split the prefix layout into the "outer" fields (upvars and
1551         // discriminant) and the "promoted" fields. Promoted fields will
1552         // get included in each variant that requested them in
1553         // GeneratorLayout.
1554         debug!("prefix = {:#?}", prefix);
1555         let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
1556             FieldsShape::Arbitrary { mut offsets, memory_index } => {
1557                 let mut inverse_memory_index = invert_mapping(&memory_index);
1558
1559                 // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
1560                 // "outer" and "promoted" fields respectively.
1561                 let b_start = (tag_index + 1) as u32;
1562                 let offsets_b = offsets.split_off(b_start as usize);
1563                 let offsets_a = offsets;
1564
1565                 // Disentangle the "a" and "b" components of `inverse_memory_index`
1566                 // by preserving the order but keeping only one disjoint "half" each.
1567                 // FIXME(eddyb) build a better abstraction for permutations, if possible.
1568                 let inverse_memory_index_b: Vec<_> =
1569                     inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
1570                 inverse_memory_index.retain(|&i| i < b_start);
1571                 let inverse_memory_index_a = inverse_memory_index;
1572
1573                 // Since `inverse_memory_index_{a,b}` each only refer to their
1574                 // respective fields, they can be safely inverted.
1575                 let memory_index_a = invert_mapping(&inverse_memory_index_a);
1576                 let memory_index_b = invert_mapping(&inverse_memory_index_b);
1577
1578                 let outer_fields =
1579                     FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
1580                 (outer_fields, offsets_b, memory_index_b)
1581             }
1582             _ => bug!(),
1583         };
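        // Small worked example (hypothetical numbers): with two upvars, the
        // tag, and two promoted locals, `b_start = 3`; the five prefix fields'
        // offsets and memory order split into an "outer" part (indices `< 3`)
        // and a "promoted" part (indices `>= 3`, shifted down by 3), each of
        // which is again a permutation and can therefore be re-inverted.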
1584
1585         let mut size = prefix.size;
1586         let mut align = prefix.align;
1587         let variants = info
1588             .variant_fields
1589             .iter_enumerated()
1590             .map(|(index, variant_fields)| {
1591                 // Only include overlap-eligible fields when we compute our variant layout.
1592                 let variant_only_tys = variant_fields
1593                     .iter()
1594                     .filter(|local| match assignments[**local] {
1595                         Unassigned => bug!(),
1596                         Assigned(v) if v == index => true,
1597                         Assigned(_) => bug!("assignment does not match variant"),
1598                         Ineligible(_) => false,
1599                     })
1600                     .map(|local| subst_field(info.field_tys[*local]));
1601
1602                 let mut variant = self.univariant_uninterned(
1603                     ty,
1604                     &variant_only_tys
1605                         .map(|ty| self.layout_of(ty))
1606                         .collect::<Result<Vec<_>, _>>()?,
1607                     &ReprOptions::default(),
1608                     StructKind::Prefixed(prefix_size, prefix_align.abi),
1609                 )?;
1610                 variant.variants = Variants::Single { index };
1611
1612                 let (offsets, memory_index) = match variant.fields {
1613                     FieldsShape::Arbitrary { offsets, memory_index } => (offsets, memory_index),
1614                     _ => bug!(),
1615                 };
1616
1617                 // Now, stitch the promoted and variant-only fields back together in
1618                 // the order they are mentioned by our GeneratorLayout.
1619                 // Because we only use some subset (that can differ between variants)
1620                 // of the promoted fields, we can't just pick those elements of the
1621                 // `promoted_memory_index` (as we'd end up with gaps).
1622                 // So instead, we build an "inverse memory_index", as if all of the
1623                 // promoted fields were being used, but leave the elements not in the
1624                 // subset as `INVALID_FIELD_IDX`, which we can filter out later to
1625                 // obtain a valid (bijective) mapping.
1626                 const INVALID_FIELD_IDX: u32 = !0;
1627                 let mut combined_inverse_memory_index =
1628                     vec![INVALID_FIELD_IDX; promoted_memory_index.len() + memory_index.len()];
1629                 let mut offsets_and_memory_index = offsets.into_iter().zip(memory_index);
1630                 let combined_offsets = variant_fields
1631                     .iter()
1632                     .enumerate()
1633                     .map(|(i, local)| {
1634                         let (offset, memory_index) = match assignments[*local] {
1635                             Unassigned => bug!(),
1636                             Assigned(_) => {
1637                                 let (offset, memory_index) =
1638                                     offsets_and_memory_index.next().unwrap();
1639                                 (offset, promoted_memory_index.len() as u32 + memory_index)
1640                             }
1641                             Ineligible(field_idx) => {
1642                                 let field_idx = field_idx.unwrap() as usize;
1643                                 (promoted_offsets[field_idx], promoted_memory_index[field_idx])
1644                             }
1645                         };
1646                         combined_inverse_memory_index[memory_index as usize] = i as u32;
1647                         offset
1648                     })
1649                     .collect();
1650
1651                 // Remove the unused slots and invert the mapping to obtain the
1652                 // combined `memory_index` (also see previous comment).
1653                 combined_inverse_memory_index.retain(|&i| i != INVALID_FIELD_IDX);
1654                 let combined_memory_index = invert_mapping(&combined_inverse_memory_index);
1655
1656                 variant.fields = FieldsShape::Arbitrary {
1657                     offsets: combined_offsets,
1658                     memory_index: combined_memory_index,
1659                 };
1660
1661                 size = size.max(variant.size);
1662                 align = align.max(variant.align);
1663                 Ok(variant)
1664             })
1665             .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1666
1667         size = size.align_to(align.abi);
1668
1669         let abi = if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi.is_uninhabited())
1670         {
1671             Abi::Uninhabited
1672         } else {
1673             Abi::Aggregate { sized: true }
1674         };
1675
1676         let layout = tcx.intern_layout(Layout {
1677             variants: Variants::Multiple {
1678                 tag,
1679                 tag_encoding: TagEncoding::Direct,
1680                 tag_field: tag_index,
1681                 variants,
1682             },
1683             fields: outer_fields,
1684             abi,
1685             largest_niche: prefix.largest_niche,
1686             size,
1687             align,
1688         });
1689         debug!("generator layout ({:?}): {:#?}", ty, layout);
1690         Ok(layout)
1691     }
1692
1693     /// This is invoked by the `layout_raw` query to record the final
1694     /// layout of each type.
1695     #[inline(always)]
1696     fn record_layout_for_printing(&self, layout: TyAndLayout<'tcx>) {
1697         // If we are running with `-Zprint-type-sizes`, maybe record layouts
1698         // for dumping later.
1699         if self.tcx.sess.opts.debugging_opts.print_type_sizes {
1700             self.record_layout_for_printing_outlined(layout)
1701         }
1702     }
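    // Example invocation (assuming a nightly toolchain, since `-Z` flags are
    // unstable):
    //
    //     $ RUSTFLAGS="-Zprint-type-sizes" cargo +nightly build
    //
    // With that flag set, the recording below actually runs and dumps one
    // entry per monomorphized type.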
1703
1704     fn record_layout_for_printing_outlined(&self, layout: TyAndLayout<'tcx>) {
1705         // Ignore layouts computed with non-empty environments, as well as
1706         // non-monomorphic layouts, as the user only wants to see what
1707         // results from the final codegen session.
1708         if layout.ty.has_param_types_or_consts() || !self.param_env.caller_bounds().is_empty() {
1709             return;
1710         }
1711
1712         // (delay format until we actually need it)
1713         let record = |kind, packed, opt_discr_size, variants| {
1714             let type_desc = format!("{:?}", layout.ty);
1715             self.tcx.sess.code_stats.record_type_size(
1716                 kind,
1717                 type_desc,
1718                 layout.align.abi,
1719                 layout.size,
1720                 packed,
1721                 opt_discr_size,
1722                 variants,
1723             );
1724         };
1725
1726         let adt_def = match *layout.ty.kind() {
1727             ty::Adt(ref adt_def, _) => {
1728                 debug!("print-type-size t: `{:?}` process adt", layout.ty);
1729                 adt_def
1730             }
1731
1732             ty::Closure(..) => {
1733                 debug!("print-type-size t: `{:?}` record closure", layout.ty);
1734                 record(DataTypeKind::Closure, false, None, vec![]);
1735                 return;
1736             }
1737
1738             _ => {
1739                 debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
1740                 return;
1741             }
1742         };
1743
1744         let adt_kind = adt_def.adt_kind();
1745         let adt_packed = adt_def.repr.pack.is_some();
1746
1747         let build_variant_info = |n: Option<Ident>, flds: &[Symbol], layout: TyAndLayout<'tcx>| {
1748             let mut min_size = Size::ZERO;
1749             let field_info: Vec<_> = flds
1750                 .iter()
1751                 .enumerate()
1752                 .map(|(i, &name)| match layout.field(self, i) {
1753                     Err(err) => {
1754                         bug!("no layout found for field {}: `{:?}`", name, err);
1755                     }
1756                     Ok(field_layout) => {
1757                         let offset = layout.fields.offset(i);
1758                         let field_end = offset + field_layout.size;
1759                         if min_size < field_end {
1760                             min_size = field_end;
1761                         }
1762                         FieldInfo {
1763                             name: name.to_string(),
1764                             offset: offset.bytes(),
1765                             size: field_layout.size.bytes(),
1766                             align: field_layout.align.abi.bytes(),
1767                         }
1768                     }
1769                 })
1770                 .collect();
1771
1772             VariantInfo {
1773                 name: n.map(|n| n.to_string()),
1774                 kind: if layout.is_unsized() { SizeKind::Min } else { SizeKind::Exact },
1775                 align: layout.align.abi.bytes(),
1776                 size: if min_size.bytes() == 0 { layout.size.bytes() } else { min_size.bytes() },
1777                 fields: field_info,
1778             }
1779         };
1780
1781         match layout.variants {
1782             Variants::Single { index } => {
1783                 debug!("print-type-size `{:#?}` variant {}", layout, adt_def.variants[index].ident);
1784                 if !adt_def.variants.is_empty() {
1785                     let variant_def = &adt_def.variants[index];
1786                     let fields: Vec<_> = variant_def.fields.iter().map(|f| f.ident.name).collect();
1787                     record(
1788                         adt_kind.into(),
1789                         adt_packed,
1790                         None,
1791                         vec![build_variant_info(Some(variant_def.ident), &fields, layout)],
1792                     );
1793                 } else {
1794                 // (This case arises for *empty* enums, so give it
1795                     // zero variants.)
1796                     record(adt_kind.into(), adt_packed, None, vec![]);
1797                 }
1798             }
1799
1800             Variants::Multiple { ref tag, ref tag_encoding, .. } => {
1801                 debug!(
1802                     "print-type-size `{:#?}` adt general variants def {}",
1803                     layout.ty,
1804                     adt_def.variants.len()
1805                 );
1806                 let variant_infos: Vec<_> = adt_def
1807                     .variants
1808                     .iter_enumerated()
1809                     .map(|(i, variant_def)| {
1810                         let fields: Vec<_> =
1811                             variant_def.fields.iter().map(|f| f.ident.name).collect();
1812                         build_variant_info(
1813                             Some(variant_def.ident),
1814                             &fields,
1815                             layout.for_variant(self, i),
1816                         )
1817                     })
1818                     .collect();
1819                 record(
1820                     adt_kind.into(),
1821                     adt_packed,
1822                     match tag_encoding {
1823                         TagEncoding::Direct => Some(tag.value.size(self)),
1824                         _ => None,
1825                     },
1826                     variant_infos,
1827                 );
1828             }
1829         }
1830     }
1831 }
1832
1833 /// Type size "skeleton", i.e., the only information determining a type's size.
1834 /// While this is conservative (aside from constant sizes, only pointers,
1835 /// newtypes thereof, and null-pointer-optimized enums are allowed), it is
1836 /// enough to statically check common use cases of transmute.
1837 #[derive(Copy, Clone, Debug)]
1838 pub enum SizeSkeleton<'tcx> {
1839     /// Any statically computable Layout.
1840     Known(Size),
1841
1842     /// A potentially-fat pointer.
1843     Pointer {
1844         /// If true, this pointer is never null.
1845         non_zero: bool,
1846         /// The type which determines the unsized metadata, if any,
1847         /// of this pointer. Either a type parameter or a projection
1848         /// depending on one, with regions erased.
1849         tail: Ty<'tcx>,
1850     },
1851 }
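// For instance (a user-level sketch): the generic transmute below is accepted
// even though `T`'s full layout is unknown, because both sides reduce to the
// same `SizeSkeleton::Pointer { tail: T, .. }`:
//
//     unsafe fn as_raw<T>(r: &T) -> *const T {
//         std::mem::transmute(r)
//     }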
1852
1853 impl<'tcx> SizeSkeleton<'tcx> {
1854     pub fn compute(
1855         ty: Ty<'tcx>,
1856         tcx: TyCtxt<'tcx>,
1857         param_env: ty::ParamEnv<'tcx>,
1858     ) -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
1859         debug_assert!(!ty.has_infer_types_or_consts());
1860
1861         // First try computing a static layout.
1862         let err = match tcx.layout_of(param_env.and(ty)) {
1863             Ok(layout) => {
1864                 return Ok(SizeSkeleton::Known(layout.size));
1865             }
1866             Err(err) => err,
1867         };
1868
1869         match *ty.kind() {
1870             ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
1871                 let non_zero = !ty.is_unsafe_ptr();
1872                 let tail = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
1873                 match tail.kind() {
1874                     ty::Param(_) | ty::Projection(_) => {
1875                         debug_assert!(tail.has_param_types_or_consts());
1876                         Ok(SizeSkeleton::Pointer { non_zero, tail: tcx.erase_regions(tail) })
1877                     }
1878                     _ => bug!(
1879                         "SizeSkeleton::compute({}): layout errored ({}), yet \
1880                               tail `{}` is not a type parameter or a projection",
1881                         ty,
1882                         err,
1883                         tail
1884                     ),
1885                 }
1886             }
1887
1888             ty::Adt(def, substs) => {
1889                 // Only newtypes and enums with the nullable pointer optimization.
1890                 if def.is_union() || def.variants.is_empty() || def.variants.len() > 2 {
1891                     return Err(err);
1892                 }
1893
1894                 // Get a zero-sized variant or a pointer newtype.
1895                 let zero_or_ptr_variant = |i| {
1896                     let i = VariantIdx::new(i);
1897                     let fields = def.variants[i]
1898                         .fields
1899                         .iter()
1900                         .map(|field| SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env));
1901                     let mut ptr = None;
1902                     for field in fields {
1903                         let field = field?;
1904                         match field {
1905                             SizeSkeleton::Known(size) => {
1906                                 if size.bytes() > 0 {
1907                                     return Err(err);
1908                                 }
1909                             }
1910                             SizeSkeleton::Pointer { .. } => {
1911                                 if ptr.is_some() {
1912                                     return Err(err);
1913                                 }
1914                                 ptr = Some(field);
1915                             }
1916                         }
1917                     }
1918                     Ok(ptr)
1919                 };
1920
1921                 let v0 = zero_or_ptr_variant(0)?;
1922                 // Newtype.
1923                 if def.variants.len() == 1 {
1924                     if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
1925                         return Ok(SizeSkeleton::Pointer {
1926                             non_zero: non_zero
1927                                 || match tcx.layout_scalar_valid_range(def.did) {
1928                                     (Bound::Included(start), Bound::Unbounded) => start > 0,
1929                                     (Bound::Included(start), Bound::Included(end)) => {
1930                                         0 < start && start < end
1931                                     }
1932                                     _ => false,
1933                                 },
1934                             tail,
1935                         });
1936                     } else {
1937                         return Err(err);
1938                     }
1939                 }
1940
1941                 let v1 = zero_or_ptr_variant(1)?;
1942                 // Nullable pointer enum optimization.
1943                 match (v0, v1) {
1944                     (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None)
1945                     | (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
1946                         Ok(SizeSkeleton::Pointer { non_zero: false, tail })
1947                     }
1948                     _ => Err(err),
1949                 }
1950             }
1951
1952             ty::Projection(_) | ty::Opaque(..) => {
1953                 let normalized = tcx.normalize_erasing_regions(param_env, ty);
1954                 if ty == normalized {
1955                     Err(err)
1956                 } else {
1957                     SizeSkeleton::compute(normalized, tcx, param_env)
1958                 }
1959             }
1960
1961             _ => Err(err),
1962         }
1963     }
1964
1965     pub fn same_size(self, other: SizeSkeleton<'_>) -> bool {
1966         match (self, other) {
1967             (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
1968             (SizeSkeleton::Pointer { tail: a, .. }, SizeSkeleton::Pointer { tail: b, .. }) => {
1969                 a == b
1970             }
1971             _ => false,
1972         }
1973     }
1974 }
1975
1976 pub trait HasTyCtxt<'tcx>: HasDataLayout {
1977     fn tcx(&self) -> TyCtxt<'tcx>;
1978 }
1979
1980 pub trait HasParamEnv<'tcx> {
1981     fn param_env(&self) -> ty::ParamEnv<'tcx>;
1982 }
1983
1984 impl<'tcx> HasDataLayout for TyCtxt<'tcx> {
1985     fn data_layout(&self) -> &TargetDataLayout {
1986         &self.data_layout
1987     }
1988 }
1989
1990 impl<'tcx> HasTyCtxt<'tcx> for TyCtxt<'tcx> {
1991     fn tcx(&self) -> TyCtxt<'tcx> {
1992         *self
1993     }
1994 }
1995
1996 impl<'tcx, C> HasParamEnv<'tcx> for LayoutCx<'tcx, C> {
1997     fn param_env(&self) -> ty::ParamEnv<'tcx> {
1998         self.param_env
1999     }
2000 }
2001
2002 impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
2003     fn data_layout(&self) -> &TargetDataLayout {
2004         self.tcx.data_layout()
2005     }
2006 }
2007
2008 impl<'tcx, T: HasTyCtxt<'tcx>> HasTyCtxt<'tcx> for LayoutCx<'tcx, T> {
2009     fn tcx(&self) -> TyCtxt<'tcx> {
2010         self.tcx.tcx()
2011     }
2012 }
2013
2014 pub type TyAndLayout<'tcx> = rustc_target::abi::TyAndLayout<'tcx, Ty<'tcx>>;
2015
2016 impl<'tcx> LayoutOf for LayoutCx<'tcx, TyCtxt<'tcx>> {
2017     type Ty = Ty<'tcx>;
2018     type TyAndLayout = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
2019
2020     /// Computes the layout of a type. Note that this implicitly
2021     /// executes in "reveal all" mode.
2022     fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout {
2023         let param_env = self.param_env.with_reveal_all_normalized(self.tcx);
2024         let ty = self.tcx.normalize_erasing_regions(param_env, ty);
2025         let layout = self.tcx.layout_raw(param_env.and(ty))?;
2026         let layout = TyAndLayout { ty, layout };
2027
2028         // N.B., this recording is normally disabled; when enabled, it
2029         // can however trigger recursive invocations of `layout_of`.
2030         // Therefore, we execute it *after* the main query has
2031         // completed, to avoid problems around recursive structures
2032         // and the like. (Admittedly, I wasn't able to reproduce a problem
2033         // here, but it seems like the right thing to do. -nmatsakis)
2034         self.record_layout_for_printing(layout);
2035
2036         Ok(layout)
2037     }
2038 }
2039
2040 impl LayoutOf for LayoutCx<'tcx, ty::query::TyCtxtAt<'tcx>> {
2041     type Ty = Ty<'tcx>;
2042     type TyAndLayout = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
2043
2044     /// Computes the layout of a type. Note that this implicitly
2045     /// executes in "reveal all" mode.
2046     fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout {
2047         let param_env = self.param_env.with_reveal_all_normalized(*self.tcx);
2048         let ty = self.tcx.normalize_erasing_regions(param_env, ty);
2049         let layout = self.tcx.layout_raw(param_env.and(ty))?;
2050         let layout = TyAndLayout { ty, layout };
2051
2052         // N.B., this recording is normally disabled; when enabled, it
2053         // can however trigger recursive invocations of `layout_of`.
2054         // Therefore, we execute it *after* the main query has
2055         // completed, to avoid problems around recursive structures
2056         // and the like. (Admittedly, I wasn't able to reproduce a problem
2057         // here, but it seems like the right thing to do. -nmatsakis)
2058         let cx = LayoutCx { tcx: *self.tcx, param_env: self.param_env };
2059         cx.record_layout_for_printing(layout);
2060
2061         Ok(layout)
2062     }
2063 }
2064
2065 // Helper (inherent) `layout_of` methods to avoid pushing `LayoutCx` to users.
2066 impl TyCtxt<'tcx> {
2067     /// Computes the layout of a type. Note that this implicitly
2068     /// executes in "reveal all" mode.
2069     #[inline]
2070     pub fn layout_of(
2071         self,
2072         param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
2073     ) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
2074         let cx = LayoutCx { tcx: self, param_env: param_env_and_ty.param_env };
2075         cx.layout_of(param_env_and_ty.value)
2076     }
2077 }
2078
2079 impl ty::query::TyCtxtAt<'tcx> {
2080     /// Computes the layout of a type. Note that this implicitly
2081     /// executes in "reveal all" mode.
2082     #[inline]
2083     pub fn layout_of(
2084         self,
2085         param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
2086     ) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
2087         let cx = LayoutCx { tcx: self.at(self.span), param_env: param_env_and_ty.param_env };
2088         cx.layout_of(param_env_and_ty.value)
2089     }
2090 }
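// A minimal usage sketch (assuming surrounding compiler code that already has
// `tcx: TyCtxt<'tcx>`, a `param_env`, and some `ty`; none of these names are
// defined here):
//
//     let layout = tcx.layout_of(param_env.and(ty))?;
//     debug!("size = {:?}, align = {:?}", layout.size, layout.align.abi);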
2091
2092 impl<'tcx, C> TyAndLayoutMethods<'tcx, C> for Ty<'tcx>
2093 where
2094     C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout: MaybeResult<TyAndLayout<'tcx>>>
2095         + HasTyCtxt<'tcx>
2096         + HasParamEnv<'tcx>,
2097 {
2098     fn for_variant(
2099         this: TyAndLayout<'tcx>,
2100         cx: &C,
2101         variant_index: VariantIdx,
2102     ) -> TyAndLayout<'tcx> {
2103         let layout = match this.variants {
2104             Variants::Single { index }
2105                 // If all variants but one are uninhabited, the variant layout is the enum layout.
2106                 if index == variant_index &&
2107                 // Don't confuse variants of uninhabited enums with the enum itself.
2108                 // For more details see https://github.com/rust-lang/rust/issues/69763.
2109                 this.fields != FieldsShape::Primitive =>
2110             {
2111                 this.layout
2112             }
2113
2114             Variants::Single { index } => {
2115                 // Deny calling for_variant more than once for non-Single enums.
2116                 if let Ok(original_layout) = cx.layout_of(this.ty).to_result() {
2117                     assert_eq!(original_layout.variants, Variants::Single { index });
2118                 }
2119
2120                 let fields = match this.ty.kind() {
2121                     ty::Adt(def, _) if def.variants.is_empty() =>
2122                         bug!("for_variant called on zero-variant enum"),
2123                     ty::Adt(def, _) => def.variants[variant_index].fields.len(),
2124                     _ => bug!(),
2125                 };
2126                 let tcx = cx.tcx();
2127                 tcx.intern_layout(Layout {
2128                     variants: Variants::Single { index: variant_index },
2129                     fields: match NonZeroUsize::new(fields) {
2130                         Some(fields) => FieldsShape::Union(fields),
2131                         None => FieldsShape::Arbitrary { offsets: vec![], memory_index: vec![] },
2132                     },
2133                     abi: Abi::Uninhabited,
2134                     largest_niche: None,
2135                     align: tcx.data_layout.i8_align,
2136                     size: Size::ZERO,
2137                 })
2138             }
2139
2140             Variants::Multiple { ref variants, .. } => &variants[variant_index],
2141         };
2142
2143         assert_eq!(layout.variants, Variants::Single { index: variant_index });
2144
2145         TyAndLayout { ty: this.ty, layout }
2146     }
2147
2148     fn field(this: TyAndLayout<'tcx>, cx: &C, i: usize) -> C::TyAndLayout {
2149         enum TyMaybeWithLayout<C: LayoutOf> {
2150             Ty(C::Ty),
2151             TyAndLayout(C::TyAndLayout),
2152         }
2153
2154         fn ty_and_layout_kind<
2155             C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout: MaybeResult<TyAndLayout<'tcx>>>
2156                 + HasTyCtxt<'tcx>
2157                 + HasParamEnv<'tcx>,
2158         >(
2159             this: TyAndLayout<'tcx>,
2160             cx: &C,
2161             i: usize,
2162             ty: C::Ty,
2163         ) -> TyMaybeWithLayout<C> {
2164             let tcx = cx.tcx();
2165             let tag_layout = |tag: &Scalar| -> C::TyAndLayout {
2166                 let layout = Layout::scalar(cx, tag.clone());
2167                 MaybeResult::from(Ok(TyAndLayout {
2168                     layout: tcx.intern_layout(layout),
2169                     ty: tag.value.to_ty(tcx),
2170                 }))
2171             };
2172
2173             match *ty.kind() {
2174                 ty::Bool
2175                 | ty::Char
2176                 | ty::Int(_)
2177                 | ty::Uint(_)
2178                 | ty::Float(_)
2179                 | ty::FnPtr(_)
2180                 | ty::Never
2181                 | ty::FnDef(..)
2182                 | ty::GeneratorWitness(..)
2183                 | ty::Foreign(..)
2184                 | ty::Dynamic(..) => bug!("TyAndLayout::field_type({:?}): not applicable", this),
2185
2186                 // Potentially-fat pointers.
2187                 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
2188                     assert!(i < this.fields.count());
2189
2190                     // Reuse the fat `*T` type as its own thin pointer data field.
2191                     // This provides information about, e.g., DST struct pointees
2192                     // (which may have no non-DST form), and will work as long
2193                     // as the `Abi` or `FieldsShape` is checked by users.
2194                     if i == 0 {
2195                         let nil = tcx.mk_unit();
2196                         let ptr_ty = if ty.is_unsafe_ptr() {
2197                             tcx.mk_mut_ptr(nil)
2198                         } else {
2199                             tcx.mk_mut_ref(tcx.lifetimes.re_static, nil)
2200                         };
2201                         return TyMaybeWithLayout::TyAndLayout(MaybeResult::from(
2202                             cx.layout_of(ptr_ty).to_result().map(|mut ptr_layout| {
2203                                 ptr_layout.ty = ty;
2204                                 ptr_layout
2205                             }),
2206                         ));
2207                     }
2208
2209                     match tcx.struct_tail_erasing_lifetimes(pointee, cx.param_env()).kind() {
2210                         ty::Slice(_) | ty::Str => TyMaybeWithLayout::Ty(tcx.types.usize),
2211                         ty::Dynamic(_, _) => {
2212                             TyMaybeWithLayout::Ty(tcx.mk_imm_ref(
2213                                 tcx.lifetimes.re_static,
2214                                 tcx.mk_array(tcx.types.usize, 3),
2215                             ))
2216                             /* FIXME: use actual fn pointers
2217                             Warning: naively computing the number of entries in the
2218                             vtable by counting the methods on the trait + methods on
2219                             all parent traits does not work, because some methods can
2220                             be not object safe and thus excluded from the vtable.
2221                             Increase this counter if you tried to implement this but
2222                             failed to do it without duplicating a lot of code from
2223                             other places in the compiler: 2
2224                             tcx.mk_tup(&[
2225                                 tcx.mk_array(tcx.types.usize, 3),
2226                                 tcx.mk_array(Option<fn()>),
2227                             ])
2228                             */
2229                         }
2230                         _ => bug!("TyAndLayout::field_type({:?}): not applicable", this),
2231                     }
2232                 }
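                // Concretely (illustrative): for `&str`, field 0 is the thin
                // data pointer (modeled by the `ptr_ty` hack above) and field 1
                // is the `usize` length; for `&dyn Trait`, field 1 is the
                // vtable pointer, approximated above as `&'static [usize; 3]`.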
2233
2234                 // Arrays and slices.
2235                 ty::Array(element, _) | ty::Slice(element) => TyMaybeWithLayout::Ty(element),
2236                 ty::Str => TyMaybeWithLayout::Ty(tcx.types.u8),
2237
2238                 // Tuples, generators and closures.
2239                 ty::Closure(_, ref substs) => {
2240                     ty_and_layout_kind(this, cx, i, substs.as_closure().tupled_upvars_ty())
2241                 }
2242
2243                 ty::Generator(def_id, ref substs, _) => match this.variants {
2244                     Variants::Single { index } => TyMaybeWithLayout::Ty(
2245                         substs
2246                             .as_generator()
2247                             .state_tys(def_id, tcx)
2248                             .nth(index.as_usize())
2249                             .unwrap()
2250                             .nth(i)
2251                             .unwrap(),
2252                     ),
2253                     Variants::Multiple { ref tag, tag_field, .. } => {
2254                         if i == tag_field {
2255                             return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
2256                         }
2257                         TyMaybeWithLayout::Ty(substs.as_generator().prefix_tys().nth(i).unwrap())
2258                     }
2259                 },
2260
2261                 ty::Tuple(tys) => TyMaybeWithLayout::Ty(tys[i].expect_ty()),
2262
2263                 // ADTs.
2264                 ty::Adt(def, substs) => {
2265                     match this.variants {
2266                         Variants::Single { index } => {
2267                             TyMaybeWithLayout::Ty(def.variants[index].fields[i].ty(tcx, substs))
2268                         }
2269
2270                         // Discriminant field for enums (where applicable).
2271                         Variants::Multiple { ref tag, .. } => {
2272                             assert_eq!(i, 0);
2273                             return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
2274                         }
2275                     }
2276                 }
2277
2278                 ty::Projection(_)
2279                 | ty::Bound(..)
2280                 | ty::Placeholder(..)
2281                 | ty::Opaque(..)
2282                 | ty::Param(_)
2283                 | ty::Infer(_)
2284                 | ty::Error(_) => bug!("TyAndLayout::field_type: unexpected type `{}`", this.ty),
2285             }
2286         }
2287
2288         cx.layout_of(match ty_and_layout_kind(this, cx, i, this.ty) {
2289             TyMaybeWithLayout::Ty(result) => result,
2290             TyMaybeWithLayout::TyAndLayout(result) => return result,
2291         })
2292     }
2293
2294     fn pointee_info_at(this: TyAndLayout<'tcx>, cx: &C, offset: Size) -> Option<PointeeInfo> {
2295         let addr_space_of_ty = |ty: Ty<'tcx>| {
2296             if ty.is_fn() { cx.data_layout().instruction_address_space } else { AddressSpace::DATA }
2297         };
2298
2299         let pointee_info = match *this.ty.kind() {
2300             ty::RawPtr(mt) if offset.bytes() == 0 => {
2301                 cx.layout_of(mt.ty).to_result().ok().map(|layout| PointeeInfo {
2302                     size: layout.size,
2303                     align: layout.align.abi,
2304                     safe: None,
2305                     address_space: addr_space_of_ty(mt.ty),
2306                 })
2307             }
2308             ty::FnPtr(fn_sig) if offset.bytes() == 0 => {
2309                 cx.layout_of(cx.tcx().mk_fn_ptr(fn_sig)).to_result().ok().map(|layout| {
2310                     PointeeInfo {
2311                         size: layout.size,
2312                         align: layout.align.abi,
2313                         safe: None,
2314                         address_space: cx.data_layout().instruction_address_space,
2315                     }
2316                 })
2317             }
2318             ty::Ref(_, ty, mt) if offset.bytes() == 0 => {
2319                 let address_space = addr_space_of_ty(ty);
2320                 let tcx = cx.tcx();
2321                 let is_freeze = ty.is_freeze(tcx.at(DUMMY_SP), cx.param_env());
2322                 let kind = match mt {
2323                     hir::Mutability::Not => {
2324                         if is_freeze {
2325                             PointerKind::Frozen
2326                         } else {
2327                             PointerKind::Shared
2328                         }
2329                     }
2330                     hir::Mutability::Mut => {
2331                         // Previously we would only emit noalias annotations for LLVM >= 6 or in
2332                         // panic=abort mode. That was deemed right, as prior versions had many bugs
2333                         // in conjunction with unwinding, but later versions didn’t seem to have
2334                         // said issues. See issue #31681.
2335                         //
2336                         // Alas, later on we encountered a case where noalias would generate wrong
2337                         // code altogether even with recent versions of LLVM in *safe* code with no
2338                         // unwinding involved. See #54462.
2339                         //
2340                         // For now, do not enable mutable_noalias by default at all, while the
2341                         // issue is being figured out.
                        if tcx.sess.opts.debugging_opts.mutable_noalias {
                            PointerKind::UniqueBorrowed
                        } else {
                            PointerKind::Shared
                        }
                    }
                };

                cx.layout_of(ty).to_result().ok().map(|layout| PointeeInfo {
                    size: layout.size,
                    align: layout.align.abi,
                    safe: Some(kind),
                    address_space,
                })
            }

            _ => {
                let mut data_variant = match this.variants {
                    // Within the discriminant field, only the niche itself is
                    // always initialized, so we only check for a pointer at its
                    // offset.
                    //
                    // If the niche is a pointer, it's either valid (according
                    // to its type), or null (which the niche field's scalar
                    // validity range encodes).  This allows using
                    // `dereferenceable_or_null` for e.g., `Option<&T>`, and
                    // this will continue to work as long as we don't start
                    // using more niches than just null (e.g., the first page of
                    // the address space, or unaligned pointers).
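                    //
                    // A concrete (hedged) example: `Option<&u8>` uses the null
                    // value of the reference as its niche, so `None` needs no
                    // separate tag. Asking for pointee info at the tag offset
                    // therefore descends into the dataful `Some` variant and
                    // reports the `&u8` pointee, which is what lets codegen
                    // emit `dereferenceable_or_null` for such arguments.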
                    Variants::Multiple {
                        tag_encoding: TagEncoding::Niche { dataful_variant, .. },
                        tag_field,
                        ..
                    } if this.fields.offset(tag_field) == offset => {
                        Some(this.for_variant(cx, dataful_variant))
                    }
                    _ => Some(this),
                };

                if let Some(variant) = data_variant {
                    // We're not interested in any unions.
                    if let FieldsShape::Union(_) = variant.fields {
                        data_variant = None;
                    }
                }

                let mut result = None;

                if let Some(variant) = data_variant {
                    let ptr_end = offset + Pointer.size(cx);
                    for i in 0..variant.fields.count() {
                        let field_start = variant.fields.offset(i);
                        if field_start <= offset {
                            let field = variant.field(cx, i);
                            result = field.to_result().ok().and_then(|field| {
                                if ptr_end <= field_start + field.size {
                                    // We found the right field, look inside it.
                                    field.pointee_info_at(cx, offset - field_start)
                                } else {
                                    None
                                }
                            });
                            if result.is_some() {
                                break;
                            }
                        }
                    }
                }

                // FIXME(eddyb) This should be for `ptr::Unique<T>`, not `Box<T>`.
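                // As the code currently stands, a `Box` whose pointer sits at
                // offset 0 is the case that fires here; e.g. (a hedged sketch)
                // `Box<u8>` ends up reported with
                // `safe: Some(PointerKind::UniqueOwned)`, which downstream
                // translates into `noalias` on the argument.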
                if let Some(ref mut pointee) = result {
                    if let ty::Adt(def, _) = this.ty.kind() {
                        if def.is_box() && offset.bytes() == 0 {
                            pointee.safe = Some(PointerKind::UniqueOwned);
                        }
                    }
                }

                result
            }
        };

        debug!(
            "pointee_info_at (offset={:?}, type kind: {:?}) => {:?}",
            offset,
            this.ty.kind(),
            pointee_info
        );

        pointee_info
    }
}

impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for LayoutError<'tcx> {
    fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
        use crate::ty::layout::LayoutError::*;
        mem::discriminant(self).hash_stable(hcx, hasher);

        match *self {
            Unknown(t) | SizeOverflow(t) => t.hash_stable(hcx, hasher),
        }
    }
}

impl<'tcx> ty::Instance<'tcx> {
    // NOTE(eddyb) this is private to avoid using it from outside of
    // `FnAbi::of_instance` - any other uses are either too high-level
    // for `Instance` (e.g. typeck would use `Ty::fn_sig` instead),
    // or should go through `FnAbi` instead, to avoid losing any
    // adjustments `FnAbi::of_instance` might be performing.
    fn fn_sig_for_fn_abi(&self, tcx: TyCtxt<'tcx>) -> ty::PolyFnSig<'tcx> {
        // FIXME(davidtwco,eddyb): A `ParamEnv` should be passed through to this function.
        let ty = self.ty(tcx, ty::ParamEnv::reveal_all());
        match *ty.kind() {
            ty::FnDef(..) => {
                // HACK(davidtwco,eddyb): This is a workaround for polymorphization considering
                // parameters unused if they show up in the signature, but not in the `mir::Body`
                // (i.e. due to being inside a projection that got normalized, see
                // `src/test/ui/polymorphization/normalized_sig_types.rs`), and codegen not keeping
                // track of a polymorphization `ParamEnv` to allow normalizing later.
                let mut sig = match *ty.kind() {
                    ty::FnDef(def_id, substs) => tcx
                        .normalize_erasing_regions(tcx.param_env(def_id), tcx.fn_sig(def_id))
                        .subst(tcx, substs),
                    _ => unreachable!(),
                };

                if let ty::InstanceDef::VtableShim(..) = self.def {
                    // Modify `fn(self, ...)` to `fn(self: *mut Self, ...)`.
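                    // E.g. (a hedged sketch): for a by-value trait method
                    // `fn consume(self)`, the vtable shim is entered through a
                    // `*mut Self` receiver, so its ABI-level signature becomes
                    // `fn(*mut Self)` even though the Rust-level one is
                    // `fn(self)`.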
                    sig = sig.map_bound(|mut sig| {
                        let mut inputs_and_output = sig.inputs_and_output.to_vec();
                        inputs_and_output[0] = tcx.mk_mut_ptr(inputs_and_output[0]);
                        sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);
                        sig
                    });
                }
                sig
            }
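            // For closures, prepend the environment (the closure itself) as an
            // explicit first argument. A hedged illustration: a closure
            // `|x: u32| -> u32` whose kind is `Fn` is lowered roughly as
            // `extern "rust-call" fn(&{closure}, (u32,)) -> u32`, with the
            // argument tuple untupled later by the `RustCall` handling in
            // `FnAbi::new_internal`.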
            ty::Closure(def_id, substs) => {
                let sig = substs.as_closure().sig();

                let env_ty = tcx.closure_env_ty(def_id, substs).unwrap();
                sig.map_bound(|sig| {
                    tcx.mk_fn_sig(
                        iter::once(env_ty.skip_binder()).chain(sig.inputs().iter().cloned()),
                        sig.output(),
                        sig.c_variadic,
                        sig.unsafety,
                        sig.abi,
                    )
                })
            }
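            // Generators are resumed through `Pin<&mut Self>`. A hedged sketch
            // of the resulting signature, for a generator with resume type `R`,
            // yield type `Y` and return type `T`:
            //
            //     fn(Pin<&mut [generator]>, R) -> GeneratorState<Y, T>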
            ty::Generator(_, substs, _) => {
                let sig = substs.as_generator().poly_sig();

                let br = ty::BoundRegion { kind: ty::BrEnv };
                let env_region = ty::ReLateBound(ty::INNERMOST, br);
                let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty);

                let pin_did = tcx.require_lang_item(LangItem::Pin, None);
                let pin_adt_ref = tcx.adt_def(pin_did);
                let pin_substs = tcx.intern_substs(&[env_ty.into()]);
                let env_ty = tcx.mk_adt(pin_adt_ref, pin_substs);

                sig.map_bound(|sig| {
                    let state_did = tcx.require_lang_item(LangItem::GeneratorState, None);
                    let state_adt_ref = tcx.adt_def(state_did);
                    let state_substs =
                        tcx.intern_substs(&[sig.yield_ty.into(), sig.return_ty.into()]);
                    let ret_ty = tcx.mk_adt(state_adt_ref, state_substs);

                    tcx.mk_fn_sig(
                        [env_ty, sig.resume_ty].iter(),
                        &ret_ty,
                        false,
                        hir::Unsafety::Normal,
                        rustc_target::spec::abi::Abi::Rust,
                    )
                })
            }
            _ => bug!("unexpected type {:?} in Instance::fn_sig", ty),
        }
    }
}

pub trait FnAbiExt<'tcx, C>
where
    C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout = TyAndLayout<'tcx>>
        + HasDataLayout
        + HasTargetSpec
        + HasTyCtxt<'tcx>
        + HasParamEnv<'tcx>,
{
    /// Compute a `FnAbi` suitable for indirect calls, i.e. to `fn` pointers.
    ///
    /// NB: this doesn't handle virtual calls - those should use `FnAbi::of_instance`
    /// instead, where the instance is an `InstanceDef::Virtual`.
    fn of_fn_ptr(cx: &C, sig: ty::PolyFnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self;

    /// Compute a `FnAbi` suitable for declaring/defining an `fn` instance, and for
    /// direct calls to an `fn`.
    ///
    /// NB: that includes virtual calls, which are represented by "direct calls"
    /// to an `InstanceDef::Virtual` instance (of `<dyn Trait as Trait>::fn`).
    fn of_instance(cx: &C, instance: ty::Instance<'tcx>, extra_args: &[Ty<'tcx>]) -> Self;

    fn new_internal(
        cx: &C,
        sig: ty::PolyFnSig<'tcx>,
        extra_args: &[Ty<'tcx>],
        caller_location: Option<Ty<'tcx>>,
        codegen_fn_attr_flags: CodegenFnAttrFlags,
        make_self_ptr_thin: bool,
    ) -> Self;
    fn adjust_for_abi(&mut self, cx: &C, abi: SpecAbi);
}

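// Decides whether a function is allowed to unwind, based on the panic
// strategy, per-function attributes, and its calling convention. A few hedged
// examples of the intended behaviour:
//
//     // with -C panic=abort, nothing unwinds:
//     fn_can_unwind(PanicStrategy::Abort, CodegenFnAttrFlags::empty(), Conv::Rust) == false
//     // plain Rust functions may unwind under panic=unwind:
//     fn_can_unwind(PanicStrategy::Unwind, CodegenFnAttrFlags::empty(), Conv::Rust) == true
//     // non-Rust ABIs are treated as nounwind:
//     fn_can_unwind(PanicStrategy::Unwind, CodegenFnAttrFlags::empty(), Conv::C) == false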
fn fn_can_unwind(
    panic_strategy: PanicStrategy,
    codegen_fn_attr_flags: CodegenFnAttrFlags,
    call_conv: Conv,
) -> bool {
    if panic_strategy != PanicStrategy::Unwind {
        // In panic=abort mode we assume nothing can unwind anywhere, so
        // optimize based on this!
        false
    } else if codegen_fn_attr_flags.contains(CodegenFnAttrFlags::UNWIND) {
        // If a specific #[unwind] attribute is present, use that.
        true
    } else if codegen_fn_attr_flags.contains(CodegenFnAttrFlags::RUSTC_ALLOCATOR_NOUNWIND) {
        // Special attribute for allocator functions, which can't unwind.
        false
    } else if call_conv == Conv::Rust {
        // Any Rust method (or `extern "Rust" fn` or `extern
        // "rust-call" fn`) is explicitly allowed to unwind
        // (unless it has a no-unwind attribute, handled above).
        true
    } else {
        // Anything else is either:
        //
        //  1. A foreign item using a non-Rust ABI (like `extern "C" { fn foo(); }`), or
        //
        //  2. A Rust item using a non-Rust ABI (like `extern "C" fn foo() { ... }`).
        //
        // Foreign items (case 1) are assumed to not unwind; it is
        // UB otherwise. (At least for now; see also
        // rust-lang/rust#63909 and Rust RFC 2753.)
        //
        // Items defined in Rust with non-Rust ABIs (case 2) are also
        // not supposed to unwind. Whether this should be enforced
        // (versus stating it is UB) and *how* it would be enforced
        // is currently under discussion; see rust-lang/rust#58794.
        //
        // In either case, we mark the item as explicitly nounwind.
        false
    }
}

impl<'tcx, C> FnAbiExt<'tcx, C> for call::FnAbi<'tcx, Ty<'tcx>>
where
    C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout = TyAndLayout<'tcx>>
        + HasDataLayout
        + HasTargetSpec
        + HasTyCtxt<'tcx>
        + HasParamEnv<'tcx>,
{
    fn of_fn_ptr(cx: &C, sig: ty::PolyFnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
        // Assume that fn pointers may always unwind
        let codegen_fn_attr_flags = CodegenFnAttrFlags::UNWIND;

        call::FnAbi::new_internal(cx, sig, extra_args, None, codegen_fn_attr_flags, false)
    }

    fn of_instance(cx: &C, instance: ty::Instance<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
        let sig = instance.fn_sig_for_fn_abi(cx.tcx());

        let caller_location = if instance.def.requires_caller_location(cx.tcx()) {
            Some(cx.tcx().caller_location_ty())
        } else {
            None
        };

        let attrs = cx.tcx().codegen_fn_attrs(instance.def_id()).flags;

        call::FnAbi::new_internal(
            cx,
            sig,
            extra_args,
            caller_location,
            attrs,
            matches!(instance.def, ty::InstanceDef::Virtual(..)),
        )
    }

    fn new_internal(
        cx: &C,
        sig: ty::PolyFnSig<'tcx>,
        extra_args: &[Ty<'tcx>],
        caller_location: Option<Ty<'tcx>>,
        codegen_fn_attr_flags: CodegenFnAttrFlags,
        force_thin_self_ptr: bool,
    ) -> Self {
        debug!("FnAbi::new_internal({:?}, {:?})", sig, extra_args);

        let sig = cx.tcx().normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), sig);

        use rustc_target::spec::abi::Abi::*;
        let conv = match cx.tcx().sess.target.adjust_abi(sig.abi) {
            RustIntrinsic | PlatformIntrinsic | Rust | RustCall => Conv::Rust,

            // It's the ABI's job to select this, not ours.
            System => bug!("system abi should be selected elsewhere"),
            EfiApi => bug!("eficall abi should be selected elsewhere"),

            Stdcall => Conv::X86Stdcall,
            Fastcall => Conv::X86Fastcall,
            Vectorcall => Conv::X86VectorCall,
            Thiscall => Conv::X86ThisCall,
            C => Conv::C,
            Unadjusted => Conv::C,
            Win64 => Conv::X86_64Win64,
            SysV64 => Conv::X86_64SysV,
            Aapcs => Conv::ArmAapcs,
            CCmseNonSecureCall => Conv::CCmseNonSecureCall,
            PtxKernel => Conv::PtxKernel,
            Msp430Interrupt => Conv::Msp430Intr,
            X86Interrupt => Conv::X86Intr,
            AmdGpuKernel => Conv::AmdGpuKernel,
            AvrInterrupt => Conv::AvrInterrupt,
            AvrNonBlockingInterrupt => Conv::AvrNonBlockingInterrupt,

            // These ABI constants ought to be more specific...
            Cdecl => Conv::C,
        };

        let mut inputs = sig.inputs();
        let extra_args = if sig.abi == RustCall {
            assert!(!sig.c_variadic && extra_args.is_empty());

            if let Some(input) = sig.inputs().last() {
                if let ty::Tuple(tupled_arguments) = input.kind() {
                    inputs = &sig.inputs()[0..sig.inputs().len() - 1];
                    tupled_arguments.iter().map(|k| k.expect_ty()).collect()
                } else {
                    bug!("argument to function with \"rust-call\" ABI is not a tuple");
                }
            } else {
                bug!("argument to function with \"rust-call\" ABI is not a tuple");
            }
        } else {
            assert!(sig.c_variadic || extra_args.is_empty());
            extra_args.to_vec()
        };
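        // To illustrate the untupling above (a hedged sketch): for a call
        // through `extern "rust-call" fn(&F, (u32, u64))`, `inputs` ends up
        // as `[&F]` and `extra_args` as `[u32, u64]`, so the tuple's fields
        // are passed as if they were ordinary trailing arguments.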

        let target = &cx.tcx().sess.target;
        let target_env_gnu_like = matches!(&target.env[..], "gnu" | "musl");
        let win_x64_gnu = target.os == "windows" && target.arch == "x86_64" && target.env == "gnu";
        let linux_s390x_gnu_like =
            target.os == "linux" && target.arch == "s390x" && target_env_gnu_like;
        let linux_sparc64_gnu_like =
            target.os == "linux" && target.arch == "sparc64" && target_env_gnu_like;
        let linux_powerpc_gnu_like =
            target.os == "linux" && target.arch == "powerpc" && target_env_gnu_like;
        let rust_abi = matches!(sig.abi, RustIntrinsic | PlatformIntrinsic | Rust | RustCall);

        // Handle safe Rust thin and fat pointers.
        let adjust_for_rust_scalar = |attrs: &mut ArgAttributes,
                                      scalar: &Scalar,
                                      layout: TyAndLayout<'tcx>,
                                      offset: Size,
                                      is_return: bool| {
            // Booleans are always an i1 that needs to be zero-extended.
            if scalar.is_bool() {
                attrs.ext(ArgExtension::Zext);
                return;
            }

            // Only pointer types are handled below.
            if scalar.value != Pointer {
                return;
            }

            if scalar.valid_range.start() < scalar.valid_range.end()
                && *scalar.valid_range.start() > 0
            {
                attrs.set(ArgAttribute::NonNull);
            }

            if let Some(pointee) = layout.pointee_info_at(cx, offset) {
                if let Some(kind) = pointee.safe {
                    attrs.pointee_align = Some(pointee.align);

                    // `Box` (`UniqueOwned`) pointers are not necessarily dereferenceable
                    // for the entire duration of the function, as the `Box` can be
                    // deallocated at any time. Set their valid size to 0.
                    attrs.pointee_size = match kind {
                        PointerKind::UniqueOwned => Size::ZERO,
                        _ => pointee.size,
                    };

                    // `Box` pointer parameters never alias because ownership is
                    // transferred; `&mut` pointer parameters never alias other
                    // parameters or mutable global data.
                    //
                    // `&T` where `T` contains no `UnsafeCell<U>` is immutable,
                    // and can be marked as both `readonly` and `noalias`, as
                    // LLVM's definition of `noalias` is based solely on memory
                    // dependencies rather than pointer equality.
                    let no_alias = match kind {
                        PointerKind::Shared => false,
                        PointerKind::UniqueOwned => true,
                        PointerKind::Frozen | PointerKind::UniqueBorrowed => !is_return,
                    };
                    if no_alias {
                        attrs.set(ArgAttribute::NoAlias);
                    }

                    if kind == PointerKind::Frozen && !is_return {
                        attrs.set(ArgAttribute::ReadOnly);
                    }
                }
            }
        };
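        // Summarizing the attribute assignment above (a hedged example): for
        // `fn f(a: &u64, b: &mut u64, c: Box<u64>)`, `a` gets `noalias`,
        // `readonly`, `nonnull` and a pointee size of 8; `b` gets `nonnull`
        // and a pointee size of 8 (plus `noalias` only under
        // `-Z mutable-noalias`); `c` gets `noalias` and `nonnull`, but a
        // pointee size of 0 since the callee may free the `Box` at any time.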

        let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| {
            let is_return = arg_idx.is_none();

            let layout = cx.layout_of(ty);
            let layout = if force_thin_self_ptr && arg_idx == Some(0) {
                // Don't pass the vtable, it's not an argument of the virtual fn.
                // Instead, pass just the data pointer, but give it the type
                // `*const/mut dyn Trait` or `&/&mut dyn Trait`, because this is
                // special-cased elsewhere in codegen.
                make_thin_self_ptr(cx, layout)
            } else {
                layout
            };

            let mut arg = ArgAbi::new(cx, layout, |layout, scalar, offset| {
                let mut attrs = ArgAttributes::new();
                adjust_for_rust_scalar(&mut attrs, scalar, *layout, offset, is_return);
                attrs
            });

            if arg.layout.is_zst() {
                // For some forsaken reason, x86_64-pc-windows-gnu
                // doesn't ignore zero-sized struct arguments.
                // The same is true for {s390x,sparc64,powerpc}-unknown-linux-{gnu,musl}.
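                // E.g. (hedged): an `extern "C" fn f(x: (), y: u32)` keeps a
                // real argument slot for `x` on those targets, while on most
                // other targets (and for all Rust-ABI functions) the ZST is
                // dropped from the call ABI entirely via `PassMode::Ignore`.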
                if is_return
                    || rust_abi
                    || (!win_x64_gnu
                        && !linux_s390x_gnu_like
                        && !linux_sparc64_gnu_like
                        && !linux_powerpc_gnu_like)
                {
                    arg.mode = PassMode::Ignore;
                }
            }

            arg
        };

        let mut fn_abi = FnAbi {
            ret: arg_of(sig.output(), None),
            args: inputs
                .iter()
                .cloned()
                .chain(extra_args)
                .chain(caller_location)
                .enumerate()
                .map(|(i, ty)| arg_of(ty, Some(i)))
                .collect(),
            c_variadic: sig.c_variadic,
            fixed_count: inputs.len(),
            conv,
            can_unwind: fn_can_unwind(cx.tcx().sess.panic_strategy(), codegen_fn_attr_flags, conv),
        };
        fn_abi.adjust_for_abi(cx, sig.abi);
        debug!("FnAbi::new_internal = {:?}", fn_abi);
        fn_abi
    }

    fn adjust_for_abi(&mut self, cx: &C, abi: SpecAbi) {
        if abi == SpecAbi::Unadjusted {
            return;
        }

        if abi == SpecAbi::Rust
            || abi == SpecAbi::RustCall
            || abi == SpecAbi::RustIntrinsic
            || abi == SpecAbi::PlatformIntrinsic
        {
            let fixup = |arg: &mut ArgAbi<'tcx, Ty<'tcx>>| {
                if arg.is_ignore() {
                    return;
                }

                match arg.layout.abi {
                    Abi::Aggregate { .. } => {}

                    // This is a fun case! The gist of what this is doing is
                    // that we want callers and callees to always agree on the
                    // ABI of how they pass SIMD arguments. If we were to *not*
                    // make these arguments indirect then they'd be immediates
                    // in LLVM, which means that they'd use whatever the
                    // appropriate ABI is for the callee and the caller. That
                    // means, for example, if the caller doesn't have AVX
                    // enabled but the callee does, then passing an AVX argument
                    // across this boundary would cause corrupt data to show up.
                    //
                    // This problem is fixed by unconditionally passing SIMD
                    // arguments through memory between callers and callees,
                    // which should get them all to agree on ABI regardless of
                    // target feature sets. Some more information about this
                    // issue can be found in #44367.
                    //
                    // Note that the platform intrinsic ABI is exempt here as
                    // that's how we connect up to LLVM and it's unstable
                    // anyway; we control all calls to it in libstd.
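                    //
                    // E.g. (hedged): a 256-bit `#[repr(simd)]` vector would, as
                    // an immediate, travel in a YMM register only when AVX
                    // happens to be enabled in both caller and callee, so on
                    // targets with `simd_types_indirect` set it is passed
                    // through memory instead.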
                    Abi::Vector { .. }
                        if abi != SpecAbi::PlatformIntrinsic
                            && cx.tcx().sess.target.simd_types_indirect =>
                    {
                        arg.make_indirect();
                        return;
                    }

                    _ => return,
                }

                // Pass and return structures up to 2 pointers in size by value, matching `ScalarPair`.
                // LLVM will usually pass these in 2 registers, which is more efficient than by-ref.
                let max_by_val_size = Pointer.size(cx) * 2;
                let size = arg.layout.size;

                if arg.layout.is_unsized() || size > max_by_val_size {
                    arg.make_indirect();
                } else {
                    // We want to pass small aggregates as immediates, but using
                    // an LLVM aggregate type for this leads to bad optimizations,
                    // so we pick an appropriately sized integer type instead.
                    arg.cast_to(Reg { kind: RegKind::Integer, size });
                }
            };
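            // A hedged example of the fixup above: on a 64-bit target, a
            // by-value `[u32; 2]` (8 bytes, `Abi::Aggregate`) is cast to a
            // single 8-byte integer `Reg` and travels in one register, while
            // `[u32; 8]` (32 bytes) exceeds `max_by_val_size` (16) and is
            // passed indirectly.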
            fixup(&mut self.ret);
            for arg in &mut self.args {
                fixup(arg);
            }
            return;
        }

        if let Err(msg) = self.adjust_for_cabi(cx, abi) {
            cx.tcx().sess.fatal(&msg);
        }
    }
}

fn make_thin_self_ptr<'tcx, C>(cx: &C, mut layout: TyAndLayout<'tcx>) -> TyAndLayout<'tcx>
where
    C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout = TyAndLayout<'tcx>>
        + HasTyCtxt<'tcx>
        + HasParamEnv<'tcx>,
{
    let fat_pointer_ty = if layout.is_unsized() {
        // unsized `self` is passed as a pointer to `self`
        // FIXME (mikeyhew) change this to use &own if it is ever added to the language
        cx.tcx().mk_mut_ptr(layout.ty)
    } else {
        match layout.abi {
            Abi::ScalarPair(..) => (),
            _ => bug!("receiver type has unsupported layout: {:?}", layout),
        }

        // In the case of Rc<Self>, we need to explicitly pass a *mut RcBox<Self>
        // with a Scalar (not ScalarPair) ABI. This is a hack that is understood
        // elsewhere in the compiler as a method on a `dyn Trait`.
        // To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes until we
        // get a built-in pointer type
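        //
        // E.g. (a hedged sketch): starting from `Rc<dyn Trait>`, the loop
        // below descends through the only non-ZST field at each level
        // (`Rc` -> `NonNull<RcBox<dyn Trait>>` -> `*const RcBox<dyn Trait>`)
        // until it reaches a built-in fat pointer.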
        let mut fat_pointer_layout = layout;
        'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr()
            && !fat_pointer_layout.ty.is_region_ptr()
        {
            for i in 0..fat_pointer_layout.fields.count() {
                let field_layout = fat_pointer_layout.field(cx, i);

                if !field_layout.is_zst() {
                    fat_pointer_layout = field_layout;
                    continue 'descend_newtypes;
                }
            }

            bug!("receiver has no non-zero-sized fields {:?}", fat_pointer_layout);
        }

        fat_pointer_layout.ty
    };

    // we now have a type like `*mut RcBox<dyn Trait>`
    // change its layout to that of `*mut ()`, a thin pointer, but keep the same type
    // this is understood as a special case elsewhere in the compiler
    let unit_pointer_ty = cx.tcx().mk_mut_ptr(cx.tcx().mk_unit());
    layout = cx.layout_of(unit_pointer_ty);
    layout.ty = fat_pointer_ty;
    layout
}