// src/librustc_middle/ty/layout.rs
use crate::ich::StableHashingContext;
use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use crate::mir::{GeneratorLayout, GeneratorSavedLocal};
use crate::ty::subst::Subst;
use crate::ty::{self, subst::SubstsRef, ReprOptions, Ty, TyCtxt, TypeFoldable};

use rustc_ast::ast::{self, IntTy, UintTy};
use rustc_attr as attr;
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_hir as hir;
use rustc_hir::lang_items::{GeneratorStateLangItem, PinTypeLangItem};
use rustc_index::bit_set::BitSet;
use rustc_index::vec::{Idx, IndexVec};
use rustc_session::{DataTypeKind, FieldInfo, SizeKind, VariantInfo};
use rustc_span::symbol::{Ident, Symbol};
use rustc_span::DUMMY_SP;
use rustc_target::abi::call::{
    ArgAbi, ArgAttribute, ArgAttributes, Conv, FnAbi, PassMode, Reg, RegKind,
};
use rustc_target::abi::*;
use rustc_target::spec::{abi::Abi as SpecAbi, HasTargetSpec, PanicStrategy};

use std::cmp;
use std::fmt;
use std::iter;
use std::mem;
use std::num::NonZeroUsize;
use std::ops::Bound;

pub trait IntegerExt {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx>;
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
    fn repr_discr<'tcx>(
        tcx: TyCtxt<'tcx>,
        ty: Ty<'tcx>,
        repr: &ReprOptions,
        min: i128,
        max: i128,
    ) -> (Integer, bool);
}

impl IntegerExt for Integer {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx> {
        match (*self, signed) {
            (I8, false) => tcx.types.u8,
            (I16, false) => tcx.types.u16,
            (I32, false) => tcx.types.u32,
            (I64, false) => tcx.types.u64,
            (I128, false) => tcx.types.u128,
            (I8, true) => tcx.types.i8,
            (I16, true) => tcx.types.i16,
            (I32, true) => tcx.types.i32,
            (I64, true) => tcx.types.i64,
            (I128, true) => tcx.types.i128,
        }
    }

    /// Gets the Integer type from an attr::IntType.
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
        let dl = cx.data_layout();

        match ity {
            attr::SignedInt(IntTy::I8) | attr::UnsignedInt(UintTy::U8) => I8,
            attr::SignedInt(IntTy::I16) | attr::UnsignedInt(UintTy::U16) => I16,
            attr::SignedInt(IntTy::I32) | attr::UnsignedInt(UintTy::U32) => I32,
            attr::SignedInt(IntTy::I64) | attr::UnsignedInt(UintTy::U64) => I64,
            attr::SignedInt(IntTy::I128) | attr::UnsignedInt(UintTy::U128) => I128,
            attr::SignedInt(IntTy::Isize) | attr::UnsignedInt(UintTy::Usize) => {
                dl.ptr_sized_integer()
            }
        }
    }

    /// Finds the appropriate Integer type and signedness for the given
    /// signed discriminant range and `#[repr]` attribute.
    /// N.B.: `u128` values above `i128::MAX` will be treated as signed, but
    /// that shouldn't affect anything, other than maybe debuginfo.
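    ///
    /// For example (illustrative): with `min = -1`, `max = 100`, and no
    /// `#[repr]` hint, both bounds fit in `I8`, so this returns `(I8, true)`;
    /// adding `#[repr(C)]` raises the minimum to `I32` on most targets,
    /// giving `(I32, true)`.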
    fn repr_discr<'tcx>(
        tcx: TyCtxt<'tcx>,
        ty: Ty<'tcx>,
        repr: &ReprOptions,
        min: i128,
        max: i128,
    ) -> (Integer, bool) {
        // Theoretically, negative values could be larger in unsigned representation
        // than the unsigned representation of the signed minimum. However, if there
        // are any negative values, the only valid unsigned representation is u128
        // which can fit all i128 values, so the result remains unaffected.
        let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
        let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));

        let mut min_from_extern = None;
        let min_default = I8;

        if let Some(ity) = repr.int {
            let discr = Integer::from_attr(&tcx, ity);
            let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
            if discr < fit {
                bug!(
                    "Integer::repr_discr: `#[repr]` hint too small for \
                      discriminant range of enum `{}`",
                    ty
                )
            }
            return (discr, ity.is_signed());
        }

        if repr.c() {
            match &tcx.sess.target.target.arch[..] {
                // WARNING: the ARM EABI has two variants; the one corresponding
                // to `at_least == I32` appears to be used on Linux and NetBSD,
                // but some systems may use the variant corresponding to no
                // lower bound. However, we don't run on those yet...?
                "arm" => min_from_extern = Some(I32),
                _ => min_from_extern = Some(I32),
            }
        }

        let at_least = min_from_extern.unwrap_or(min_default);

        // If there are no negative values, we can use the unsigned fit.
        if min >= 0 {
            (cmp::max(unsigned_fit, at_least), false)
        } else {
            (cmp::max(signed_fit, at_least), true)
        }
    }
}

pub trait PrimitiveExt {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
    fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
}

impl PrimitiveExt for Primitive {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
        match *self {
            Int(i, signed) => i.to_ty(tcx, signed),
            F32 => tcx.types.f32,
            F64 => tcx.types.f64,
            Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
        }
    }

    /// Returns an *integer* type matching this primitive.
    /// Useful in particular when dealing with enum discriminants.
    fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
        match *self {
            Int(i, signed) => i.to_ty(tcx, signed),
            Pointer => tcx.types.usize,
            F32 | F64 => bug!("floats do not have an int type"),
        }
    }
}

/// The first half of a fat pointer.
///
/// - For a trait object, this is the address of the box.
/// - For a slice, this is the base address.
pub const FAT_PTR_ADDR: usize = 0;

/// The second half of a fat pointer.
///
/// - For a trait object, this is the address of the vtable.
/// - For a slice, this is the length.
pub const FAT_PTR_EXTRA: usize = 1;
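
// For example (illustrative): `&[u8]` is the pair (data pointer, length), and
// `&dyn Trait` is (data pointer, vtable pointer); `FAT_PTR_ADDR` and
// `FAT_PTR_EXTRA` index the first and second halves respectively.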

#[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable)]
pub enum LayoutError<'tcx> {
    Unknown(Ty<'tcx>),
    SizeOverflow(Ty<'tcx>),
}

impl<'tcx> fmt::Display for LayoutError<'tcx> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match *self {
            LayoutError::Unknown(ty) => write!(f, "the type `{:?}` has an unknown layout", ty),
            LayoutError::SizeOverflow(ty) => {
                write!(f, "the type `{:?}` is too big for the current architecture", ty)
            }
        }
    }
}

fn layout_raw<'tcx>(
    tcx: TyCtxt<'tcx>,
    query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
) -> Result<&'tcx Layout, LayoutError<'tcx>> {
    ty::tls::with_related_context(tcx, move |icx| {
        let (param_env, ty) = query.into_parts();

        if !tcx.sess.recursion_limit().value_within_limit(icx.layout_depth) {
            tcx.sess.fatal(&format!("overflow representing the type `{}`", ty));
        }

        // Update the ImplicitCtxt to increase the layout_depth
        let icx = ty::tls::ImplicitCtxt { layout_depth: icx.layout_depth + 1, ..icx.clone() };

        ty::tls::enter_context(&icx, |_| {
            let cx = LayoutCx { tcx, param_env };
            let layout = cx.layout_raw_uncached(ty);
            // Type-level uninhabitedness should always imply ABI uninhabitedness.
            if let Ok(layout) = layout {
                if ty.conservative_is_privately_uninhabited(tcx) {
                    assert!(layout.abi.is_uninhabited());
                }
            }
            layout
        })
    })
}

pub fn provide(providers: &mut ty::query::Providers<'_>) {
    *providers = ty::query::Providers { layout_raw, ..*providers };
}

pub struct LayoutCx<'tcx, C> {
    pub tcx: C,
    pub param_env: ty::ParamEnv<'tcx>,
}

#[derive(Copy, Clone, Debug)]
enum StructKind {
    /// A tuple, closure, or univariant which cannot be coerced to unsized.
    AlwaysSized,
    /// A univariant, the last field of which may be coerced to unsized.
    MaybeUnsized,
    /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
    Prefixed(Size, Align),
}

// Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
// This is used to go between `memory_index` (source field order to memory order)
// and `inverse_memory_index` (memory order to source field order).
// See also `FieldsShape::Arbitrary::memory_index` for more details.
// FIXME(eddyb) build a better abstraction for permutations, if possible.
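// For example: `invert_mapping(&[2, 0, 1])` returns `[1, 2, 0]`, since
// `map[0] == 2` means `inverse[2]` must be `0`, and so on.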
fn invert_mapping(map: &[u32]) -> Vec<u32> {
    let mut inverse = vec![0; map.len()];
    for i in 0..map.len() {
        inverse[map[i] as usize] = i as u32;
    }
    inverse
}

impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
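    /// Worked example (illustrative): pairing a `u8` scalar with a `u32`
    /// scalar on a typical 64-bit target gives `b_offset = 4` (the `u8` is
    /// followed by three bytes of padding), alignment 4, and total size 8.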
    fn scalar_pair(&self, a: Scalar, b: Scalar) -> Layout {
        let dl = self.data_layout();
        let b_align = b.value.align(dl);
        let align = a.value.align(dl).max(b_align).max(dl.aggregate_align);
        let b_offset = a.value.size(dl).align_to(b_align.abi);
        let size = (b_offset + b.value.size(dl)).align_to(align.abi);

        // HACK(nox): We iter on `b` and then `a` because `max_by_key`
        // returns the last maximum.
        let largest_niche = Niche::from_scalar(dl, b_offset, b.clone())
            .into_iter()
            .chain(Niche::from_scalar(dl, Size::ZERO, a.clone()))
            .max_by_key(|niche| niche.available(dl));

        Layout {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Arbitrary {
                offsets: vec![Size::ZERO, b_offset],
                memory_index: vec![0, 1],
            },
            abi: Abi::ScalarPair(a, b),
            largest_niche,
            align,
            size,
        }
    }

    fn univariant_uninterned(
        &self,
        ty: Ty<'tcx>,
        fields: &[TyAndLayout<'_>],
        repr: &ReprOptions,
        kind: StructKind,
    ) -> Result<Layout, LayoutError<'tcx>> {
        let dl = self.data_layout();
        let pack = repr.pack;
        if pack.is_some() && repr.align.is_some() {
            bug!("struct cannot be packed and aligned");
        }

        let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };

        let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();

        let optimize = !repr.inhibit_struct_field_reordering_opt();
        if optimize {
            let end =
                if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
            let optimizing = &mut inverse_memory_index[..end];
            let field_align = |f: &TyAndLayout<'_>| {
                if let Some(pack) = pack { f.align.abi.min(pack) } else { f.align.abi }
            };
            match kind {
                StructKind::AlwaysSized | StructKind::MaybeUnsized => {
                    optimizing.sort_by_key(|&x| {
                        // Place ZSTs first to avoid "interesting offsets",
                        // especially with only one or two non-ZST fields.
                        let f = &fields[x as usize];
                        (!f.is_zst(), cmp::Reverse(field_align(f)))
                    });
                }
                StructKind::Prefixed(..) => {
                    // Sort in ascending alignment, so that the layout stays
                    // optimal regardless of the prefix.
                    optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
                }
            }
        }
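
        // For example (illustrative): with reordering enabled, a struct with
        // fields declared as (u8, u16, u8) is laid out as (u16, u8, u8), i.e.
        // offsets 0, 2, 3 and size 4, whereas source order would need
        // offsets 0, 2, 4 and size 6.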

        // inverse_memory_index holds field indices by increasing memory offset.
        // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
        // We now write field offsets to the corresponding offset slot;
        // field 5 with offset 0 puts 0 in offsets[5].
        // At the bottom of this function, we invert `inverse_memory_index` to
        // produce `memory_index` (see `invert_mapping`).

        let mut sized = true;
        let mut offsets = vec![Size::ZERO; fields.len()];
        let mut offset = Size::ZERO;
        let mut largest_niche = None;
        let mut largest_niche_available = 0;

        if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
            let prefix_align =
                if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
            align = align.max(AbiAndPrefAlign::new(prefix_align));
            offset = prefix_size.align_to(prefix_align);
        }

        for &i in &inverse_memory_index {
            let field = fields[i as usize];
            if !sized {
                bug!("univariant: field #{} of `{}` comes after unsized field", offsets.len(), ty);
            }

            if field.is_unsized() {
                sized = false;
            }

            // Invariant: offset < dl.obj_size_bound() <= 1<<61
            let field_align = if let Some(pack) = pack {
                field.align.min(AbiAndPrefAlign::new(pack))
            } else {
                field.align
            };
            offset = offset.align_to(field_align.abi);
            align = align.max(field_align);

            debug!("univariant offset: {:?} field: {:#?}", offset, field);
            offsets[i as usize] = offset;

            if !repr.hide_niche() {
                if let Some(mut niche) = field.largest_niche.clone() {
                    let available = niche.available(dl);
                    if available > largest_niche_available {
                        largest_niche_available = available;
                        niche.offset += offset;
                        largest_niche = Some(niche);
                    }
                }
            }

            offset = offset.checked_add(field.size, dl).ok_or(LayoutError::SizeOverflow(ty))?;
        }

        if let Some(repr_align) = repr.align {
            align = align.max(AbiAndPrefAlign::new(repr_align));
        }

        debug!("univariant min_size: {:?}", offset);
        let min_size = offset;

        // As stated above, inverse_memory_index holds field indices by increasing offset.
        // This makes it an already-sorted view of the offsets vec.
        // To invert it, consider:
        // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
        // Field 5 would be the first element, so memory_index is i:
        // Note: if we didn't optimize, it's already right.

        let memory_index =
            if optimize { invert_mapping(&inverse_memory_index) } else { inverse_memory_index };

        let size = min_size.align_to(align.abi);
        let mut abi = Abi::Aggregate { sized };

        // Unpack newtype ABIs and find scalar pairs.
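        // For example (illustrative): `struct NewType(u64)` gets `Abi::Scalar`
        // just like `u64`, and a struct whose only two non-ZST fields are
        // scalars, e.g. `(u32, u32)`, can get `Abi::ScalarPair`.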
        if sized && size.bytes() > 0 {
            // All other fields must be ZSTs, and we need them to all start at 0.
            let mut zst_offsets = offsets.iter().enumerate().filter(|&(i, _)| fields[i].is_zst());
            if zst_offsets.all(|(_, o)| o.bytes() == 0) {
                let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());

                match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
                    // We have exactly one non-ZST field.
                    (Some((i, field)), None, None) => {
                        // Field fills the struct and it has a scalar or scalar pair ABI.
                        if offsets[i].bytes() == 0
                            && align.abi == field.align.abi
                            && size == field.size
                        {
                            match field.abi {
                                // For plain scalars, or vectors of them, we can't unpack
                                // newtypes for `#[repr(C)]`, as that affects C ABIs.
                                Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
                                    abi = field.abi.clone();
                                }
                                // But scalar pairs are Rust-specific and get
                                // treated as aggregates by C ABIs anyway.
                                Abi::ScalarPair(..) => {
                                    abi = field.abi.clone();
                                }
                                _ => {}
                            }
                        }
                    }

                    // Two non-ZST fields, and they're both scalars.
                    (
                        Some((
                            i,
                            &TyAndLayout {
                                layout: &Layout { abi: Abi::Scalar(ref a), .. }, ..
                            },
                        )),
                        Some((
                            j,
                            &TyAndLayout {
                                layout: &Layout { abi: Abi::Scalar(ref b), .. }, ..
                            },
                        )),
                        None,
                    ) => {
                        // Order by the memory placement, not source order.
                        let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
                            ((i, a), (j, b))
                        } else {
                            ((j, b), (i, a))
                        };
                        let pair = self.scalar_pair(a.clone(), b.clone());
                        let pair_offsets = match pair.fields {
                            FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
                                assert_eq!(memory_index, &[0, 1]);
                                offsets
                            }
                            _ => bug!(),
                        };
                        if offsets[i] == pair_offsets[0]
                            && offsets[j] == pair_offsets[1]
                            && align == pair.align
                            && size == pair.size
                        {
                            // We can use `ScalarPair` only when it matches our
                            // already computed layout (including `#[repr(C)]`).
                            abi = pair.abi;
                        }
                    }

                    _ => {}
                }
            }
        }

        if sized && fields.iter().any(|f| f.abi.is_uninhabited()) {
            abi = Abi::Uninhabited;
        }

        Ok(Layout {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Arbitrary { offsets, memory_index },
            abi,
            largest_niche,
            align,
            size,
        })
    }

    fn layout_raw_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx Layout, LayoutError<'tcx>> {
        let tcx = self.tcx;
        let param_env = self.param_env;
        let dl = self.data_layout();
        let scalar_unit = |value: Primitive| {
            let bits = value.size(dl).bits();
            assert!(bits <= 128);
            Scalar { value, valid_range: 0..=(!0 >> (128 - bits)) }
        };
        let scalar = |value: Primitive| tcx.intern_layout(Layout::scalar(self, scalar_unit(value)));

        let univariant = |fields: &[TyAndLayout<'_>], repr: &ReprOptions, kind| {
            Ok(tcx.intern_layout(self.univariant_uninterned(ty, fields, repr, kind)?))
        };
        debug_assert!(!ty.has_infer_types_or_consts());

        Ok(match ty.kind {
            // Basic scalars.
            ty::Bool => tcx.intern_layout(Layout::scalar(
                self,
                Scalar { value: Int(I8, false), valid_range: 0..=1 },
            )),
            ty::Char => tcx.intern_layout(Layout::scalar(
                self,
                Scalar { value: Int(I32, false), valid_range: 0..=0x10FFFF },
            )),
            ty::Int(ity) => scalar(Int(Integer::from_attr(dl, attr::SignedInt(ity)), true)),
            ty::Uint(ity) => scalar(Int(Integer::from_attr(dl, attr::UnsignedInt(ity)), false)),
            ty::Float(fty) => scalar(match fty {
                ast::FloatTy::F32 => F32,
                ast::FloatTy::F64 => F64,
            }),
            ty::FnPtr(_) => {
                let mut ptr = scalar_unit(Pointer);
                ptr.valid_range = 1..=*ptr.valid_range.end();
                tcx.intern_layout(Layout::scalar(self, ptr))
            }

            // The never type.
            ty::Never => tcx.intern_layout(Layout {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldsShape::Primitive,
                abi: Abi::Uninhabited,
                largest_niche: None,
                align: dl.i8_align,
                size: Size::ZERO,
            }),

            // Potentially-fat pointers.
            ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                let mut data_ptr = scalar_unit(Pointer);
                if !ty.is_unsafe_ptr() {
                    data_ptr.valid_range = 1..=*data_ptr.valid_range.end();
                }

                let pointee = tcx.normalize_erasing_regions(param_env, pointee);
                if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
                    return Ok(tcx.intern_layout(Layout::scalar(self, data_ptr)));
                }

                let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
                let metadata = match unsized_part.kind {
                    ty::Foreign(..) => {
                        return Ok(tcx.intern_layout(Layout::scalar(self, data_ptr)));
                    }
                    ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)),
                    ty::Dynamic(..) => {
                        let mut vtable = scalar_unit(Pointer);
                        vtable.valid_range = 1..=*vtable.valid_range.end();
                        vtable
                    }
                    _ => return Err(LayoutError::Unknown(unsized_part)),
                };

                // Effectively a (ptr, meta) tuple.
                tcx.intern_layout(self.scalar_pair(data_ptr, metadata))
            }

            // Arrays and slices.
            ty::Array(element, mut count) => {
                if count.has_projections() {
                    count = tcx.normalize_erasing_regions(param_env, count);
                    if count.has_projections() {
                        return Err(LayoutError::Unknown(ty));
                    }
                }

                let count = count.try_eval_usize(tcx, param_env).ok_or(LayoutError::Unknown(ty))?;
                let element = self.layout_of(element)?;
                let size =
                    element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;

                let abi = if count != 0 && ty.conservative_is_privately_uninhabited(tcx) {
                    Abi::Uninhabited
                } else {
                    Abi::Aggregate { sized: true }
                };

                let largest_niche = if count != 0 { element.largest_niche.clone() } else { None };

                tcx.intern_layout(Layout {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldsShape::Array { stride: element.size, count },
                    abi,
                    largest_niche,
                    align: element.align,
                    size,
                })
            }
            ty::Slice(element) => {
                let element = self.layout_of(element)?;
                tcx.intern_layout(Layout {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldsShape::Array { stride: element.size, count: 0 },
                    abi: Abi::Aggregate { sized: false },
                    largest_niche: None,
                    align: element.align,
                    size: Size::ZERO,
                })
            }
            ty::Str => tcx.intern_layout(Layout {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
                abi: Abi::Aggregate { sized: false },
                largest_niche: None,
                align: dl.i8_align,
                size: Size::ZERO,
            }),

            // Odd unit types.
            ty::FnDef(..) => univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?,
            ty::Dynamic(..) | ty::Foreign(..) => {
                let mut unit = self.univariant_uninterned(
                    ty,
                    &[],
                    &ReprOptions::default(),
                    StructKind::AlwaysSized,
                )?;
                match unit.abi {
                    Abi::Aggregate { ref mut sized } => *sized = false,
                    _ => bug!(),
                }
                tcx.intern_layout(unit)
            }

            ty::Generator(def_id, substs, _) => self.generator_layout(ty, def_id, substs)?,

            ty::Closure(_, ref substs) => {
                let tys = substs.as_closure().upvar_tys();
                univariant(
                    &tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    StructKind::AlwaysSized,
                )?
            }

            ty::Tuple(tys) => {
                let kind =
                    if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };

                univariant(
                    &tys.iter()
                        .map(|k| self.layout_of(k.expect_ty()))
                        .collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    kind,
                )?
            }

            // SIMD vector types.
            ty::Adt(def, ..) if def.repr.simd() => {
                let element = self.layout_of(ty.simd_type(tcx))?;
                let count = ty.simd_size(tcx);
                assert!(count > 0);
                let scalar = match element.abi {
                    Abi::Scalar(ref scalar) => scalar.clone(),
                    _ => {
                        tcx.sess.fatal(&format!(
                            "monomorphising SIMD type `{}` with \
                             a non-machine element type `{}`",
                            ty, element.ty
                        ));
                    }
                };
                let size =
                    element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;
                let align = dl.vector_align(size);
                let size = size.align_to(align.abi);

                tcx.intern_layout(Layout {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldsShape::Array { stride: element.size, count },
                    abi: Abi::Vector { element: scalar, count },
                    largest_niche: element.largest_niche.clone(),
                    size,
                    align,
                })
            }

            // ADTs.
            ty::Adt(def, substs) => {
                // Cache the field layouts.
                let variants = def
                    .variants
                    .iter()
                    .map(|v| {
                        v.fields
                            .iter()
                            .map(|field| self.layout_of(field.ty(tcx, substs)))
                            .collect::<Result<Vec<_>, _>>()
                    })
                    .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                if def.is_union() {
                    if def.repr.pack.is_some() && def.repr.align.is_some() {
                        bug!("union cannot be packed and aligned");
                    }

                    let mut align =
                        if def.repr.pack.is_some() { dl.i8_align } else { dl.aggregate_align };

                    if let Some(repr_align) = def.repr.align {
                        align = align.max(AbiAndPrefAlign::new(repr_align));
                    }

                    let optimize = !def.repr.inhibit_union_abi_opt();
                    let mut size = Size::ZERO;
                    let mut abi = Abi::Aggregate { sized: true };
                    let index = VariantIdx::new(0);
                    for field in &variants[index] {
                        assert!(!field.is_unsized());
                        align = align.max(field.align);

                        // If all non-ZST fields have the same ABI, forward this ABI
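                        // For example (illustrative): in `union U { a: u32, b: char }`, both
                        // fields normalize to a full-range `Int(I32, false)` scalar, so the
                        // union keeps `Abi::Scalar`; mixing `u32` and `f32` instead would
                        // fall back to `Abi::Aggregate`.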
                        if optimize && !field.is_zst() {
                            // Normalize scalar_unit to the maximal valid range
                            let field_abi = match &field.abi {
                                Abi::Scalar(x) => Abi::Scalar(scalar_unit(x.value)),
                                Abi::ScalarPair(x, y) => {
                                    Abi::ScalarPair(scalar_unit(x.value), scalar_unit(y.value))
                                }
                                Abi::Vector { element: x, count } => {
                                    Abi::Vector { element: scalar_unit(x.value), count: *count }
                                }
                                Abi::Uninhabited | Abi::Aggregate { .. } => {
                                    Abi::Aggregate { sized: true }
                                }
                            };

                            if size == Size::ZERO {
                                // first non-ZST field: initialize `abi`
                                abi = field_abi;
                            } else if abi != field_abi {
                                // different fields have different ABIs: reset to Aggregate
                                abi = Abi::Aggregate { sized: true };
                            }
                        }

                        size = cmp::max(size, field.size);
                    }

                    if let Some(pack) = def.repr.pack {
                        align = align.min(AbiAndPrefAlign::new(pack));
                    }

                    return Ok(tcx.intern_layout(Layout {
                        variants: Variants::Single { index },
                        fields: FieldsShape::Union(
                            NonZeroUsize::new(variants[index].len())
                                .ok_or(LayoutError::Unknown(ty))?,
                        ),
                        abi,
                        largest_niche: None,
                        align,
                        size: size.align_to(align.abi),
                    }));
                }

                // A variant is absent if it's uninhabited and only has ZST fields.
                // Present uninhabited variants only require space for their fields,
                // but *not* an encoding of the discriminant (e.g., a tag value).
                // See issue #49298 for more details on the need to leave space
                // for non-ZST uninhabited data (mostly partial initialization).
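                // For example (illustrative): in `Result<u32, !>`, the `Err` variant's
                // only field has type `!`, an uninhabited ZST, so that variant is
                // absent and the layout matches that of `u32` alone.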
                let absent = |fields: &[TyAndLayout<'_>]| {
                    let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
                    let is_zst = fields.iter().all(|f| f.is_zst());
                    uninhabited && is_zst
                };
                let (present_first, present_second) = {
                    let mut present_variants = variants
                        .iter_enumerated()
                        .filter_map(|(i, v)| if absent(v) { None } else { Some(i) });
                    (present_variants.next(), present_variants.next())
                };
                let present_first = match present_first {
                    present_first @ Some(_) => present_first,
                    // Uninhabited because it has no variants, or only absent ones.
                    None if def.is_enum() => return tcx.layout_raw(param_env.and(tcx.types.never)),
                    // If it's a struct, still compute a layout so that we can still compute the
                    // field offsets.
                    None => Some(VariantIdx::new(0)),
                };

                let is_struct = !def.is_enum() ||
                    // Only one variant is present.
                    (present_second.is_none() &&
                    // Representation optimizations are allowed.
                    !def.repr.inhibit_enum_layout_opt());
                if is_struct {
                    // Struct, or univariant enum equivalent to a struct.
                    // (Typechecking will reject discriminant-sizing attrs.)

                    let v = present_first.unwrap();
                    let kind = if def.is_enum() || variants[v].is_empty() {
                        StructKind::AlwaysSized
                    } else {
                        let param_env = tcx.param_env(def.did);
                        let last_field = def.variants[v].fields.last().unwrap();
                        let always_sized =
                            tcx.type_of(last_field.did).is_sized(tcx.at(DUMMY_SP), param_env);
                        if !always_sized {
                            StructKind::MaybeUnsized
                        } else {
                            StructKind::AlwaysSized
                        }
                    };

                    let mut st = self.univariant_uninterned(ty, &variants[v], &def.repr, kind)?;
                    st.variants = Variants::Single { index: v };
                    let (start, end) = self.tcx.layout_scalar_valid_range(def.did);
                    match st.abi {
                        Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
                            // the asserts ensure that we are not using the
                            // `#[rustc_layout_scalar_valid_range(n)]`
                            // attribute to widen the range of anything as that would probably
                            // result in UB somewhere
                            // FIXME(eddyb) the asserts are probably not needed,
                            // as larger validity ranges would result in missed
                            // optimizations, *not* wrongly assuming the inner
                            // value is valid. e.g. unions enlarge validity ranges,
                            // because the values may be uninitialized.
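                            //
                            // For example (illustrative): `NonZeroU32` is marked
                            // `#[rustc_layout_scalar_valid_range_start(1)]`; the code
                            // below narrows its scalar's range to `1..=u32::MAX`,
                            // creating the niche that lets `Option<NonZeroU32>` stay
                            // four bytes.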
                            if let Bound::Included(start) = start {
                                // FIXME(eddyb) this might be incorrect - it doesn't
                                // account for wrap-around (end < start) ranges.
                                assert!(*scalar.valid_range.start() <= start);
                                scalar.valid_range = start..=*scalar.valid_range.end();
                            }
                            if let Bound::Included(end) = end {
                                // FIXME(eddyb) this might be incorrect - it doesn't
                                // account for wrap-around (end < start) ranges.
                                assert!(*scalar.valid_range.end() >= end);
                                scalar.valid_range = *scalar.valid_range.start()..=end;
                            }

                            // Update `largest_niche` if we have introduced a larger niche.
                            let niche = if def.repr.hide_niche() {
                                None
                            } else {
                                Niche::from_scalar(dl, Size::ZERO, scalar.clone())
                            };
                            if let Some(niche) = niche {
                                match &st.largest_niche {
                                    Some(largest_niche) => {
                                        // Replace the existing niche even if they're equal,
                                        // because this one is at a lower offset.
                                        if largest_niche.available(dl) <= niche.available(dl) {
                                            st.largest_niche = Some(niche);
                                        }
                                    }
                                    None => st.largest_niche = Some(niche),
                                }
                            }
                        }
                        _ => assert!(
                            start == Bound::Unbounded && end == Bound::Unbounded,
                            "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
                            def,
                            st,
                        ),
                    }

                    return Ok(tcx.intern_layout(st));
                }

                // At this point, we have handled all unions and
                // structs. (We have also handled univariant enums
                // that allow representation optimization.)
                assert!(def.is_enum());

                // The current code for niche-filling relies on variant indices
                // instead of actual discriminants, so dataful enums with
                // explicit discriminants (RFC #2363) would misbehave.
                let no_explicit_discriminants = def
                    .variants
                    .iter_enumerated()
                    .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i.as_u32()));

                // Niche-filling enum optimization.
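                // For example (illustrative): `Option<&u8>` qualifies; `Some` is the
                // single dataful variant and `None` is encoded in the pointer's
                // null niche, so the enum is pointer-sized with no separate tag.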
                if !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants {
                    let mut dataful_variant = None;
                    let mut niche_variants = VariantIdx::MAX..=VariantIdx::new(0);

                    // Find one non-ZST variant.
                    'variants: for (v, fields) in variants.iter_enumerated() {
                        if absent(fields) {
                            continue 'variants;
                        }
                        for f in fields {
                            if !f.is_zst() {
                                if dataful_variant.is_none() {
                                    dataful_variant = Some(v);
                                    continue 'variants;
                                } else {
                                    dataful_variant = None;
                                    break 'variants;
                                }
                            }
                        }
                        niche_variants = *niche_variants.start().min(&v)..=v;
                    }

                    if niche_variants.start() > niche_variants.end() {
                        dataful_variant = None;
                    }

                    if let Some(i) = dataful_variant {
                        let count = (niche_variants.end().as_u32()
                            - niche_variants.start().as_u32()
                            + 1) as u128;

                        // Find the field with the largest niche
                        let niche_candidate = variants[i]
                            .iter()
                            .enumerate()
                            .filter_map(|(j, &field)| Some((j, field.largest_niche.as_ref()?)))
                            .max_by_key(|(_, niche)| niche.available(dl));

                        if let Some((field_index, niche, (niche_start, niche_scalar))) =
                            niche_candidate.and_then(|(field_index, niche)| {
                                Some((field_index, niche, niche.reserve(self, count)?))
                            })
                        {
                            let mut align = dl.aggregate_align;
                            let st = variants
                                .iter_enumerated()
                                .map(|(j, v)| {
                                    let mut st = self.univariant_uninterned(
                                        ty,
                                        v,
                                        &def.repr,
                                        StructKind::AlwaysSized,
                                    )?;
                                    st.variants = Variants::Single { index: j };

                                    align = align.max(st.align);

                                    Ok(st)
                                })
                                .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                            let offset = st[i].fields.offset(field_index) + niche.offset;
                            let size = st[i].size;

                            let abi = if st.iter().all(|v| v.abi.is_uninhabited()) {
                                Abi::Uninhabited
                            } else {
                                match st[i].abi {
                                    Abi::Scalar(_) => Abi::Scalar(niche_scalar.clone()),
                                    Abi::ScalarPair(ref first, ref second) => {
                                        // We need to use scalar_unit to reset the
                                        // valid range to the maximal one for that
                                        // primitive, because only the niche is
                                        // guaranteed to be initialised, not the
                                        // other primitive.
                                        if offset.bytes() == 0 {
                                            Abi::ScalarPair(
                                                niche_scalar.clone(),
                                                scalar_unit(second.value),
                                            )
                                        } else {
                                            Abi::ScalarPair(
                                                scalar_unit(first.value),
                                                niche_scalar.clone(),
                                            )
                                        }
                                    }
                                    _ => Abi::Aggregate { sized: true },
                                }
                            };

                            let largest_niche =
                                Niche::from_scalar(dl, offset, niche_scalar.clone());

                            return Ok(tcx.intern_layout(Layout {
                                variants: Variants::Multiple {
                                    tag: niche_scalar,
                                    tag_encoding: TagEncoding::Niche {
                                        dataful_variant: i,
                                        niche_variants,
                                        niche_start,
                                    },
                                    tag_field: 0,
                                    variants: st,
                                },
                                fields: FieldsShape::Arbitrary {
                                    offsets: vec![offset],
                                    memory_index: vec![0],
                                },
                                abi,
                                largest_niche,
                                size,
                                align,
                            }));
                        }
                    }
                }

                let (mut min, mut max) = (i128::MAX, i128::MIN);
                let discr_type = def.repr.discr_type();
                let bits = Integer::from_attr(self, discr_type).size().bits();
                for (i, discr) in def.discriminants(tcx) {
                    if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
                        continue;
                    }
                    let mut x = discr.val as i128;
                    if discr_type.is_signed() {
                        // sign extend the raw representation to be an i128
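                        // e.g., with 8-bit discriminants, a raw value of 0xFF
                        // sign-extends to -1.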
                        x = (x << (128 - bits)) >> (128 - bits);
                    }
                    if x < min {
                        min = x;
                    }
                    if x > max {
                        max = x;
                    }
                }
                // We might have no inhabited variants, so pretend there's at least one.
                if (min, max) == (i128::MAX, i128::MIN) {
                    min = 0;
                    max = 0;
                }
                assert!(min <= max, "discriminant range is {}...{}", min, max);
                let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max);

                let mut align = dl.aggregate_align;
                let mut size = Size::ZERO;

                // We're interested in the smallest alignment, so start large.
                let mut start_align = Align::from_bytes(256).unwrap();
                assert_eq!(Integer::for_align(dl, start_align), None);

                // repr(C) on an enum tells us to make a (tag, union) layout,
                // so we need to grow the prefix alignment to be at least
                // the alignment of the union. (This value is used both for
                // determining the alignment of the overall enum, and for
                // determining the alignment of the payload after the tag.)
                let mut prefix_align = min_ity.align(dl).abi;
                if def.repr.c() {
                    for fields in &variants {
                        for field in fields {
                            prefix_align = prefix_align.max(field.align.abi);
                        }
                    }
                }

                // Create the set of structs that represent each variant.
                let mut layout_variants = variants
                    .iter_enumerated()
                    .map(|(i, field_layouts)| {
                        let mut st = self.univariant_uninterned(
                            ty,
                            &field_layouts,
                            &def.repr,
                            StructKind::Prefixed(min_ity.size(), prefix_align),
                        )?;
                        st.variants = Variants::Single { index: i };
                        // Find the first field we can't move later
                        // to make room for a larger discriminant.
                        for field in
                            st.fields.index_by_increasing_offset().map(|j| field_layouts[j])
                        {
                            if !field.is_zst() || field.align.abi.bytes() != 1 {
                                start_align = start_align.min(field.align.abi);
                                break;
                            }
                        }
                        size = cmp::max(size, st.size);
                        align = align.max(st.align);
                        Ok(st)
                    })
                    .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                // Align the maximum variant size to the largest alignment.
                size = size.align_to(align.abi);

                if size.bytes() >= dl.obj_size_bound() {
                    return Err(LayoutError::SizeOverflow(ty));
                }

                let typeck_ity = Integer::from_attr(dl, def.repr.discr_type());
                if typeck_ity < min_ity {
                    // It is a bug if Layout decided on a greater discriminant size than typeck
                    // at this point (based on the values the discriminant can take on), mostly
                    // because this discriminant will be loaded, and then stored into a variable
                    // of the type calculated by typeck. Consider such a case (a bug): typeck
                    // decided on a byte-sized discriminant, but layout thinks we need a 16-bit
                    // integer to store all the discriminant values. That would be a bug, because
                    // then, in codegen, in order to store this 16-bit discriminant into an
                    // 8-bit sized temporary, some of the space necessary to represent it would
                    // have to be discarded (or the layout is wrong in thinking it needs 16 bits).
                    bug!(
                        "layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
                        min_ity,
                        typeck_ity
                    );
                    // However, it is fine to make the discr type however large (as an
                    // optimisation) after this point – we'll just truncate the value we
                    // load in codegen.
                }

                // Check to see if we should use a different type for the
                // discriminant. We can safely use a type with the same size
                // as the alignment of the first field of each variant.
                // We increase the size of the discriminant to avoid LLVM copying
                // padding when it doesn't need to. This normally causes unaligned
                // load/stores and excessive memcpy/memset operations. By using a
                // bigger integer size, LLVM can be sure about its contents and
                // won't be so conservative.
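                //
                // For example (illustrative): for `enum E { A(u32), B(u32) }` the
                // minimal tag is `I8`, but the first field of each variant is 4-byte
                // aligned, so the tag is widened to `I32` and the payload still
                // starts at offset 4, with no extra padding.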

                // Use the initial field alignment
                let mut ity = if def.repr.c() || def.repr.int.is_some() {
                    min_ity
                } else {
                    Integer::for_align(dl, start_align).unwrap_or(min_ity)
                };

                // If the alignment is not larger than the chosen discriminant size,
                // don't use the alignment as the final size.
                if ity <= min_ity {
                    ity = min_ity;
                } else {
                    // Patch up the variants' first few fields.
                    let old_ity_size = min_ity.size();
                    let new_ity_size = ity.size();
                    for variant in &mut layout_variants {
                        match variant.fields {
                            FieldsShape::Arbitrary { ref mut offsets, .. } => {
                                for i in offsets {
                                    if *i <= old_ity_size {
                                        assert_eq!(*i, old_ity_size);
                                        *i = new_ity_size;
                                    }
                                }
                                // We might be making the struct larger.
                                if variant.size <= old_ity_size {
                                    variant.size = new_ity_size;
                                }
                            }
                            _ => bug!(),
                        }
                    }
                }

                let tag_mask = !0u128 >> (128 - ity.size().bits());
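                // e.g., for an `I8` tag, `tag_mask` is 0xFF; the `&` below truncates
                // the signed `min`/`max` to the tag's width.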
1145                 let tag = Scalar {
1146                     value: Int(ity, signed),
1147                     valid_range: (min as u128 & tag_mask)..=(max as u128 & tag_mask),
1148                 };
1149                 let mut abi = Abi::Aggregate { sized: true };
1150                 if tag.value.size(dl) == size {
1151                     abi = Abi::Scalar(tag.clone());
1152                 } else {
1153                     // Try to use a ScalarPair for all tagged enums.
1154                     let mut common_prim = None;
1155                     for (field_layouts, layout_variant) in variants.iter().zip(&layout_variants) {
1156                         let offsets = match layout_variant.fields {
1157                             FieldsShape::Arbitrary { ref offsets, .. } => offsets,
1158                             _ => bug!(),
1159                         };
1160                         let mut fields =
1161                             field_layouts.iter().zip(offsets).filter(|p| !p.0.is_zst());
1162                         let (field, offset) = match (fields.next(), fields.next()) {
1163                             (None, None) => continue,
1164                             (Some(pair), None) => pair,
1165                             _ => {
1166                                 common_prim = None;
1167                                 break;
1168                             }
1169                         };
1170                         let prim = match field.abi {
1171                             Abi::Scalar(ref scalar) => scalar.value,
1172                             _ => {
1173                                 common_prim = None;
1174                                 break;
1175                             }
1176                         };
1177                         if let Some(pair) = common_prim {
1178                             // This is pretty conservative. We could go fancier
1179                             // by conflating things like i32 and u32, or even
1180                             // realising that (u8, u8) could just cohabit with
1181                             // u16 or even u32.
1182                             if pair != (prim, offset) {
1183                                 common_prim = None;
1184                                 break;
1185                             }
1186                         } else {
1187                             common_prim = Some((prim, offset));
1188                         }
1189                     }
1190                     if let Some((prim, offset)) = common_prim {
1191                         let pair = self.scalar_pair(tag.clone(), scalar_unit(prim));
1192                         let pair_offsets = match pair.fields {
1193                             FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
1194                                 assert_eq!(memory_index, &[0, 1]);
1195                                 offsets
1196                             }
1197                             _ => bug!(),
1198                         };
1199                         if pair_offsets[0] == Size::ZERO
1200                             && pair_offsets[1] == *offset
1201                             && align == pair.align
1202                             && size == pair.size
1203                         {
1204                             // We can use `ScalarPair` only when it matches our
1205                             // already computed layout (including `#[repr(C)]`).
1206                             abi = pair.abi;
1207                         }
1208                     }
1209                 }
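                     // A sketch of when the `ScalarPair` path above applies (hypothetical
                     // type): for an enum shaped like `enum E { A(u32), B(u32) }`, every
                     // inhabited variant is a single `u32` at the same offset, so
                     // `common_prim` survives the loop and the enum's ABI becomes a
                     // (tag, u32) pair instead of an opaque aggregate.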
1210
1211                 if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
1212                     abi = Abi::Uninhabited;
1213                 }
1214
1215                 let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag.clone());
1216
1217                 tcx.intern_layout(Layout {
1218                     variants: Variants::Multiple {
1219                         tag,
1220                         tag_encoding: TagEncoding::Direct,
1221                         tag_field: 0,
1222                         variants: layout_variants,
1223                     },
1224                     fields: FieldsShape::Arbitrary {
1225                         offsets: vec![Size::ZERO],
1226                         memory_index: vec![0],
1227                     },
1228                     largest_niche,
1229                     abi,
1230                     align,
1231                     size,
1232                 })
1233             }
1234
1235             // Types with no meaningful known layout.
1236             ty::Projection(_) | ty::Opaque(..) => {
1237                 let normalized = tcx.normalize_erasing_regions(param_env, ty);
1238                 if ty == normalized {
1239                     return Err(LayoutError::Unknown(ty));
1240                 }
1241                 tcx.layout_raw(param_env.and(normalized))?
1242             }
1243
1244             ty::Bound(..) | ty::Placeholder(..) | ty::GeneratorWitness(..) | ty::Infer(_) => {
1245                 bug!("Layout::compute: unexpected type `{}`", ty)
1246             }
1247
1248             ty::Param(_) | ty::Error(_) => {
1249                 return Err(LayoutError::Unknown(ty));
1250             }
1251         })
1252     }
1253 }
1254
1255 /// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
1256 #[derive(Clone, Debug, PartialEq)]
1257 enum SavedLocalEligibility {
1258     Unassigned,
1259     Assigned(VariantIdx),
1260     // FIXME: Use newtype_index so we aren't wasting bytes
1261     Ineligible(Option<u32>),
1262 }
1263
1264 // When laying out generators, we divide our saved local fields into two
1265 // categories: overlap-eligible and overlap-ineligible.
1266 //
1267 // Those fields which are ineligible for overlap go in a "prefix" at the
1268 // beginning of the layout, and always have space reserved for them.
1269 //
1270 // Overlap-eligible fields are only assigned to one variant, so we lay
1271 // those fields out for each variant and put them right after the
1272 // prefix.
1273 //
1274 // Finally, in the layout details, we point to the fields from the
1275 // variants they are assigned to. It is possible for some fields to be
1276 // included in multiple variants. No field ever "moves around" in the
1277 // layout; its offset is always the same.
1278 //
1279 // Also included in the layout are the upvars and the discriminant.
1280 // These are included as fields on the "outer" layout; they are not part
1281 // of any variant.
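     // As an illustrative sketch (hypothetical generator): a local live across two
     // suspension points appears in two variants and is promoted to the prefix,
     // while two locals that are each live at only one suspension point, and never
     // storage-live at the same time, stay eligible and may share an offset in
     // their respective variants.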
1282 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
1283     /// Compute the eligibility and assignment of each local.
1284     fn generator_saved_local_eligibility(
1285         &self,
1286         info: &GeneratorLayout<'tcx>,
1287     ) -> (BitSet<GeneratorSavedLocal>, IndexVec<GeneratorSavedLocal, SavedLocalEligibility>) {
1288         use SavedLocalEligibility::*;
1289
1290         let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> =
1291             IndexVec::from_elem_n(Unassigned, info.field_tys.len());
1292
1293         // The saved locals not eligible for overlap. These will get
1294         // "promoted" to the prefix of our generator.
1295         let mut ineligible_locals = BitSet::new_empty(info.field_tys.len());
1296
1297         // Figure out which of our saved locals are fields in only
1298         // one variant. The rest are deemed ineligible for overlap.
1299         for (variant_index, fields) in info.variant_fields.iter_enumerated() {
1300             for local in fields {
1301                 match assignments[*local] {
1302                     Unassigned => {
1303                         assignments[*local] = Assigned(variant_index);
1304                     }
1305                     Assigned(idx) => {
1306                         // We've already seen this local at another suspension
1307                         // point, so it is no longer a candidate.
1308                         trace!(
1309                             "removing local {:?} in >1 variant ({:?}, {:?})",
1310                             local,
1311                             variant_index,
1312                             idx
1313                         );
1314                         ineligible_locals.insert(*local);
1315                         assignments[*local] = Ineligible(None);
1316                     }
1317                     Ineligible(_) => {}
1318                 }
1319             }
1320         }
1321
1322         // Next, check every pair of eligible locals to see if they
1323         // conflict.
1324         for local_a in info.storage_conflicts.rows() {
1325             let conflicts_a = info.storage_conflicts.count(local_a);
1326             if ineligible_locals.contains(local_a) {
1327                 continue;
1328             }
1329
1330             for local_b in info.storage_conflicts.iter(local_a) {
1331                 // local_a and local_b are storage live at the same time, so they
1332                 // cannot overlap in the generator layout. The only way to guarantee
1333                 // this is if they are in the same variant, or if one is ineligible
1334                 // (which means it is stored in every variant).
1335                 if ineligible_locals.contains(local_b)
1336                     || assignments[local_a] == assignments[local_b]
1337                 {
1338                     continue;
1339                 }
1340
1341                 // If they conflict, we will choose one to make ineligible.
1342                 // This is not always optimal; it's just a greedy heuristic that
1343                 // seems to produce good results most of the time.
1344                 let conflicts_b = info.storage_conflicts.count(local_b);
1345                 let (remove, other) =
1346                     if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
1347                 ineligible_locals.insert(remove);
1348                 assignments[remove] = Ineligible(None);
1349                 trace!("removing local {:?} due to conflict with {:?}", remove, other);
1350             }
1351         }
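         // E.g. (hypothetical conflict counts): if local `a` conflicts with three
         // eligible locals and local `b` conflicts only with `a`, the loop above
         // removes `a`, letting `b` and the others keep their variant assignments.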
1352
1353         // Count the number of variants in use. If only one of them is in use,
1354         // then no locals can ever overlap in our layout. In that case it's
1355         // always better to make the remaining locals ineligible, so we can
1356         // lay them out with the other locals in the prefix and eliminate
1357         // unnecessary padding bytes.
1358         {
1359             let mut used_variants = BitSet::new_empty(info.variant_fields.len());
1360             for assignment in &assignments {
1361                 if let Assigned(idx) = assignment {
1362                     used_variants.insert(*idx);
1363                 }
1364             }
1365             if used_variants.count() < 2 {
1366                 for assignment in assignments.iter_mut() {
1367                     *assignment = Ineligible(None);
1368                 }
1369                 ineligible_locals.insert_all();
1370             }
1371         }
1372
1373         // Write down the order of our locals that will be promoted to the prefix.
1374         {
1375             for (idx, local) in ineligible_locals.iter().enumerate() {
1376                 assignments[local] = Ineligible(Some(idx as u32));
1377             }
1378         }
1379         debug!("generator saved local assignments: {:?}", assignments);
1380
1381         (ineligible_locals, assignments)
1382     }
1383
1384     /// Compute the full generator layout.
1385     fn generator_layout(
1386         &self,
1387         ty: Ty<'tcx>,
1388         def_id: hir::def_id::DefId,
1389         substs: SubstsRef<'tcx>,
1390     ) -> Result<&'tcx Layout, LayoutError<'tcx>> {
1391         use SavedLocalEligibility::*;
1392         let tcx = self.tcx;
1393
1394         let subst_field = |ty: Ty<'tcx>| ty.subst(tcx, substs);
1395
1396         let info = tcx.generator_layout(def_id);
1397         let (ineligible_locals, assignments) = self.generator_saved_local_eligibility(&info);
1398
1399         // Build a prefix layout, including "promoting" all ineligible
1400         // locals as part of the prefix. We compute the layout of all of
1401         // these fields at once to get optimal packing.
1402         let tag_index = substs.as_generator().prefix_tys().count();
1403
1404         // `info.variant_fields` already accounts for the reserved variants, so no need to add them.
1405         let max_discr = (info.variant_fields.len() - 1) as u128;
1406         let discr_int = Integer::fit_unsigned(max_discr);
1407         let discr_int_ty = discr_int.to_ty(tcx, false);
1408         let tag = Scalar { value: Primitive::Int(discr_int, false), valid_range: 0..=max_discr };
1409         let tag_layout = self.tcx.intern_layout(Layout::scalar(self, tag.clone()));
1410         let tag_layout = TyAndLayout { ty: discr_int_ty, layout: tag_layout };
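         // E.g. (a sketch): a generator with no suspension points still has the
         // three reserved variants (unresumed, returned, poisoned), so
         // `max_discr = 2`, `fit_unsigned` picks `I8`, and the tag's valid range
         // is `0..=2`.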
1411
1412         let promoted_layouts = ineligible_locals
1413             .iter()
1414             .map(|local| subst_field(info.field_tys[local]))
1415             .map(|ty| tcx.mk_maybe_uninit(ty))
1416             .map(|ty| self.layout_of(ty));
1417         let prefix_layouts = substs
1418             .as_generator()
1419             .prefix_tys()
1420             .map(|ty| self.layout_of(ty))
1421             .chain(iter::once(Ok(tag_layout)))
1422             .chain(promoted_layouts)
1423             .collect::<Result<Vec<_>, _>>()?;
1424         let prefix = self.univariant_uninterned(
1425             ty,
1426             &prefix_layouts,
1427             &ReprOptions::default(),
1428             StructKind::AlwaysSized,
1429         )?;
1430
1431         let (prefix_size, prefix_align) = (prefix.size, prefix.align);
1432
1433         // Split the prefix layout into the "outer" fields (upvars and
1434         // discriminant) and the "promoted" fields. Promoted fields will
1435         // get included in each variant that requested them in
1436         // GeneratorLayout.
1437         debug!("prefix = {:#?}", prefix);
1438         let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
1439             FieldsShape::Arbitrary { mut offsets, memory_index } => {
1440                 let mut inverse_memory_index = invert_mapping(&memory_index);
1441
1442                 // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
1443                 // "outer" and "promoted" fields respectively.
1444                 let b_start = (tag_index + 1) as u32;
1445                 let offsets_b = offsets.split_off(b_start as usize);
1446                 let offsets_a = offsets;
1447
1448                 // Disentangle the "a" and "b" components of `inverse_memory_index`
1449                 // by preserving the order but keeping only one disjoint "half" each.
1450                 // FIXME(eddyb) build a better abstraction for permutations, if possible.
1451                 let inverse_memory_index_b: Vec<_> =
1452                     inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
1453                 inverse_memory_index.retain(|&i| i < b_start);
1454                 let inverse_memory_index_a = inverse_memory_index;
1455
1456                 // Since `inverse_memory_index_{a,b}` each only refer to their
1457                 // respective fields, they can be safely inverted.
1458                 let memory_index_a = invert_mapping(&inverse_memory_index_a);
1459                 let memory_index_b = invert_mapping(&inverse_memory_index_b);
1460
1461                 let outer_fields =
1462                     FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
1463                 (outer_fields, offsets_b, memory_index_b)
1464             }
1465             _ => bug!(),
1466         };
1467
1468         let mut size = prefix.size;
1469         let mut align = prefix.align;
1470         let variants = info
1471             .variant_fields
1472             .iter_enumerated()
1473             .map(|(index, variant_fields)| {
1474                 // Only include overlap-eligible fields when we compute our variant layout.
1475                 let variant_only_tys = variant_fields
1476                     .iter()
1477                     .filter(|local| match assignments[**local] {
1478                         Unassigned => bug!(),
1479                         Assigned(v) if v == index => true,
1480                         Assigned(_) => bug!("assignment does not match variant"),
1481                         Ineligible(_) => false,
1482                     })
1483                     .map(|local| subst_field(info.field_tys[*local]));
1484
1485                 let mut variant = self.univariant_uninterned(
1486                     ty,
1487                     &variant_only_tys
1488                         .map(|ty| self.layout_of(ty))
1489                         .collect::<Result<Vec<_>, _>>()?,
1490                     &ReprOptions::default(),
1491                     StructKind::Prefixed(prefix_size, prefix_align.abi),
1492                 )?;
1493                 variant.variants = Variants::Single { index };
1494
1495                 let (offsets, memory_index) = match variant.fields {
1496                     FieldsShape::Arbitrary { offsets, memory_index } => (offsets, memory_index),
1497                     _ => bug!(),
1498                 };
1499
1500                 // Now, stitch the promoted and variant-only fields back together in
1501                 // the order they are mentioned by our GeneratorLayout.
1502                 // Because we only use some subset (that can differ between variants)
1503                 // of the promoted fields, we can't just pick those elements of the
1504                 // `promoted_memory_index` (as we'd end up with gaps).
1505                 // So instead, we build an "inverse memory_index", as if all of the
1506                 // promoted fields were being used, but leave the elements not in the
1507                 // subset as `INVALID_FIELD_IDX`, which we can filter out later to
1508                 // obtain a valid (bijective) mapping.
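                 // A tiny worked sketch (hypothetical indices): with two promoted
                 // fields and a variant using only promoted field 1 plus one eligible
                 // field, the combined inverse index starts as `[!0, !0, !0]`; after
                 // the used slots are filled in and the `!0` entries dropped, what
                 // remains is a dense permutation over just this variant's fields.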
1509                 const INVALID_FIELD_IDX: u32 = !0;
1510                 let mut combined_inverse_memory_index =
1511                     vec![INVALID_FIELD_IDX; promoted_memory_index.len() + memory_index.len()];
1512                 let mut offsets_and_memory_index = offsets.into_iter().zip(memory_index);
1513                 let combined_offsets = variant_fields
1514                     .iter()
1515                     .enumerate()
1516                     .map(|(i, local)| {
1517                         let (offset, memory_index) = match assignments[*local] {
1518                             Unassigned => bug!(),
1519                             Assigned(_) => {
1520                                 let (offset, memory_index) =
1521                                     offsets_and_memory_index.next().unwrap();
1522                                 (offset, promoted_memory_index.len() as u32 + memory_index)
1523                             }
1524                             Ineligible(field_idx) => {
1525                                 let field_idx = field_idx.unwrap() as usize;
1526                                 (promoted_offsets[field_idx], promoted_memory_index[field_idx])
1527                             }
1528                         };
1529                         combined_inverse_memory_index[memory_index as usize] = i as u32;
1530                         offset
1531                     })
1532                     .collect();
1533
1534                 // Remove the unused slots and invert the mapping to obtain the
1535                 // combined `memory_index` (also see previous comment).
1536                 combined_inverse_memory_index.retain(|&i| i != INVALID_FIELD_IDX);
1537                 let combined_memory_index = invert_mapping(&combined_inverse_memory_index);
1538
1539                 variant.fields = FieldsShape::Arbitrary {
1540                     offsets: combined_offsets,
1541                     memory_index: combined_memory_index,
1542                 };
1543
1544                 size = size.max(variant.size);
1545                 align = align.max(variant.align);
1546                 Ok(variant)
1547             })
1548             .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1549
1550         size = size.align_to(align.abi);
1551
1552         let abi = if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi.is_uninhabited())
1553         {
1554             Abi::Uninhabited
1555         } else {
1556             Abi::Aggregate { sized: true }
1557         };
1558
1559         let layout = tcx.intern_layout(Layout {
1560             variants: Variants::Multiple {
1561                 tag,
1562                 tag_encoding: TagEncoding::Direct,
1563                 tag_field: tag_index,
1564                 variants,
1565             },
1566             fields: outer_fields,
1567             abi,
1568             largest_niche: prefix.largest_niche,
1569             size,
1570             align,
1571         });
1572         debug!("generator layout ({:?}): {:#?}", ty, layout);
1573         Ok(layout)
1574     }
1575
1576     /// This is invoked by the `layout_raw` query to record the final
1577     /// layout of each type.
1578     #[inline(always)]
1579     fn record_layout_for_printing(&self, layout: TyAndLayout<'tcx>) {
1580         // If we are running with `-Zprint-type-sizes`, maybe record layouts
1581         // for dumping later.
1582         if self.tcx.sess.opts.debugging_opts.print_type_sizes {
1583             self.record_layout_for_printing_outlined(layout)
1584         }
1585     }
1586
1587     fn record_layout_for_printing_outlined(&self, layout: TyAndLayout<'tcx>) {
1588         // Ignore layouts computed within non-empty parameter environments, and
1589         // non-monomorphic layouts, as the user only wants to see the layouts
1590         // that result from the final codegen session.
1591         if layout.ty.has_param_types_or_consts() || !self.param_env.caller_bounds.is_empty() {
1592             return;
1593         }
1594
1595         // (delay format until we actually need it)
1596         let record = |kind, packed, opt_discr_size, variants| {
1597             let type_desc = format!("{:?}", layout.ty);
1598             self.tcx.sess.code_stats.record_type_size(
1599                 kind,
1600                 type_desc,
1601                 layout.align.abi,
1602                 layout.size,
1603                 packed,
1604                 opt_discr_size,
1605                 variants,
1606             );
1607         };
1608
1609         let adt_def = match layout.ty.kind {
1610             ty::Adt(ref adt_def, _) => {
1611                 debug!("print-type-size t: `{:?}` process adt", layout.ty);
1612                 adt_def
1613             }
1614
1615             ty::Closure(..) => {
1616                 debug!("print-type-size t: `{:?}` record closure", layout.ty);
1617                 record(DataTypeKind::Closure, false, None, vec![]);
1618                 return;
1619             }
1620
1621             _ => {
1622                 debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
1623                 return;
1624             }
1625         };
1626
1627         let adt_kind = adt_def.adt_kind();
1628         let adt_packed = adt_def.repr.pack.is_some();
1629
1630         let build_variant_info = |n: Option<Ident>, flds: &[Symbol], layout: TyAndLayout<'tcx>| {
1631             let mut min_size = Size::ZERO;
1632             let field_info: Vec<_> = flds
1633                 .iter()
1634                 .enumerate()
1635                 .map(|(i, &name)| match layout.field(self, i) {
1636                     Err(err) => {
1637                         bug!("no layout found for field {}: `{:?}`", name, err);
1638                     }
1639                     Ok(field_layout) => {
1640                         let offset = layout.fields.offset(i);
1641                         let field_end = offset + field_layout.size;
1642                         if min_size < field_end {
1643                             min_size = field_end;
1644                         }
1645                         FieldInfo {
1646                             name: name.to_string(),
1647                             offset: offset.bytes(),
1648                             size: field_layout.size.bytes(),
1649                             align: field_layout.align.abi.bytes(),
1650                         }
1651                     }
1652                 })
1653                 .collect();
1654
1655             VariantInfo {
1656                 name: n.map(|n| n.to_string()),
1657                 kind: if layout.is_unsized() { SizeKind::Min } else { SizeKind::Exact },
1658                 align: layout.align.abi.bytes(),
1659                 size: if min_size.bytes() == 0 { layout.size.bytes() } else { min_size.bytes() },
1660                 fields: field_info,
1661             }
1662         };
1663
1664         match layout.variants {
1665             Variants::Single { index } => {
1666                 debug!("print-type-size `{:#?}` variant {}", layout, adt_def.variants[index].ident);
1667                 if !adt_def.variants.is_empty() {
1668                     let variant_def = &adt_def.variants[index];
1669                     let fields: Vec<_> = variant_def.fields.iter().map(|f| f.ident.name).collect();
1670                     record(
1671                         adt_kind.into(),
1672                         adt_packed,
1673                         None,
1674                         vec![build_variant_info(Some(variant_def.ident), &fields, layout)],
1675                     );
1676                 } else {
1677                     // (This case arises for *empty* enums, so give it
1678                     // zero variants.)
1679                     record(adt_kind.into(), adt_packed, None, vec![]);
1680                 }
1681             }
1682
1683             Variants::Multiple { ref tag, ref tag_encoding, .. } => {
1684                 debug!(
1685                     "print-type-size `{:#?}` adt general variants def {}",
1686                     layout.ty,
1687                     adt_def.variants.len()
1688                 );
1689                 let variant_infos: Vec<_> = adt_def
1690                     .variants
1691                     .iter_enumerated()
1692                     .map(|(i, variant_def)| {
1693                         let fields: Vec<_> =
1694                             variant_def.fields.iter().map(|f| f.ident.name).collect();
1695                         build_variant_info(
1696                             Some(variant_def.ident),
1697                             &fields,
1698                             layout.for_variant(self, i),
1699                         )
1700                     })
1701                     .collect();
1702                 record(
1703                     adt_kind.into(),
1704                     adt_packed,
1705                     match tag_encoding {
1706                         TagEncoding::Direct => Some(tag.value.size(self)),
1707                         _ => None,
1708                     },
1709                     variant_infos,
1710                 );
1711             }
1712         }
1713     }
1714 }
1715
1716 /// Type size "skeleton", i.e., the only information determining a type's size.
1717 /// While this is conservative (aside from constant sizes, only pointers,
1718 /// newtypes thereof, and null-pointer-optimized enums are allowed), it is
1719 /// enough to statically check common use cases of transmute.
1720 #[derive(Copy, Clone, Debug)]
1721 pub enum SizeSkeleton<'tcx> {
1722     /// Any statically computable Layout.
1723     Known(Size),
1724
1725     /// A potentially-fat pointer.
1726     Pointer {
1727         /// If true, this pointer is never null.
1728         non_zero: bool,
1729         /// The type which determines the unsized metadata, if any,
1730         /// of this pointer. Either a type parameter or a projection
1731         /// depending on one, with regions erased.
1732         tail: Ty<'tcx>,
1733     },
1734 }
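     // For example (a sketch): given a type parameter `T: ?Sized`, `&T` has the
     // skeleton `Pointer { non_zero: true, tail: T }`, and a newtype wrapping it
     // keeps that skeleton, so `same_size` can accept a transmute between the two
     // even though neither size is statically known.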
1735
1736 impl<'tcx> SizeSkeleton<'tcx> {
1737     pub fn compute(
1738         ty: Ty<'tcx>,
1739         tcx: TyCtxt<'tcx>,
1740         param_env: ty::ParamEnv<'tcx>,
1741     ) -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
1742         debug_assert!(!ty.has_infer_types_or_consts());
1743
1744         // First try computing a static layout.
1745         let err = match tcx.layout_of(param_env.and(ty)) {
1746             Ok(layout) => {
1747                 return Ok(SizeSkeleton::Known(layout.size));
1748             }
1749             Err(err) => err,
1750         };
1751
1752         match ty.kind {
1753             ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
1754                 let non_zero = !ty.is_unsafe_ptr();
1755                 let tail = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
1756                 match tail.kind {
1757                     ty::Param(_) | ty::Projection(_) => {
1758                         debug_assert!(tail.has_param_types_or_consts());
1759                         Ok(SizeSkeleton::Pointer { non_zero, tail: tcx.erase_regions(&tail) })
1760                     }
1761                     _ => bug!(
1762                         "SizeSkeleton::compute({}): layout errored ({}), yet \
1763                               tail `{}` is not a type parameter or a projection",
1764                         ty,
1765                         err,
1766                         tail
1767                     ),
1768                 }
1769             }
1770
1771             ty::Adt(def, substs) => {
1772                 // Only newtypes and enums w/ nullable pointer optimization.
1773                 if def.is_union() || def.variants.is_empty() || def.variants.len() > 2 {
1774                     return Err(err);
1775                 }
1776
1777                 // Get a zero-sized variant or a pointer newtype.
1778                 let zero_or_ptr_variant = |i| {
1779                     let i = VariantIdx::new(i);
1780                     let fields = def.variants[i]
1781                         .fields
1782                         .iter()
1783                         .map(|field| SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env));
1784                     let mut ptr = None;
1785                     for field in fields {
1786                         let field = field?;
1787                         match field {
1788                             SizeSkeleton::Known(size) => {
1789                                 if size.bytes() > 0 {
1790                                     return Err(err);
1791                                 }
1792                             }
1793                             SizeSkeleton::Pointer { .. } => {
1794                                 if ptr.is_some() {
1795                                     return Err(err);
1796                                 }
1797                                 ptr = Some(field);
1798                             }
1799                         }
1800                     }
1801                     Ok(ptr)
1802                 };
1803
1804                 let v0 = zero_or_ptr_variant(0)?;
1805                 // Newtype.
1806                 if def.variants.len() == 1 {
1807                     if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
1808                         return Ok(SizeSkeleton::Pointer {
1809                             non_zero: non_zero
1810                                 || match tcx.layout_scalar_valid_range(def.did) {
1811                                     (Bound::Included(start), Bound::Unbounded) => start > 0,
1812                                     (Bound::Included(start), Bound::Included(end)) => {
1813                                         0 < start && start < end
1814                                     }
1815                                     _ => false,
1816                                 },
1817                             tail,
1818                         });
1819                     } else {
1820                         return Err(err);
1821                     }
1822                 }
1823
1824                 let v1 = zero_or_ptr_variant(1)?;
1825                 // Nullable pointer enum optimization.
1826                 match (v0, v1) {
1827                     (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None)
1828                     | (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
1829                         Ok(SizeSkeleton::Pointer { non_zero: false, tail })
1830                     }
1831                     _ => Err(err),
1832                 }
1833             }
1834
1835             ty::Projection(_) | ty::Opaque(..) => {
1836                 let normalized = tcx.normalize_erasing_regions(param_env, ty);
1837                 if ty == normalized {
1838                     Err(err)
1839                 } else {
1840                     SizeSkeleton::compute(normalized, tcx, param_env)
1841                 }
1842             }
1843
1844             _ => Err(err),
1845         }
1846     }
1847
1848     pub fn same_size(self, other: SizeSkeleton<'_>) -> bool {
1849         match (self, other) {
1850             (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
1851             (SizeSkeleton::Pointer { tail: a, .. }, SizeSkeleton::Pointer { tail: b, .. }) => {
1852                 a == b
1853             }
1854             _ => false,
1855         }
1856     }
1857 }
1858
1859 pub trait HasTyCtxt<'tcx>: HasDataLayout {
1860     fn tcx(&self) -> TyCtxt<'tcx>;
1861 }
1862
1863 pub trait HasParamEnv<'tcx> {
1864     fn param_env(&self) -> ty::ParamEnv<'tcx>;
1865 }
1866
1867 impl<'tcx> HasDataLayout for TyCtxt<'tcx> {
1868     fn data_layout(&self) -> &TargetDataLayout {
1869         &self.data_layout
1870     }
1871 }
1872
1873 impl<'tcx> HasTyCtxt<'tcx> for TyCtxt<'tcx> {
1874     fn tcx(&self) -> TyCtxt<'tcx> {
1875         *self
1876     }
1877 }
1878
1879 impl<'tcx, C> HasParamEnv<'tcx> for LayoutCx<'tcx, C> {
1880     fn param_env(&self) -> ty::ParamEnv<'tcx> {
1881         self.param_env
1882     }
1883 }
1884
1885 impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
1886     fn data_layout(&self) -> &TargetDataLayout {
1887         self.tcx.data_layout()
1888     }
1889 }
1890
1891 impl<'tcx, T: HasTyCtxt<'tcx>> HasTyCtxt<'tcx> for LayoutCx<'tcx, T> {
1892     fn tcx(&self) -> TyCtxt<'tcx> {
1893         self.tcx.tcx()
1894     }
1895 }
1896
1897 pub type TyAndLayout<'tcx> = ::rustc_target::abi::TyAndLayout<'tcx, Ty<'tcx>>;
1898
1899 impl<'tcx> LayoutOf for LayoutCx<'tcx, TyCtxt<'tcx>> {
1900     type Ty = Ty<'tcx>;
1901     type TyAndLayout = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
1902
1903     /// Computes the layout of a type. Note that this implicitly
1904     /// executes in "reveal all" mode.
1905     fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout {
1906         let param_env = self.param_env.with_reveal_all();
1907         let ty = self.tcx.normalize_erasing_regions(param_env, ty);
1908         let layout = self.tcx.layout_raw(param_env.and(ty))?;
1909         let layout = TyAndLayout { ty, layout };
1910
1911         // N.B., this recording is normally disabled; when enabled, it
1912         // can however trigger recursive invocations of `layout_of`.
1913         // Therefore, we execute it *after* the main query has
1914         // completed, to avoid problems around recursive structures
1915         // and the like. (Admittedly, I wasn't able to reproduce a problem
1916         // here, but it seems like the right thing to do. -nmatsakis)
1917         self.record_layout_for_printing(layout);
1918
1919         Ok(layout)
1920     }
1921 }
1922
1923 impl LayoutOf for LayoutCx<'tcx, ty::query::TyCtxtAt<'tcx>> {
1924     type Ty = Ty<'tcx>;
1925     type TyAndLayout = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
1926
1927     /// Computes the layout of a type. Note that this implicitly
1928     /// executes in "reveal all" mode.
1929     fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout {
1930         let param_env = self.param_env.with_reveal_all();
1931         let ty = self.tcx.normalize_erasing_regions(param_env, ty);
1932         let layout = self.tcx.layout_raw(param_env.and(ty))?;
1933         let layout = TyAndLayout { ty, layout };
1934
1935         // N.B., this recording is normally disabled; when enabled, it
1936         // can however trigger recursive invocations of `layout_of`.
1937         // Therefore, we execute it *after* the main query has
1938         // completed, to avoid problems around recursive structures
1939         // and the like. (Admittedly, I wasn't able to reproduce a problem
1940         // here, but it seems like the right thing to do. -nmatsakis)
1941         let cx = LayoutCx { tcx: *self.tcx, param_env: self.param_env };
1942         cx.record_layout_for_printing(layout);
1943
1944         Ok(layout)
1945     }
1946 }
1947
1948 // Helper (inherent) `layout_of` methods to avoid pushing `LayoutCx` to users.
1949 impl TyCtxt<'tcx> {
1950     /// Computes the layout of a type. Note that this implicitly
1951     /// executes in "reveal all" mode.
1952     #[inline]
1953     pub fn layout_of(
1954         self,
1955         param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
1956     ) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
1957         let cx = LayoutCx { tcx: self, param_env: param_env_and_ty.param_env };
1958         cx.layout_of(param_env_and_ty.value)
1959     }
1960 }
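     // Typical use (a sketch; the caller-side names are hypothetical): callers go
     // through this helper rather than building a `LayoutCx` by hand, e.g.
     //     let size = tcx.layout_of(param_env.and(ty))?.size;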
1961
1962 impl ty::query::TyCtxtAt<'tcx> {
1963     /// Computes the layout of a type. Note that this implicitly
1964     /// executes in "reveal all" mode.
1965     #[inline]
1966     pub fn layout_of(
1967         self,
1968         param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
1969     ) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
1970         let cx = LayoutCx { tcx: self.at(self.span), param_env: param_env_and_ty.param_env };
1971         cx.layout_of(param_env_and_ty.value)
1972     }
1973 }
1974
1975 impl<'tcx, C> TyAndLayoutMethods<'tcx, C> for Ty<'tcx>
1976 where
1977     C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout: MaybeResult<TyAndLayout<'tcx>>>
1978         + HasTyCtxt<'tcx>
1979         + HasParamEnv<'tcx>,
1980 {
1981     fn for_variant(
1982         this: TyAndLayout<'tcx>,
1983         cx: &C,
1984         variant_index: VariantIdx,
1985     ) -> TyAndLayout<'tcx> {
1986         let layout = match this.variants {
1987             Variants::Single { index }
1988                 // If all variants but one are uninhabited, the variant layout is the enum layout.
1989                 if index == variant_index &&
1990                 // Don't confuse variants of uninhabited enums with the enum itself.
1991                 // For more details see https://github.com/rust-lang/rust/issues/69763.
1992                 this.fields != FieldsShape::Primitive =>
1993             {
1994                 this.layout
1995             }
1996
1997             Variants::Single { index } => {
1998                 // Deny calling for_variant more than once for non-Single enums.
1999                 if let Ok(original_layout) = cx.layout_of(this.ty).to_result() {
2000                     assert_eq!(original_layout.variants, Variants::Single { index });
2001                 }
2002
2003                 let fields = match this.ty.kind {
2004                     ty::Adt(def, _) if def.variants.is_empty() =>
2005                         bug!("for_variant called on zero-variant enum"),
2006                     ty::Adt(def, _) => def.variants[variant_index].fields.len(),
2007                     _ => bug!(),
2008                 };
2009                 let tcx = cx.tcx();
2010                 tcx.intern_layout(Layout {
2011                     variants: Variants::Single { index: variant_index },
2012                     fields: match NonZeroUsize::new(fields) {
2013                         Some(fields) => FieldsShape::Union(fields),
2014                         None => FieldsShape::Arbitrary { offsets: vec![], memory_index: vec![] },
2015                     },
2016                     abi: Abi::Uninhabited,
2017                     largest_niche: None,
2018                     align: tcx.data_layout.i8_align,
2019                     size: Size::ZERO,
2020                 })
2021             }
2022
2023             Variants::Multiple { ref variants, .. } => &variants[variant_index],
2024         };
2025
2026         assert_eq!(layout.variants, Variants::Single { index: variant_index });
2027
2028         TyAndLayout { ty: this.ty, layout }
2029     }
2030
2031     fn field(this: TyAndLayout<'tcx>, cx: &C, i: usize) -> C::TyAndLayout {
2032         let tcx = cx.tcx();
2033         let tag_layout = |tag: &Scalar| -> C::TyAndLayout {
2034             let layout = Layout::scalar(cx, tag.clone());
2035             MaybeResult::from(Ok(TyAndLayout {
2036                 layout: tcx.intern_layout(layout),
2037                 ty: tag.value.to_ty(tcx),
2038             }))
2039         };
2040
2041         cx.layout_of(match this.ty.kind {
2042             ty::Bool
2043             | ty::Char
2044             | ty::Int(_)
2045             | ty::Uint(_)
2046             | ty::Float(_)
2047             | ty::FnPtr(_)
2048             | ty::Never
2049             | ty::FnDef(..)
2050             | ty::GeneratorWitness(..)
2051             | ty::Foreign(..)
2052             | ty::Dynamic(..) => bug!("TyAndLayout::field_type({:?}): not applicable", this),
2053
2054             // Potentially-fat pointers.
2055             ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
2056                 assert!(i < this.fields.count());
2057
2058                 // Reuse the fat `*T` type as its own thin pointer data field.
2059                 // This provides information about, e.g., DST struct pointees
2060                 // (which may have no non-DST form), and will work as long
2061                 // as the `Abi` or `FieldsShape` is checked by users.
2062                 if i == 0 {
2063                     let nil = tcx.mk_unit();
2064                     let ptr_ty = if this.ty.is_unsafe_ptr() {
2065                         tcx.mk_mut_ptr(nil)
2066                     } else {
2067                         tcx.mk_mut_ref(tcx.lifetimes.re_static, nil)
2068                     };
2069                     return MaybeResult::from(cx.layout_of(ptr_ty).to_result().map(
2070                         |mut ptr_layout| {
2071                             ptr_layout.ty = this.ty;
2072                             ptr_layout
2073                         },
2074                     ));
2075                 }
2076
2077                 match tcx.struct_tail_erasing_lifetimes(pointee, cx.param_env()).kind {
2078                     ty::Slice(_) | ty::Str => tcx.types.usize,
2079                     ty::Dynamic(_, _) => {
2080                         tcx.mk_imm_ref(tcx.lifetimes.re_static, tcx.mk_array(tcx.types.usize, 3))
2081                         /* FIXME: use actual fn pointers
2082                         Warning: naively computing the number of entries in the
2083                         vtable by counting the methods on the trait + methods on
2084                         all parent traits does not work, because some methods can
2085                         be not object safe and thus excluded from the vtable.
2086                         Increase this counter if you tried to implement this but
2087                         failed to do it without duplicating a lot of code from
2088                         other places in the compiler: 2
2089                         tcx.mk_tup(&[
2090                             tcx.mk_array(tcx.types.usize, 3),
2091                             tcx.mk_array(Option<fn()>),
2092                         ])
2093                         */
2094                     }
2095                     _ => bug!("TyAndLayout::field_type({:?}): not applicable", this),
2096                 }
2097             }
2098
2099             // Arrays and slices.
2100             ty::Array(element, _) | ty::Slice(element) => element,
2101             ty::Str => tcx.types.u8,
2102
2103             // Tuples, generators and closures.
2104             ty::Closure(_, ref substs) => substs.as_closure().upvar_tys().nth(i).unwrap(),
2105
2106             ty::Generator(def_id, ref substs, _) => match this.variants {
2107                 Variants::Single { index } => substs
2108                     .as_generator()
2109                     .state_tys(def_id, tcx)
2110                     .nth(index.as_usize())
2111                     .unwrap()
2112                     .nth(i)
2113                     .unwrap(),
2114                 Variants::Multiple { ref tag, tag_field, .. } => {
2115                     if i == tag_field {
2116                         return tag_layout(tag);
2117                     }
2118                     substs.as_generator().prefix_tys().nth(i).unwrap()
2119                 }
2120             },
2121
2122             ty::Tuple(tys) => tys[i].expect_ty(),
2123
2124             // SIMD vector types.
2125             ty::Adt(def, ..) if def.repr.simd() => this.ty.simd_type(tcx),
2126
2127             // ADTs.
2128             ty::Adt(def, substs) => {
2129                 match this.variants {
2130                     Variants::Single { index } => def.variants[index].fields[i].ty(tcx, substs),
2131
2132                     // Discriminant field for enums (where applicable).
2133                     Variants::Multiple { ref tag, .. } => {
2134                         assert_eq!(i, 0);
2135                         return tag_layout(tag);
2136                     }
2137                 }
2138             }
2139
2140             ty::Projection(_)
2141             | ty::Bound(..)
2142             | ty::Placeholder(..)
2143             | ty::Opaque(..)
2144             | ty::Param(_)
2145             | ty::Infer(_)
2146             | ty::Error(_) => bug!("TyAndLayout::field_type: unexpected type `{}`", this.ty),
2147         })
2148     }
2149
2150     fn pointee_info_at(this: TyAndLayout<'tcx>, cx: &C, offset: Size) -> Option<PointeeInfo> {
2151         match this.ty.kind {
2152             ty::RawPtr(mt) if offset.bytes() == 0 => {
2153                 cx.layout_of(mt.ty).to_result().ok().map(|layout| PointeeInfo {
2154                     size: layout.size,
2155                     align: layout.align.abi,
2156                     safe: None,
2157                 })
2158             }
2159
2160             ty::Ref(_, ty, mt) if offset.bytes() == 0 => {
2161                 let tcx = cx.tcx();
2162                 let is_freeze = ty.is_freeze(tcx.at(DUMMY_SP), cx.param_env());
2163                 let kind = match mt {
2164                     hir::Mutability::Not => {
2165                         if is_freeze {
2166                             PointerKind::Frozen
2167                         } else {
2168                             PointerKind::Shared
2169                         }
2170                     }
2171                     hir::Mutability::Mut => {
2172                         // Previously we would only emit noalias annotations for LLVM >= 6 or in
2173                         // panic=abort mode. That was deemed right, as prior versions had many bugs
2174                         // in conjunction with unwinding, but later versions didn’t seem to have
2175                         // said issues. See issue #31681.
2176                         //
2177                         // Alas, later on we encountered a case where noalias would generate wrong
2178                         // code altogether even with recent versions of LLVM in *safe* code with no
2179                         // unwinding involved. See #54462.
2180                         //
2181                         // For now, do not enable mutable_noalias by default at all, while the
2182                         // issue is being figured out.
2183                         if tcx.sess.opts.debugging_opts.mutable_noalias {
2184                             PointerKind::UniqueBorrowed
2185                         } else {
2186                             PointerKind::Shared
2187                         }
2188                     }
2189                 };
2190
2191                 cx.layout_of(ty).to_result().ok().map(|layout| PointeeInfo {
2192                     size: layout.size,
2193                     align: layout.align.abi,
2194                     safe: Some(kind),
2195                 })
2196             }
2197
2198             _ => {
2199                 let mut data_variant = match this.variants {
2200                     // Within the discriminant field, only the niche itself is
2201                     // always initialized, so we only check for a pointer at its
2202                     // offset.
2203                     //
2204                     // If the niche is a pointer, it's either valid (according
2205                     // to its type), or null (which the niche field's scalar
2206                     // validity range encodes).  This allows using
2207                     // `dereferenceable_or_null` for e.g., `Option<&T>`, and
2208                     // this will continue to work as long as we don't start
2209                     // using more niches than just null (e.g., the first page of
2210                     // the address space, or unaligned pointers).
2211                     Variants::Multiple {
2212                         tag_encoding: TagEncoding::Niche { dataful_variant, .. },
2213                         tag_field,
2214                         ..
2215                     } if this.fields.offset(tag_field) == offset => {
2216                         Some(this.for_variant(cx, dataful_variant))
2217                     }
2218                     _ => Some(this),
2219                 };
2220
2221                 if let Some(variant) = data_variant {
2222                     // We're not interested in any unions.
2223                     if let FieldsShape::Union(_) = variant.fields {
2224                         data_variant = None;
2225                     }
2226                 }
2227
2228                 let mut result = None;
2229
2230                 if let Some(variant) = data_variant {
2231                     let ptr_end = offset + Pointer.size(cx);
2232                     for i in 0..variant.fields.count() {
2233                         let field_start = variant.fields.offset(i);
2234                         if field_start <= offset {
2235                             let field = variant.field(cx, i);
2236                             result = field.to_result().ok().and_then(|field| {
2237                                 if ptr_end <= field_start + field.size {
2238                                     // We found the right field, look inside it.
2239                                     field.pointee_info_at(cx, offset - field_start)
2240                                 } else {
2241                                     None
2242                                 }
2243                             });
2244                             if result.is_some() {
2245                                 break;
2246                             }
2247                         }
2248                     }
2249                 }
2250
2251                 // FIXME(eddyb) This should be for `ptr::Unique<T>`, not `Box<T>`.
2252                 if let Some(ref mut pointee) = result {
2253                     if let ty::Adt(def, _) = this.ty.kind {
2254                         if def.is_box() && offset.bytes() == 0 {
2255                             pointee.safe = Some(PointerKind::UniqueOwned);
2256                         }
2257                     }
2258                 }
2259
2260                 result
2261             }
2262         }
2263     }
2264 }
2265
2266 impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for LayoutError<'tcx> {
2267     fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
2268         use crate::ty::layout::LayoutError::*;
2269         mem::discriminant(self).hash_stable(hcx, hasher);
2270
2271         match *self {
2272             Unknown(t) | SizeOverflow(t) => t.hash_stable(hcx, hasher),
2273         }
2274     }
2275 }
2276
2277 impl<'tcx> ty::Instance<'tcx> {
2278     // NOTE(eddyb) this is private to avoid using it from outside of
2279     // `FnAbi::of_instance` - any other uses are either too high-level
2280     // for `Instance` (e.g. typeck would use `Ty::fn_sig` instead),
2281     // or should go through `FnAbi` instead, to avoid losing any
2282     // adjustments `FnAbi::of_instance` might be performing.
2283     fn fn_sig_for_fn_abi(&self, tcx: TyCtxt<'tcx>) -> ty::PolyFnSig<'tcx> {
2284         let ty = self.monomorphic_ty(tcx);
2285         match ty.kind {
2286             ty::FnDef(..) |
2287             // Shims currently have type FnPtr. Not sure this should remain.
2288             ty::FnPtr(_) => {
2289                 let mut sig = ty.fn_sig(tcx);
2290                 if let ty::InstanceDef::VtableShim(..) = self.def {
2291                     // Modify `fn(self, ...)` to `fn(self: *mut Self, ...)`.
2292                     sig = sig.map_bound(|mut sig| {
2293                         let mut inputs_and_output = sig.inputs_and_output.to_vec();
2294                         inputs_and_output[0] = tcx.mk_mut_ptr(inputs_and_output[0]);
2295                         sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);
2296                         sig
2297                     });
2298                 }
2299                 sig
2300             }
2301             ty::Closure(def_id, substs) => {
2302                 let sig = substs.as_closure().sig();
2303
2304                 let env_ty = tcx.closure_env_ty(def_id, substs).unwrap();
2305                 sig.map_bound(|sig| tcx.mk_fn_sig(
2306                     iter::once(*env_ty.skip_binder()).chain(sig.inputs().iter().cloned()),
2307                     sig.output(),
2308                     sig.c_variadic,
2309                     sig.unsafety,
2310                     sig.abi
2311                 ))
2312             }
2313             ty::Generator(_, substs, _) => {
2314                 let sig = substs.as_generator().poly_sig();
2315
2316                 let env_region = ty::ReLateBound(ty::INNERMOST, ty::BrEnv);
2317                 let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty);
2318
2319                 let pin_did = tcx.require_lang_item(PinTypeLangItem, None);
2320                 let pin_adt_ref = tcx.adt_def(pin_did);
2321                 let pin_substs = tcx.intern_substs(&[env_ty.into()]);
2322                 let env_ty = tcx.mk_adt(pin_adt_ref, pin_substs);
2323
2324                 sig.map_bound(|sig| {
2325                     let state_did = tcx.require_lang_item(GeneratorStateLangItem, None);
2326                     let state_adt_ref = tcx.adt_def(state_did);
2327                     let state_substs = tcx.intern_substs(&[
2328                         sig.yield_ty.into(),
2329                         sig.return_ty.into(),
2330                     ]);
2331                     let ret_ty = tcx.mk_adt(state_adt_ref, state_substs);
2332
2333                     tcx.mk_fn_sig(
2334                         [env_ty, sig.resume_ty].iter(),
2335                         &ret_ty,
2336                         false,
2337                         hir::Unsafety::Normal,
2338                         rustc_target::spec::abi::Abi::Rust
2339                     )
2340                 })
2341             }
2342             _ => bug!("unexpected type {:?} in Instance::fn_sig", ty)
2343         }
2344     }
2345 }
2346
2347 pub trait FnAbiExt<'tcx, C>
2348 where
2349     C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout = TyAndLayout<'tcx>>
2350         + HasDataLayout
2351         + HasTargetSpec
2352         + HasTyCtxt<'tcx>
2353         + HasParamEnv<'tcx>,
2354 {
2355     /// Compute a `FnAbi` suitable for indirect calls, i.e. to `fn` pointers.
2356     ///
2357     /// NB: this doesn't handle virtual calls - those should use `FnAbi::of_instance`
2358     /// instead, where the instance is a `InstanceDef::Virtual`.
2359     fn of_fn_ptr(cx: &C, sig: ty::PolyFnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self;
2360
2361     /// Compute a `FnAbi` suitable for declaring/defining an `fn` instance, and for
2362     /// direct calls to an `fn`.
2363     ///
2364     /// NB: that includes virtual calls, which are represented by "direct calls"
2365     /// to a `InstanceDef::Virtual` instance (of `<dyn Trait as Trait>::fn`).
2366     fn of_instance(cx: &C, instance: ty::Instance<'tcx>, extra_args: &[Ty<'tcx>]) -> Self;
2367
2368     fn new_internal(
2369         cx: &C,
2370         sig: ty::PolyFnSig<'tcx>,
2371         extra_args: &[Ty<'tcx>],
2372         caller_location: Option<Ty<'tcx>>,
2373         codegen_fn_attr_flags: CodegenFnAttrFlags,
2374         mk_arg_type: impl Fn(Ty<'tcx>, Option<usize>) -> ArgAbi<'tcx, Ty<'tcx>>,
2375     ) -> Self;
2376     fn adjust_for_abi(&mut self, cx: &C, abi: SpecAbi);
2377 }
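// Illustrative use of the trait above, assuming a hypothetical codegen
// context `cx` that satisfies the listed bounds:
//
//     let direct = FnAbi::of_instance(&cx, instance, &[]);  // direct call
//     let via_ptr = FnAbi::of_fn_ptr(&cx, poly_sig, &[]);   // call through a fn pointer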
2378
2379 fn fn_can_unwind(
2380     panic_strategy: PanicStrategy,
2381     codegen_fn_attr_flags: CodegenFnAttrFlags,
2382     call_conv: Conv,
2383 ) -> bool {
2384     if panic_strategy != PanicStrategy::Unwind {
2385         // In panic=abort mode we assume nothing can unwind anywhere, so
2386         // optimize based on this!
2387         false
2388     } else if codegen_fn_attr_flags.contains(CodegenFnAttrFlags::UNWIND) {
2389         // If a specific #[unwind] attribute is present, use that.
2390         true
2391     } else if codegen_fn_attr_flags.contains(CodegenFnAttrFlags::RUSTC_ALLOCATOR_NOUNWIND) {
2392         // Special attribute for allocator functions, which can't unwind.
2393         false
2394     } else {
2395         if call_conv == Conv::Rust {
2396             // Any Rust method (or `extern "Rust" fn` or `extern
2397             // "rust-call" fn`) is explicitly allowed to unwind
2398             // (unless it has a no-unwind attribute, handled above).
2399             true
2400         } else {
2401             // Anything else is either:
2402             //
2403             //  1. A foreign item using a non-Rust ABI (like `extern "C" { fn foo(); }`), or
2404             //
2405             //  2. A Rust item using a non-Rust ABI (like `extern "C" fn foo() { ... }`).
2406             //
2407             // Foreign items (case 1) are assumed to not unwind; it is
2408             // UB otherwise. (At least for now; see also
2409             // rust-lang/rust#63909 and Rust RFC 2753.)
2410             //
2411             // Items defined in Rust with non-Rust ABIs (case 2) are also
2412             // not supposed to unwind. Whether this should be enforced
2413             // (versus stating it is UB) and *how* it would be enforced
2414             // is currently under discussion; see rust-lang/rust#58794.
2415             //
2416             // In either case, we mark the item as explicitly nounwind.
2417             false
2418         }
2419     }
2420 }
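// Illustrative outcomes: under `-C panic=abort` every function is treated
// as nounwind; under `-C panic=unwind` a plain Rust `fn` (`Conv::Rust`) can
// unwind, while e.g. an `extern "C" fn` without the `#[unwind]` attribute
// cannot.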
2421
2422 impl<'tcx, C> FnAbiExt<'tcx, C> for call::FnAbi<'tcx, Ty<'tcx>>
2423 where
2424     C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout = TyAndLayout<'tcx>>
2425         + HasDataLayout
2426         + HasTargetSpec
2427         + HasTyCtxt<'tcx>
2428         + HasParamEnv<'tcx>,
2429 {
2430     fn of_fn_ptr(cx: &C, sig: ty::PolyFnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
2431         // Assume that fn pointers may always unwind
2432         let codegen_fn_attr_flags = CodegenFnAttrFlags::UNWIND;
2433
2434         call::FnAbi::new_internal(cx, sig, extra_args, None, codegen_fn_attr_flags, |ty, _| {
2435             ArgAbi::new(cx.layout_of(ty))
2436         })
2437     }
2438
2439     fn of_instance(cx: &C, instance: ty::Instance<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
2440         let sig = instance.fn_sig_for_fn_abi(cx.tcx());
2441
2442         let caller_location = if instance.def.requires_caller_location(cx.tcx()) {
2443             Some(cx.tcx().caller_location_ty())
2444         } else {
2445             None
2446         };
2447
2448         let attrs = cx.tcx().codegen_fn_attrs(instance.def_id()).flags;
2449
2450         call::FnAbi::new_internal(cx, sig, extra_args, caller_location, attrs, |ty, arg_idx| {
2451             let mut layout = cx.layout_of(ty);
2452             // Don't pass the vtable, it's not an argument of the virtual fn.
2453             // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
2454             // or `&/&mut dyn Trait` because this is special-cased elsewhere in codegen.
2455             if let (ty::InstanceDef::Virtual(..), Some(0)) = (&instance.def, arg_idx) {
2456                 let fat_pointer_ty = if layout.is_unsized() {
2457                     // unsized `self` is passed as a pointer to `self`
2458                     // FIXME (mikeyhew) change this to use &own if it is ever added to the language
2459                     cx.tcx().mk_mut_ptr(layout.ty)
2460                 } else {
2461                     match layout.abi {
2462                         Abi::ScalarPair(..) => (),
2463                         _ => bug!("receiver type has unsupported layout: {:?}", layout),
2464                     }
2465
2466                     // In the case of Rc<Self>, we need to explicitly pass a *mut RcBox<Self>
2467                     // with a Scalar (not ScalarPair) ABI. This is a hack that is understood
2468                     // elsewhere in the compiler as a method on a `dyn Trait`.
2469                     // To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes until we
2470                     // get a built-in pointer type.
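                    // Illustrative descent for an `Rc<Self>` receiver
                    // (intermediate types schematic, ZST fields skipped):
                    //   Rc<Self> -> NonNull<RcBox<Self>> -> *const RcBox<Self>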
2471                     let mut fat_pointer_layout = layout;
2472                     'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr()
2473                         && !fat_pointer_layout.ty.is_region_ptr()
2474                     {
2475                         for i in 0..fat_pointer_layout.fields.count() {
2476                             let field_layout = fat_pointer_layout.field(cx, i);
2477
2478                             if !field_layout.is_zst() {
2479                                 fat_pointer_layout = field_layout;
2480                                 continue 'descend_newtypes;
2481                             }
2482                         }
2483
2484                         bug!("receiver has no non-zero-sized fields {:?}", fat_pointer_layout);
2485                     }
2486
2487                     fat_pointer_layout.ty
2488                 };
2489
2490                 // We now have a type like `*mut RcBox<dyn Trait>`;
2491                 // change its layout to that of `*mut ()`, a thin pointer, but keep the same type.
2492                 // This is understood as a special case elsewhere in the compiler.
2493                 let unit_pointer_ty = cx.tcx().mk_mut_ptr(cx.tcx().mk_unit());
2494                 layout = cx.layout_of(unit_pointer_ty);
2495                 layout.ty = fat_pointer_ty;
2496             }
2497             ArgAbi::new(layout)
2498         })
2499     }
2500
2501     fn new_internal(
2502         cx: &C,
2503         sig: ty::PolyFnSig<'tcx>,
2504         extra_args: &[Ty<'tcx>],
2505         caller_location: Option<Ty<'tcx>>,
2506         codegen_fn_attr_flags: CodegenFnAttrFlags,
2507         mk_arg_type: impl Fn(Ty<'tcx>, Option<usize>) -> ArgAbi<'tcx, Ty<'tcx>>,
2508     ) -> Self {
2509         debug!("FnAbi::new_internal({:?}, {:?})", sig, extra_args);
2510
2511         let sig = cx.tcx().normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig);
2512
2513         use rustc_target::spec::abi::Abi::*;
2514         let conv = match cx.tcx().sess.target.target.adjust_abi(sig.abi) {
2515             RustIntrinsic | PlatformIntrinsic | Rust | RustCall => Conv::Rust,
2516
2517             // It's the ABI's job to select this, not ours.
2518             System => bug!("system abi should be selected elsewhere"),
2519             EfiApi => bug!("eficall abi should be selected elsewhere"),
2520
2521             Stdcall => Conv::X86Stdcall,
2522             Fastcall => Conv::X86Fastcall,
2523             Vectorcall => Conv::X86VectorCall,
2524             Thiscall => Conv::X86ThisCall,
2525             C => Conv::C,
2526             Unadjusted => Conv::C,
2527             Win64 => Conv::X86_64Win64,
2528             SysV64 => Conv::X86_64SysV,
2529             Aapcs => Conv::ArmAapcs,
2530             PtxKernel => Conv::PtxKernel,
2531             Msp430Interrupt => Conv::Msp430Intr,
2532             X86Interrupt => Conv::X86Intr,
2533             AmdGpuKernel => Conv::AmdGpuKernel,
2534             AvrInterrupt => Conv::AvrInterrupt,
2535             AvrNonBlockingInterrupt => Conv::AvrNonBlockingInterrupt,
2536
2537             // These ABI constants ought to be more specific...
2538             Cdecl => Conv::C,
2539         };
2540
2541         let mut inputs = sig.inputs();
2542         let extra_args = if sig.abi == RustCall {
2543             assert!(!sig.c_variadic && extra_args.is_empty());
2544
2545             if let Some(input) = sig.inputs().last() {
2546                 if let ty::Tuple(tupled_arguments) = input.kind {
2547                     inputs = &sig.inputs()[0..sig.inputs().len() - 1];
2548                     tupled_arguments.iter().map(|k| k.expect_ty()).collect()
2549                 } else {
2550                     bug!(
2551                         "argument to function with \"rust-call\" ABI \
2552                             is not a tuple"
2553                     );
2554                 }
2555             } else {
2556                 bug!(
2557                     "function with \"rust-call\" ABI \
2558                         has no argument to untuple"
2559                 );
2560             }
2561         } else {
2562             assert!(sig.c_variadic || extra_args.is_empty());
2563             extra_args.to_vec()
2564         };
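        // Illustrative example: for a hypothetical
        // `extern "rust-call" fn(self, args: (i32, u64))`, the trailing tuple
        // is untupled above, leaving `inputs = [self]` and
        // `extra_args = [i32, u64]`.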
2565
2566         let target = &cx.tcx().sess.target.target;
2567         let target_env_gnu_like = matches!(&target.target_env[..], "gnu" | "musl");
2568         let win_x64_gnu =
2569             target.target_os == "windows" && target.arch == "x86_64" && target.target_env == "gnu";
2570         let linux_s390x_gnu_like =
2571             target.target_os == "linux" && target.arch == "s390x" && target_env_gnu_like;
2572         let linux_sparc64_gnu_like =
2573             target.target_os == "linux" && target.arch == "sparc64" && target_env_gnu_like;
2574         let linux_powerpc_gnu_like =
2575             target.target_os == "linux" && target.arch == "powerpc" && target_env_gnu_like;
2576         let rust_abi = matches!(
2577             sig.abi,
2578             RustIntrinsic | PlatformIntrinsic | Rust | RustCall
2579         );
2580
2581         // Handle safe Rust thin and fat pointers.
2582         let adjust_for_rust_scalar = |attrs: &mut ArgAttributes,
2583                                       scalar: &Scalar,
2584                                       layout: TyAndLayout<'tcx>,
2585                                       offset: Size,
2586                                       is_return: bool| {
2587             // Booleans are always an i1 that needs to be zero-extended.
2588             if scalar.is_bool() {
2589                 attrs.set(ArgAttribute::ZExt);
2590                 return;
2591             }
2592
2593             // Only pointer types are handled below.
2594             if scalar.value != Pointer {
2595                 return;
2596             }
2597
2598             if scalar.valid_range.start() < scalar.valid_range.end() {
2599                 if *scalar.valid_range.start() > 0 {
2600                     attrs.set(ArgAttribute::NonNull);
2601                 }
2602             }
2603
2604             if let Some(pointee) = layout.pointee_info_at(cx, offset) {
2605                 if let Some(kind) = pointee.safe {
2606                     attrs.pointee_align = Some(pointee.align);
2607
2608                     // `Box` (`UniqueOwned`) pointers are not necessarily dereferenceable
2609                     // for the entire duration of the function as they can be deallocated
2610                     // at any time. Set their valid size to 0.
2611                     attrs.pointee_size = match kind {
2612                         PointerKind::UniqueOwned => Size::ZERO,
2613                         _ => pointee.size,
2614                     };
2615
2616                     // `Box` pointer parameters never alias because ownership is transferred.
2617                     // `&mut` pointer parameters never alias other parameters
2618                     // or mutable global data.
2619                     //
2620                     // `&T` where `T` contains no `UnsafeCell<U>` is immutable,
2621                     // and can be marked as both `readonly` and `noalias`, as
2622                     // LLVM's definition of `noalias` is based solely on memory
2623                     // dependencies rather than pointer equality
2624                     let no_alias = match kind {
2625                         PointerKind::Shared => false,
2626                         PointerKind::UniqueOwned => true,
2627                         PointerKind::Frozen | PointerKind::UniqueBorrowed => !is_return,
2628                     };
2629                     if no_alias {
2630                         attrs.set(ArgAttribute::NoAlias);
2631                     }
2632
2633                     if kind == PointerKind::Frozen && !is_return {
2634                         attrs.set(ArgAttribute::ReadOnly);
2635                     }
2636                 }
2637             }
2638         };
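        // Illustrative outcomes of the closure above for argument (non-return)
        // positions:
        //   `&T` where `T: Freeze` -> nonnull + noalias + readonly
        //   `&mut T`               -> nonnull + noalias
        //   `Box<T>`               -> nonnull + noalias, pointee size 0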
2639
2640         let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| {
2641             let is_return = arg_idx.is_none();
2642             let mut arg = mk_arg_type(ty, arg_idx);
2643             if arg.layout.is_zst() {
2644                 // For some forsaken reason, x86_64-pc-windows-gnu
2645                 // doesn't ignore zero-sized struct arguments.
2646                 // The same is true for {s390x,sparc64,powerpc}-unknown-linux-{gnu,musl}.
2647                 if is_return
2648                     || rust_abi
2649                     || (!win_x64_gnu
2650                         && !linux_s390x_gnu_like
2651                         && !linux_sparc64_gnu_like
2652                         && !linux_powerpc_gnu_like)
2653                 {
2654                     arg.mode = PassMode::Ignore;
2655                 }
2656             }
2657
2658             // FIXME(eddyb) other ABIs don't have logic for scalar pairs.
2659             if !is_return && rust_abi {
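                // Illustrative example: `&str` and `&[T]` have
                // `Abi::ScalarPair` layouts (data pointer plus length), so
                // under the Rust ABI they are passed as two scalar components
                // rather than through memory.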
2660                 if let Abi::ScalarPair(ref a, ref b) = arg.layout.abi {
2661                     let mut a_attrs = ArgAttributes::new();
2662                     let mut b_attrs = ArgAttributes::new();
2663                     adjust_for_rust_scalar(&mut a_attrs, a, arg.layout, Size::ZERO, false);
2664                     adjust_for_rust_scalar(
2665                         &mut b_attrs,
2666                         b,
2667                         arg.layout,
2668                         a.value.size(cx).align_to(b.value.align(cx).abi),
2669                         false,
2670                     );
2671                     arg.mode = PassMode::Pair(a_attrs, b_attrs);
2672                     return arg;
2673                 }
2674             }
2675
2676             if let Abi::Scalar(ref scalar) = arg.layout.abi {
2677                 if let PassMode::Direct(ref mut attrs) = arg.mode {
2678                     adjust_for_rust_scalar(attrs, scalar, arg.layout, Size::ZERO, is_return);
2679                 }
2680             }
2681
2682             arg
2683         };
2684
2685         let mut fn_abi = FnAbi {
2686             ret: arg_of(sig.output(), None),
2687             args: inputs
2688                 .iter()
2689                 .cloned()
2690                 .chain(extra_args)
2691                 .chain(caller_location)
2692                 .enumerate()
2693                 .map(|(i, ty)| arg_of(ty, Some(i)))
2694                 .collect(),
2695             c_variadic: sig.c_variadic,
2696             fixed_count: inputs.len(),
2697             conv,
2698             can_unwind: fn_can_unwind(cx.tcx().sess.panic_strategy(), codegen_fn_attr_flags, conv),
2699         };
2700         fn_abi.adjust_for_abi(cx, sig.abi);
2701         fn_abi
2702     }
2703
2704     fn adjust_for_abi(&mut self, cx: &C, abi: SpecAbi) {
2705         if abi == SpecAbi::Unadjusted {
2706             return;
2707         }
2708
2709         if abi == SpecAbi::Rust
2710             || abi == SpecAbi::RustCall
2711             || abi == SpecAbi::RustIntrinsic
2712             || abi == SpecAbi::PlatformIntrinsic
2713         {
2714             let fixup = |arg: &mut ArgAbi<'tcx, Ty<'tcx>>| {
2715                 if arg.is_ignore() {
2716                     return;
2717                 }
2718
2719                 match arg.layout.abi {
2720                     Abi::Aggregate { .. } => {}
2721
2722                     // This is a fun case! The gist of what this is doing is
2723                     // that we want callers and callees to always agree on the
2724                     // ABI of how they pass SIMD arguments. If we were to *not*
2725                     // make these arguments indirect then they'd be immediates
2726                     // in LLVM, which means that they'd use whatever the
2727                     // appropriate ABI is for the callee and the caller. That
2728                     // means, for example, if the caller doesn't have AVX
2729                     // enabled but the callee does, then passing an AVX argument
2730                     // across this boundary would cause corrupt data to show up.
2731                     //
2732                     // This problem is fixed by unconditionally passing SIMD
2733                     // arguments through memory between callers and callees
2734                     // which should get them all to agree on ABI regardless of
2735                     // target feature sets. Some more information about this
2736                     // issue can be found in #44367.
2737                     //
2738                     // Note that the platform intrinsic ABI is exempt here as
2739                     // that's how we connect up to LLVM and it's unstable
2740                     // anyway; we control all calls to it in libstd.
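                    //
                    // Illustrative scenario: a 256-bit vector argument passed
                    // from a caller compiled without AVX to a callee compiled
                    // with `#[target_feature(enable = "avx")]` would otherwise
                    // be placed in registers differently on each side.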
2741                     Abi::Vector { .. }
2742                         if abi != SpecAbi::PlatformIntrinsic
2743                             && cx.tcx().sess.target.target.options.simd_types_indirect =>
2744                     {
2745                         arg.make_indirect();
2746                         return;
2747                     }
2748
2749                     _ => return,
2750                 }
2751
2752                 let size = arg.layout.size;
2753                 if arg.layout.is_unsized() || size > Pointer.size(cx) {
2754                     arg.make_indirect();
2755                 } else {
2756                     // We want to pass small aggregates as immediates, but using
2757                     // an LLVM aggregate type for this leads to bad optimizations,
2758                     // so we pick an appropriately sized integer type instead.
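                    // Illustrative example: on a 64-bit target, a 2-byte
                    // aggregate such as a hypothetical `struct Pair(u8, u8)`
                    // is cast to a 16-bit integer register class here rather
                    // than kept as an LLVM `{ i8, i8 }` aggregate.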
2759                     arg.cast_to(Reg { kind: RegKind::Integer, size });
2760                 }
2761             };
2762             fixup(&mut self.ret);
2763             for arg in &mut self.args {
2764                 fixup(arg);
2765             }
2766             if let PassMode::Indirect(ref mut attrs, _) = self.ret.mode {
2767                 attrs.set(ArgAttribute::StructRet);
2768             }
2769             return;
2770         }
2771
2772         if let Err(msg) = self.adjust_for_cabi(cx, abi) {
2773             cx.tcx().sess.fatal(&msg);
2774         }
2775     }
2776 }