git.lizzy.rs Git - rust.git/blob - src/librustc/ty/layout.rs
Commit: Use smaller discriminants for generators
use crate::session::{self, DataTypeKind};
use crate::ty::{self, subst::SubstsRef, ReprOptions, Ty, TyCtxt, TypeFoldable};

use rustc_ast::ast::{self, Ident, IntTy, UintTy};
use rustc_attr as attr;
use rustc_span::DUMMY_SP;

use std::cmp;
use std::fmt;
use std::iter;
use std::mem;
use std::ops::Bound;

use crate::ich::StableHashingContext;
use crate::mir::{GeneratorLayout, GeneratorSavedLocal};
use crate::ty::subst::Subst;
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_hir as hir;
use rustc_index::bit_set::BitSet;
use rustc_index::vec::{Idx, IndexVec};

use rustc_target::abi::call::{
    ArgAbi, ArgAttribute, ArgAttributes, Conv, FnAbi, PassMode, Reg, RegKind,
};
pub use rustc_target::abi::*;
use rustc_target::spec::{abi::Abi as SpecAbi, HasTargetSpec};

pub trait IntegerExt {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx>;
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
    fn repr_discr<'tcx>(
        tcx: TyCtxt<'tcx>,
        ty: Ty<'tcx>,
        repr: &ReprOptions,
        min: i128,
        max: i128,
    ) -> (Integer, bool);
}

impl IntegerExt for Integer {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx> {
        match (*self, signed) {
            (I8, false) => tcx.types.u8,
            (I16, false) => tcx.types.u16,
            (I32, false) => tcx.types.u32,
            (I64, false) => tcx.types.u64,
            (I128, false) => tcx.types.u128,
            (I8, true) => tcx.types.i8,
            (I16, true) => tcx.types.i16,
            (I32, true) => tcx.types.i32,
            (I64, true) => tcx.types.i64,
            (I128, true) => tcx.types.i128,
        }
    }

    /// Gets the Integer type from an attr::IntType.
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
        let dl = cx.data_layout();

        match ity {
            attr::SignedInt(IntTy::I8) | attr::UnsignedInt(UintTy::U8) => I8,
            attr::SignedInt(IntTy::I16) | attr::UnsignedInt(UintTy::U16) => I16,
            attr::SignedInt(IntTy::I32) | attr::UnsignedInt(UintTy::U32) => I32,
            attr::SignedInt(IntTy::I64) | attr::UnsignedInt(UintTy::U64) => I64,
            attr::SignedInt(IntTy::I128) | attr::UnsignedInt(UintTy::U128) => I128,
            attr::SignedInt(IntTy::Isize) | attr::UnsignedInt(UintTy::Usize) => {
                dl.ptr_sized_integer()
            }
        }
    }

    /// Finds the appropriate Integer type and signedness for the given
    /// signed discriminant range and #[repr] attribute.
    /// N.B.: u128 values above i128::MAX will be treated as signed, but
    /// that shouldn't affect anything, other than maybe debuginfo.
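    ///
    /// Illustrative examples (assumed, for exposition, not part of the
    /// original doc): with no `#[repr]` hint, a discriminant range of
    /// `0..=255` yields `(I8, false)`, while `-129..=0` needs `(I16, true)`.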
    fn repr_discr<'tcx>(
        tcx: TyCtxt<'tcx>,
        ty: Ty<'tcx>,
        repr: &ReprOptions,
        min: i128,
        max: i128,
    ) -> (Integer, bool) {
        // Theoretically, negative values could be larger in unsigned representation
        // than the unsigned representation of the signed minimum. However, if there
        // are any negative values, the only valid unsigned representation is u128
        // which can fit all i128 values, so the result remains unaffected.
        let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
        let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));

        let mut min_from_extern = None;
        let min_default = I8;

        if let Some(ity) = repr.int {
            let discr = Integer::from_attr(&tcx, ity);
            let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
            if discr < fit {
                bug!(
                    "Integer::repr_discr: `#[repr]` hint too small for \
                     discriminant range of enum `{}`",
                    ty
                )
            }
            return (discr, ity.is_signed());
        }

        if repr.c() {
            match &tcx.sess.target.target.arch[..] {
                // WARNING: the ARM EABI has two variants; the one corresponding
                // to `at_least == I32` appears to be used on Linux and NetBSD,
                // but some systems may use the variant corresponding to no
                // lower bound. However, we don't run on those yet...?
                "arm" => min_from_extern = Some(I32),
                _ => min_from_extern = Some(I32),
            }
        }

        let at_least = min_from_extern.unwrap_or(min_default);

        // If there are no negative values, we can use the unsigned fit.
        if min >= 0 {
            (cmp::max(unsigned_fit, at_least), false)
        } else {
            (cmp::max(signed_fit, at_least), true)
        }
    }
}

pub trait PrimitiveExt {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
    fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
}

impl PrimitiveExt for Primitive {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
        match *self {
            Int(i, signed) => i.to_ty(tcx, signed),
            F32 => tcx.types.f32,
            F64 => tcx.types.f64,
            Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
        }
    }

    /// Return an *integer* type matching this primitive.
    /// Useful in particular when dealing with enum discriminants.
    fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
        match *self {
            Int(i, signed) => i.to_ty(tcx, signed),
            Pointer => tcx.types.usize,
            F32 | F64 => bug!("floats do not have an int type"),
        }
    }
}

/// The first half of a fat pointer.
///
/// - For a trait object, this is the address of the box.
/// - For a slice, this is the base address.
pub const FAT_PTR_ADDR: usize = 0;

/// The second half of a fat pointer.
///
/// - For a trait object, this is the address of the vtable.
/// - For a slice, this is the length.
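///
/// For example, `&[u8]` is represented as the pair `(data_ptr, len)`, so
/// field `FAT_PTR_EXTRA` is its `usize` length.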
pub const FAT_PTR_EXTRA: usize = 1;

#[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable)]
pub enum LayoutError<'tcx> {
    Unknown(Ty<'tcx>),
    SizeOverflow(Ty<'tcx>),
}

impl<'tcx> fmt::Display for LayoutError<'tcx> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match *self {
            LayoutError::Unknown(ty) => write!(f, "the type `{:?}` has an unknown layout", ty),
            LayoutError::SizeOverflow(ty) => {
                write!(f, "the type `{:?}` is too big for the current architecture", ty)
            }
        }
    }
}

fn layout_raw<'tcx>(
    tcx: TyCtxt<'tcx>,
    query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
) -> Result<&'tcx LayoutDetails, LayoutError<'tcx>> {
    ty::tls::with_related_context(tcx, move |icx| {
        let rec_limit = *tcx.sess.recursion_limit.get();
        let (param_env, ty) = query.into_parts();

        if icx.layout_depth > rec_limit {
            tcx.sess.fatal(&format!("overflow representing the type `{}`", ty));
        }

        // Update the ImplicitCtxt to increase the layout_depth
        let icx = ty::tls::ImplicitCtxt { layout_depth: icx.layout_depth + 1, ..icx.clone() };

        ty::tls::enter_context(&icx, |_| {
            let cx = LayoutCx { tcx, param_env };
            let layout = cx.layout_raw_uncached(ty);
            // Type-level uninhabitedness should always imply ABI uninhabitedness.
            if let Ok(layout) = layout {
                if ty.conservative_is_privately_uninhabited(tcx) {
                    assert!(layout.abi.is_uninhabited());
                }
            }
            layout
        })
    })
}

pub fn provide(providers: &mut ty::query::Providers<'_>) {
    *providers = ty::query::Providers { layout_raw, ..*providers };
}

pub struct LayoutCx<'tcx, C> {
    pub tcx: C,
    pub param_env: ty::ParamEnv<'tcx>,
}

#[derive(Copy, Clone, Debug)]
enum StructKind {
    /// A tuple, closure, or univariant which cannot be coerced to unsized.
    AlwaysSized,
    /// A univariant, the last field of which may be coerced to unsized.
    MaybeUnsized,
    /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
    Prefixed(Size, Align),
}

// Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
// This is used to go between `memory_index` (source field order to memory order)
// and `inverse_memory_index` (memory order to source field order).
// See also `FieldPlacement::Arbitrary::memory_index` for more details.
// FIXME(eddyb) build a better abstraction for permutations, if possible.
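// For example (illustrative): `invert_mapping(&[2, 0, 1])` returns `[1, 2, 0]`.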
fn invert_mapping(map: &[u32]) -> Vec<u32> {
    let mut inverse = vec![0; map.len()];
    for i in 0..map.len() {
        inverse[map[i] as usize] = i as u32;
    }
    inverse
}

impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
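    /// A worked illustration (assumed, for exposition): for `a = u8` and
    /// `b = u32` on a typical 64-bit target, `b_offset` is 4 (the `u8` is
    /// padded up to `u32`'s alignment) and the overall size is 8.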
    fn scalar_pair(&self, a: Scalar, b: Scalar) -> LayoutDetails {
        let dl = self.data_layout();
        let b_align = b.value.align(dl);
        let align = a.value.align(dl).max(b_align).max(dl.aggregate_align);
        let b_offset = a.value.size(dl).align_to(b_align.abi);
        let size = (b_offset + b.value.size(dl)).align_to(align.abi);

        // HACK(nox): We iter on `b` and then `a` because `max_by_key`
        // returns the last maximum.
        let largest_niche = Niche::from_scalar(dl, b_offset, b.clone())
            .into_iter()
            .chain(Niche::from_scalar(dl, Size::ZERO, a.clone()))
            .max_by_key(|niche| niche.available(dl));

        LayoutDetails {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldPlacement::Arbitrary {
                offsets: vec![Size::ZERO, b_offset],
                memory_index: vec![0, 1],
            },
            abi: Abi::ScalarPair(a, b),
            largest_niche,
            align,
            size,
        }
    }

    fn univariant_uninterned(
        &self,
        ty: Ty<'tcx>,
        fields: &[TyLayout<'_>],
        repr: &ReprOptions,
        kind: StructKind,
    ) -> Result<LayoutDetails, LayoutError<'tcx>> {
        let dl = self.data_layout();
        let pack = repr.pack;
        if pack.is_some() && repr.align.is_some() {
            bug!("struct cannot be packed and aligned");
        }

        let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };

        let mut sized = true;
        let mut offsets = vec![Size::ZERO; fields.len()];
        let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();

        let mut optimize = !repr.inhibit_struct_field_reordering_opt();
        if let StructKind::Prefixed(_, align) = kind {
            optimize &= align.bytes() == 1;
        }

        if optimize {
            let end =
                if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
            let optimizing = &mut inverse_memory_index[..end];
            let field_align = |f: &TyLayout<'_>| {
                if let Some(pack) = pack { f.align.abi.min(pack) } else { f.align.abi }
            };
            match kind {
                StructKind::AlwaysSized | StructKind::MaybeUnsized => {
                    optimizing.sort_by_key(|&x| {
                        // Place ZSTs first to avoid "interesting offsets",
                        // especially with only one or two non-ZST fields.
                        let f = &fields[x as usize];
                        (!f.is_zst(), cmp::Reverse(field_align(f)))
                    });
                }
                StructKind::Prefixed(..) => {
                    optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
                }
            }
        }
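        // For example (illustrative): a plain struct with source fields
        // `(u8, u32, u16)` and no repr attributes is reordered in memory to
        // `[u32, u16, u8]`, since non-ZST fields are sorted by decreasing
        // alignment.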

        // inverse_memory_index holds field indices by increasing memory offset.
        // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
        // We now write field offsets to the corresponding offset slot;
        // field 5 with offset 0 puts 0 in offsets[5].
        // At the bottom of this function, we invert `inverse_memory_index` to
        // produce `memory_index` (see `invert_mapping`).

        let mut offset = Size::ZERO;
        let mut largest_niche = None;
        let mut largest_niche_available = 0;

        if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
            let prefix_align =
                if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
            align = align.max(AbiAndPrefAlign::new(prefix_align));
            offset = prefix_size.align_to(prefix_align);
        }

        for &i in &inverse_memory_index {
            let field = fields[i as usize];
            if !sized {
                bug!("univariant: field #{} of `{}` comes after unsized field", offsets.len(), ty);
            }

            if field.is_unsized() {
                sized = false;
            }

            // Invariant: offset < dl.obj_size_bound() <= 1<<61
            let field_align = if let Some(pack) = pack {
                field.align.min(AbiAndPrefAlign::new(pack))
            } else {
                field.align
            };
            offset = offset.align_to(field_align.abi);
            align = align.max(field_align);

            debug!("univariant offset: {:?} field: {:#?}", offset, field);
            offsets[i as usize] = offset;

            if !repr.hide_niche() {
                if let Some(mut niche) = field.largest_niche.clone() {
                    let available = niche.available(dl);
                    if available > largest_niche_available {
                        largest_niche_available = available;
                        niche.offset += offset;
                        largest_niche = Some(niche);
                    }
                }
            }

            offset = offset.checked_add(field.size, dl).ok_or(LayoutError::SizeOverflow(ty))?;
        }

        if let Some(repr_align) = repr.align {
            align = align.max(AbiAndPrefAlign::new(repr_align));
        }

        debug!("univariant min_size: {:?}", offset);
        let min_size = offset;

        // As stated above, inverse_memory_index holds field indices by increasing offset.
        // This makes it an already-sorted view of the offsets vec.
        // To invert it, consider:
        // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
        // Field 5 would be the first element, so memory_index is i:
        // Note: if we didn't optimize, it's already right.

        let memory_index;
        if optimize {
            memory_index = invert_mapping(&inverse_memory_index);
        } else {
            memory_index = inverse_memory_index;
        }

        let size = min_size.align_to(align.abi);
        let mut abi = Abi::Aggregate { sized };

        // Unpack newtype ABIs and find scalar pairs.
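        // For example (illustrative): `struct Wrapper(u32)` can reuse its
        // field's `Abi::Scalar`, and a struct whose two non-ZST scalar fields
        // line up with a `ScalarPair` layout can reuse `Abi::ScalarPair`.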
        if sized && size.bytes() > 0 {
            // All other fields must be ZSTs, and we need them to all start at 0.
            let mut zst_offsets = offsets.iter().enumerate().filter(|&(i, _)| fields[i].is_zst());
            if zst_offsets.all(|(_, o)| o.bytes() == 0) {
                let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());

                match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
                    // We have exactly one non-ZST field.
                    (Some((i, field)), None, None) => {
                        // Field fills the struct and it has a scalar or scalar pair ABI.
                        if offsets[i].bytes() == 0
                            && align.abi == field.align.abi
                            && size == field.size
                        {
                            match field.abi {
                                // For plain scalars, or vectors of them, we can't unpack
                                // newtypes for `#[repr(C)]`, as that affects C ABIs.
                                Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
                                    abi = field.abi.clone();
                                }
                                // But scalar pairs are Rust-specific and get
                                // treated as aggregates by C ABIs anyway.
                                Abi::ScalarPair(..) => {
                                    abi = field.abi.clone();
                                }
                                _ => {}
                            }
                        }
                    }

                    // Two non-ZST fields, and they're both scalars.
                    (
                        Some((
                            i,
                            &TyLayout {
                                details: &LayoutDetails { abi: Abi::Scalar(ref a), .. },
                                ..
                            },
                        )),
                        Some((
                            j,
                            &TyLayout {
                                details: &LayoutDetails { abi: Abi::Scalar(ref b), .. },
                                ..
                            },
                        )),
                        None,
                    ) => {
                        // Order by the memory placement, not source order.
                        let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
                            ((i, a), (j, b))
                        } else {
                            ((j, b), (i, a))
                        };
                        let pair = self.scalar_pair(a.clone(), b.clone());
                        let pair_offsets = match pair.fields {
                            FieldPlacement::Arbitrary { ref offsets, ref memory_index } => {
                                assert_eq!(memory_index, &[0, 1]);
                                offsets
                            }
                            _ => bug!(),
                        };
                        if offsets[i] == pair_offsets[0]
                            && offsets[j] == pair_offsets[1]
                            && align == pair.align
                            && size == pair.size
                        {
                            // We can use `ScalarPair` only when it matches our
                            // already computed layout (including `#[repr(C)]`).
                            abi = pair.abi;
                        }
                    }

                    _ => {}
                }
            }
        }

        if sized && fields.iter().any(|f| f.abi.is_uninhabited()) {
            abi = Abi::Uninhabited;
        }

        Ok(LayoutDetails {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldPlacement::Arbitrary { offsets, memory_index },
            abi,
            largest_niche,
            align,
            size,
        })
    }

    fn layout_raw_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx LayoutDetails, LayoutError<'tcx>> {
        let tcx = self.tcx;
        let param_env = self.param_env;
        let dl = self.data_layout();
        let scalar_unit = |value: Primitive| {
            let bits = value.size(dl).bits();
            assert!(bits <= 128);
            Scalar { value, valid_range: 0..=(!0 >> (128 - bits)) }
        };
        let scalar =
            |value: Primitive| tcx.intern_layout(LayoutDetails::scalar(self, scalar_unit(value)));

        let univariant = |fields: &[TyLayout<'_>], repr: &ReprOptions, kind| {
            Ok(tcx.intern_layout(self.univariant_uninterned(ty, fields, repr, kind)?))
        };
        debug_assert!(!ty.has_infer_types_or_consts());

        Ok(match ty.kind {
            // Basic scalars.
            ty::Bool => tcx.intern_layout(LayoutDetails::scalar(
                self,
                Scalar { value: Int(I8, false), valid_range: 0..=1 },
            )),
            ty::Char => tcx.intern_layout(LayoutDetails::scalar(
                self,
                Scalar { value: Int(I32, false), valid_range: 0..=0x10FFFF },
            )),
            ty::Int(ity) => scalar(Int(Integer::from_attr(dl, attr::SignedInt(ity)), true)),
            ty::Uint(ity) => scalar(Int(Integer::from_attr(dl, attr::UnsignedInt(ity)), false)),
            ty::Float(fty) => scalar(match fty {
                ast::FloatTy::F32 => F32,
                ast::FloatTy::F64 => F64,
            }),
            ty::FnPtr(_) => {
                let mut ptr = scalar_unit(Pointer);
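                // Function pointers are never null; starting the valid range
                // at 1 records that niche, which is what lets e.g.
                // `Option<fn()>` stay pointer-sized (illustrative example).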
                ptr.valid_range = 1..=*ptr.valid_range.end();
                tcx.intern_layout(LayoutDetails::scalar(self, ptr))
            }

            // The never type.
            ty::Never => tcx.intern_layout(LayoutDetails {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldPlacement::Union(0),
                abi: Abi::Uninhabited,
                largest_niche: None,
                align: dl.i8_align,
                size: Size::ZERO,
            }),

            // Potentially-fat pointers.
            ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                let mut data_ptr = scalar_unit(Pointer);
                if !ty.is_unsafe_ptr() {
                    data_ptr.valid_range = 1..=*data_ptr.valid_range.end();
                }

                let pointee = tcx.normalize_erasing_regions(param_env, pointee);
                if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
                    return Ok(tcx.intern_layout(LayoutDetails::scalar(self, data_ptr)));
                }

                let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
                let metadata = match unsized_part.kind {
                    ty::Foreign(..) => {
                        return Ok(tcx.intern_layout(LayoutDetails::scalar(self, data_ptr)));
                    }
                    ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)),
                    ty::Dynamic(..) => {
                        let mut vtable = scalar_unit(Pointer);
                        vtable.valid_range = 1..=*vtable.valid_range.end();
                        vtable
                    }
                    _ => return Err(LayoutError::Unknown(unsized_part)),
                };

                // Effectively a (ptr, meta) tuple.
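                // For example (illustrative): `&[T]` becomes
                // `(data_ptr, len: usize)` and `&dyn Trait` becomes
                // `(data_ptr, vtable_ptr)`.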
                tcx.intern_layout(self.scalar_pair(data_ptr, metadata))
            }

            // Arrays and slices.
            ty::Array(element, mut count) => {
                if count.has_projections() {
                    count = tcx.normalize_erasing_regions(param_env, count);
                    if count.has_projections() {
                        return Err(LayoutError::Unknown(ty));
                    }
                }

                let count = count.try_eval_usize(tcx, param_env).ok_or(LayoutError::Unknown(ty))?;
                let element = self.layout_of(element)?;
                let size =
                    element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;
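                // For example (illustrative): `[u16; 3]` has stride 2 and
                // size 6; a product exceeding the target's object-size bound
                // is reported as `SizeOverflow`.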

                let abi = if count != 0 && ty.conservative_is_privately_uninhabited(tcx) {
                    Abi::Uninhabited
                } else {
                    Abi::Aggregate { sized: true }
                };

                let largest_niche = if count != 0 { element.largest_niche.clone() } else { None };

                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldPlacement::Array { stride: element.size, count },
                    abi,
                    largest_niche,
                    align: element.align,
                    size,
                })
            }
            ty::Slice(element) => {
                let element = self.layout_of(element)?;
                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldPlacement::Array { stride: element.size, count: 0 },
                    abi: Abi::Aggregate { sized: false },
                    largest_niche: None,
                    align: element.align,
                    size: Size::ZERO,
                })
            }
            ty::Str => tcx.intern_layout(LayoutDetails {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldPlacement::Array { stride: Size::from_bytes(1), count: 0 },
                abi: Abi::Aggregate { sized: false },
                largest_niche: None,
                align: dl.i8_align,
                size: Size::ZERO,
            }),

            // Odd unit types.
            ty::FnDef(..) => univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?,
            ty::Dynamic(..) | ty::Foreign(..) => {
                let mut unit = self.univariant_uninterned(
                    ty,
                    &[],
                    &ReprOptions::default(),
                    StructKind::AlwaysSized,
                )?;
                match unit.abi {
                    Abi::Aggregate { ref mut sized } => *sized = false,
                    _ => bug!(),
                }
                tcx.intern_layout(unit)
            }

            ty::Generator(def_id, substs, _) => self.generator_layout(ty, def_id, substs)?,

            ty::Closure(def_id, ref substs) => {
                let tys = substs.as_closure().upvar_tys(def_id, tcx);
                univariant(
                    &tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    StructKind::AlwaysSized,
                )?
            }

            ty::Tuple(tys) => {
                let kind =
                    if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };

                univariant(
                    &tys.iter()
                        .map(|k| self.layout_of(k.expect_ty()))
                        .collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    kind,
                )?
            }

            // SIMD vector types.
            ty::Adt(def, ..) if def.repr.simd() => {
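                // For example (illustrative): a `#[repr(simd)]` struct of
                // four `f32` lanes gets `Abi::Vector { element: f32, count: 4 }`,
                // with size 16 rounded up to the target's vector alignment.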
                let element = self.layout_of(ty.simd_type(tcx))?;
                let count = ty.simd_size(tcx);
                assert!(count > 0);
                let scalar = match element.abi {
                    Abi::Scalar(ref scalar) => scalar.clone(),
                    _ => {
                        tcx.sess.fatal(&format!(
                            "monomorphising SIMD type `{}` with \
                             a non-machine element type `{}`",
                            ty, element.ty
                        ));
                    }
                };
                let size =
                    element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;
                let align = dl.vector_align(size);
                let size = size.align_to(align.abi);

                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldPlacement::Array { stride: element.size, count },
                    abi: Abi::Vector { element: scalar, count },
                    largest_niche: element.largest_niche.clone(),
                    size,
                    align,
                })
            }

            // ADTs.
            ty::Adt(def, substs) => {
                // Cache the field layouts.
                let variants = def
                    .variants
                    .iter()
                    .map(|v| {
                        v.fields
                            .iter()
                            .map(|field| self.layout_of(field.ty(tcx, substs)))
                            .collect::<Result<Vec<_>, _>>()
                    })
                    .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                if def.is_union() {
                    if def.repr.pack.is_some() && def.repr.align.is_some() {
                        bug!("union cannot be packed and aligned");
                    }

                    let mut align =
                        if def.repr.pack.is_some() { dl.i8_align } else { dl.aggregate_align };

                    if let Some(repr_align) = def.repr.align {
                        align = align.max(AbiAndPrefAlign::new(repr_align));
                    }

                    let optimize = !def.repr.inhibit_union_abi_opt();
                    let mut size = Size::ZERO;
                    let mut abi = Abi::Aggregate { sized: true };
                    let index = VariantIdx::new(0);
                    for field in &variants[index] {
                        assert!(!field.is_unsized());
                        align = align.max(field.align);

                        // If all non-ZST fields have the same ABI, forward this ABI
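                        // (Illustrative: a union of `u32` and `char` fields
                        // forwards `Abi::Scalar`, since `scalar_unit` widens
                        // both to the same full-range `I32`; a union of `u32`
                        // and `f32` resets to `Aggregate`.)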
                        if optimize && !field.is_zst() {
                            // Normalize scalar_unit to the maximal valid range
                            let field_abi = match &field.abi {
                                Abi::Scalar(x) => Abi::Scalar(scalar_unit(x.value)),
                                Abi::ScalarPair(x, y) => {
                                    Abi::ScalarPair(scalar_unit(x.value), scalar_unit(y.value))
                                }
                                Abi::Vector { element: x, count } => {
                                    Abi::Vector { element: scalar_unit(x.value), count: *count }
                                }
                                Abi::Uninhabited | Abi::Aggregate { .. } => {
                                    Abi::Aggregate { sized: true }
                                }
                            };

                            if size == Size::ZERO {
                                // first non ZST: initialize 'abi'
                                abi = field_abi;
                            } else if abi != field_abi {
                                // different fields have different ABI: reset to Aggregate
                                abi = Abi::Aggregate { sized: true };
                            }
                        }

                        size = cmp::max(size, field.size);
                    }

                    if let Some(pack) = def.repr.pack {
                        align = align.min(AbiAndPrefAlign::new(pack));
                    }

                    return Ok(tcx.intern_layout(LayoutDetails {
                        variants: Variants::Single { index },
                        fields: FieldPlacement::Union(variants[index].len()),
                        abi,
                        largest_niche: None,
                        align,
                        size: size.align_to(align.abi),
                    }));
                }

                // A variant is absent if it's uninhabited and only has ZST fields.
                // Present uninhabited variants only require space for their fields,
                // but *not* an encoding of the discriminant (e.g., a tag value).
                // See issue #49298 for more details on the need to leave space
                // for non-ZST uninhabited data (mostly partial initialization).
                let absent = |fields: &[TyLayout<'_>]| {
                    let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
                    let is_zst = fields.iter().all(|f| f.is_zst());
                    uninhabited && is_zst
                };
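                // (Illustrative: in `enum E { A, B(std::convert::Infallible) }`,
                // variant `B` is uninhabited and only has ZST fields, hence
                // absent, so `E` can be laid out like a struct holding only `A`.)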
                let (present_first, present_second) = {
                    let mut present_variants = variants
                        .iter_enumerated()
                        .filter_map(|(i, v)| if absent(v) { None } else { Some(i) });
                    (present_variants.next(), present_variants.next())
                };
                let present_first = match present_first {
                    present_first @ Some(_) => present_first,
                    // Uninhabited because it has no variants, or only absent ones.
                    None if def.is_enum() => return tcx.layout_raw(param_env.and(tcx.types.never)),
                    // If it's a struct, still compute a layout so that we can
                    // compute the field offsets.
                    None => Some(VariantIdx::new(0)),
                };

                let is_struct = !def.is_enum() ||
                    // Only one variant is present.
                    (present_second.is_none() &&
                    // Representation optimizations are allowed.
                    !def.repr.inhibit_enum_layout_opt());
                if is_struct {
                    // Struct, or univariant enum equivalent to a struct.
                    // (Typechecking will reject discriminant-sizing attrs.)

                    let v = present_first.unwrap();
                    let kind = if def.is_enum() || variants[v].is_empty() {
                        StructKind::AlwaysSized
                    } else {
                        let param_env = tcx.param_env(def.did);
                        let last_field = def.variants[v].fields.last().unwrap();
                        let always_sized =
                            tcx.type_of(last_field.did).is_sized(tcx.at(DUMMY_SP), param_env);
                        if !always_sized {
                            StructKind::MaybeUnsized
                        } else {
                            StructKind::AlwaysSized
                        }
                    };

                    let mut st = self.univariant_uninterned(ty, &variants[v], &def.repr, kind)?;
                    st.variants = Variants::Single { index: v };
                    let (start, end) = self.tcx.layout_scalar_valid_range(def.did);
                    match st.abi {
                        Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
                            // The asserts ensure that we are not using the
                            // `#[rustc_layout_scalar_valid_range(n)]`
                            // attribute to widen the range of anything, as that would
                            // probably result in UB somewhere.
                            // FIXME(eddyb) the asserts are probably not needed,
                            // as larger validity ranges would result in missed
                            // optimizations, *not* wrongly assuming the inner
                            // value is valid. e.g. unions enlarge validity ranges,
                            // because the values may be uninitialized.
                            if let Bound::Included(start) = start {
                                // FIXME(eddyb) this might be incorrect - it doesn't
                                // account for wrap-around (end < start) ranges.
                                assert!(*scalar.valid_range.start() <= start);
                                scalar.valid_range = start..=*scalar.valid_range.end();
                            }
                            if let Bound::Included(end) = end {
                                // FIXME(eddyb) this might be incorrect - it doesn't
                                // account for wrap-around (end < start) ranges.
                                assert!(*scalar.valid_range.end() >= end);
                                scalar.valid_range = *scalar.valid_range.start()..=end;
                            }

                            // Update `largest_niche` if we have introduced a larger niche.
                            let niche = if def.repr.hide_niche() {
                                None
                            } else {
                                Niche::from_scalar(dl, Size::ZERO, scalar.clone())
                            };
                            if let Some(niche) = niche {
                                match &st.largest_niche {
                                    Some(largest_niche) => {
                                        // Replace the existing niche even if they're equal,
                                        // because this one is at a lower offset.
                                        if largest_niche.available(dl) <= niche.available(dl) {
                                            st.largest_niche = Some(niche);
                                        }
                                    }
                                    None => st.largest_niche = Some(niche),
                                }
                            }
                        }
                        _ => assert!(
                            start == Bound::Unbounded && end == Bound::Unbounded,
                            "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
                            def,
                            st,
                        ),
                    }

                    return Ok(tcx.intern_layout(st));
                }

                // At this point, we have handled all unions and
                // structs. (We have also handled univariant enums
                // that allow representation optimization.)
                assert!(def.is_enum());

                // The current code for niche-filling relies on variant indices
                // instead of actual discriminants, so dataful enums with
                // explicit discriminants (RFC #2363) would misbehave.
                let no_explicit_discriminants = def
                    .variants
                    .iter_enumerated()
                    .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i.as_u32()));

                // Niche-filling enum optimization.
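                // (Illustrative: `Option<&T>` has dataful variant `Some` and a
                // non-null-pointer niche, so `None` is encoded as the forbidden
                // value 0 and the enum stays pointer-sized, with no extra tag.)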
                if !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants {
                    let mut dataful_variant = None;
                    let mut niche_variants = VariantIdx::MAX..=VariantIdx::new(0);

                    // Find one non-ZST variant.
                    'variants: for (v, fields) in variants.iter_enumerated() {
                        if absent(fields) {
                            continue 'variants;
                        }
                        for f in fields {
                            if !f.is_zst() {
                                if dataful_variant.is_none() {
                                    dataful_variant = Some(v);
                                    continue 'variants;
                                } else {
                                    dataful_variant = None;
                                    break 'variants;
                                }
                            }
                        }
                        niche_variants = *niche_variants.start().min(&v)..=v;
                    }

                    if niche_variants.start() > niche_variants.end() {
                        dataful_variant = None;
                    }

                    if let Some(i) = dataful_variant {
                        let count = (niche_variants.end().as_u32()
                            - niche_variants.start().as_u32()
                            + 1) as u128;
                        // FIXME(#62691) use the largest niche across all fields,
                        // not just the first one.
                        for (field_index, &field) in variants[i].iter().enumerate() {
                            let niche = match &field.largest_niche {
                                Some(niche) => niche,
                                _ => continue,
                            };
                            let (niche_start, niche_scalar) = match niche.reserve(self, count) {
                                Some(pair) => pair,
                                None => continue,
                            };

                            let mut align = dl.aggregate_align;
                            let st = variants
                                .iter_enumerated()
                                .map(|(j, v)| {
                                    let mut st = self.univariant_uninterned(
                                        ty,
                                        v,
                                        &def.repr,
                                        StructKind::AlwaysSized,
                                    )?;
                                    st.variants = Variants::Single { index: j };

                                    align = align.max(st.align);

                                    Ok(st)
                                })
                                .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                            let offset = st[i].fields.offset(field_index) + niche.offset;
                            let size = st[i].size;

                            let mut abi = match st[i].abi {
                                Abi::Scalar(_) => Abi::Scalar(niche_scalar.clone()),
                                Abi::ScalarPair(ref first, ref second) => {
                                    // We need to use scalar_unit to reset the
                                    // valid range to the maximal one for that
                                    // primitive, because only the niche is
                                    // guaranteed to be initialised, not the
                                    // other primitive.
                                    if offset.bytes() == 0 {
                                        Abi::ScalarPair(
                                            niche_scalar.clone(),
                                            scalar_unit(second.value),
                                        )
                                    } else {
                                        Abi::ScalarPair(
                                            scalar_unit(first.value),
                                            niche_scalar.clone(),
                                        )
                                    }
                                }
                                _ => Abi::Aggregate { sized: true },
                            };

                            if st.iter().all(|v| v.abi.is_uninhabited()) {
                                abi = Abi::Uninhabited;
                            }

                            let largest_niche =
                                Niche::from_scalar(dl, offset, niche_scalar.clone());

                            return Ok(tcx.intern_layout(LayoutDetails {
                                variants: Variants::Multiple {
                                    discr: niche_scalar,
                                    discr_kind: DiscriminantKind::Niche {
                                        dataful_variant: i,
                                        niche_variants,
                                        niche_start,
                                    },
                                    discr_index: 0,
                                    variants: st,
                                },
                                fields: FieldPlacement::Arbitrary {
                                    offsets: vec![offset],
                                    memory_index: vec![0],
                                },
                                abi,
                                largest_niche,
                                size,
                                align,
                            }));
                        }
                    }
                }

                let (mut min, mut max) = (i128::MAX, i128::MIN);
                let discr_type = def.repr.discr_type();
                let bits = Integer::from_attr(self, discr_type).size().bits();
                for (i, discr) in def.discriminants(tcx) {
                    if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
                        continue;
                    }
                    let mut x = discr.val as i128;
                    if discr_type.is_signed() {
                        // sign extend the raw representation to be an i128
                        x = (x << (128 - bits)) >> (128 - bits);
                    }
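                    // (Illustrative: a `repr(i8)` discriminant of -1 arrives as
                    // the raw value 0xff; `(0xff << 120) >> 120` as `i128`
                    // recovers -1.)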
                    if x < min {
                        min = x;
                    }
                    if x > max {
                        max = x;
                    }
                }
                // We might have no inhabited variants, so pretend there's at least one.
                if (min, max) == (i128::MAX, i128::MIN) {
                    min = 0;
                    max = 0;
                }
                assert!(min <= max, "discriminant range is {}...{}", min, max);
                let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max);

                let mut align = dl.aggregate_align;
                let mut size = Size::ZERO;

                // We're interested in the smallest alignment, so start large.
                let mut start_align = Align::from_bytes(256).unwrap();
                assert_eq!(Integer::for_align(dl, start_align), None);

                // repr(C) on an enum tells us to make a (tag, union) layout,
                // so we need to grow the prefix alignment to be at least
                // the alignment of the union. (This value is used both for
                // determining the alignment of the overall enum, and for
                // determining the alignment of the payload after the tag.)
                let mut prefix_align = min_ity.align(dl).abi;
                if def.repr.c() {
                    for fields in &variants {
                        for field in fields {
                            prefix_align = prefix_align.max(field.align.abi);
                        }
                    }
                }

                // Create the set of structs that represent each variant.
                let mut layout_variants = variants
                    .iter_enumerated()
                    .map(|(i, field_layouts)| {
                        let mut st = self.univariant_uninterned(
                            ty,
                            &field_layouts,
                            &def.repr,
                            StructKind::Prefixed(min_ity.size(), prefix_align),
                        )?;
                        st.variants = Variants::Single { index: i };
                        // Find the first field we can't move later
                        // to make room for a larger discriminant.
                        for field in
                            st.fields.index_by_increasing_offset().map(|j| field_layouts[j])
                        {
                            if !field.is_zst() || field.align.abi.bytes() != 1 {
                                start_align = start_align.min(field.align.abi);
                                break;
                            }
                        }
                        size = cmp::max(size, st.size);
                        align = align.max(st.align);
                        Ok(st)
                    })
                    .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                // Align the maximum variant size to the largest alignment.
                size = size.align_to(align.abi);

                if size.bytes() >= dl.obj_size_bound() {
                    return Err(LayoutError::SizeOverflow(ty));
                }

                let typeck_ity = Integer::from_attr(dl, def.repr.discr_type());
                if typeck_ity < min_ity {
                    // It is a bug if Layout decided on a greater discriminant size than typeck for
                    // some reason at this point (based on values discriminant can take on). Mostly
                    // because this discriminant will be loaded, and then stored into variable of
                    // type calculated by typeck. Consider such case (a bug): typeck decided on
                    // byte-sized discriminant, but layout thinks we need a 16-bit to store all
                    // discriminant values. That would be a bug, because then, in codegen, in order
                    // to store this 16-bit discriminant into 8-bit sized temporary some of the
                    // space necessary to represent would have to be discarded (or layout is wrong
                    // on thinking it needs 16 bits)
                    bug!(
                        "layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
                        min_ity,
                        typeck_ity
                    );
                    // However, it is fine to make discr type however large (as an optimisation)
                    // after this point - we'll just truncate the value we load in codegen.
                }

                // Check to see if we should use a different type for the
                // discriminant. We can safely use a type with the same size
                // as the alignment of the first field of each variant.
                // We increase the size of the discriminant to avoid LLVM copying
                // padding when it doesn't need to. This normally causes unaligned
                // load/stores and excessive memcpy/memset operations. By using a
                // bigger integer size, LLVM can be sure about its contents and
                // won't be so conservative.
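                // (Illustrative: `enum E { A(u64), B(u64) }` needs only an
                // 8-bit tag, but the payload alignment is 8, so the tag is
                // widened to an `I64` while the payloads stay at offset 8.)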

                // Use the initial field alignment
                let mut ity = if def.repr.c() || def.repr.int.is_some() {
                    min_ity
                } else {
                    Integer::for_align(dl, start_align).unwrap_or(min_ity)
                };

                // If the alignment is not larger than the chosen discriminant size,
                // don't use the alignment as the final size.
                if ity <= min_ity {
                    ity = min_ity;
                } else {
                    // Patch up the variants' first few fields.
                    let old_ity_size = min_ity.size();
                    let new_ity_size = ity.size();
                    for variant in &mut layout_variants {
                        match variant.fields {
                            FieldPlacement::Arbitrary { ref mut offsets, .. } => {
                                for i in offsets {
                                    if *i <= old_ity_size {
                                        assert_eq!(*i, old_ity_size);
                                        *i = new_ity_size;
                                    }
                                }
                                // We might be making the struct larger.
                                if variant.size <= old_ity_size {
                                    variant.size = new_ity_size;
                                }
                            }
                            _ => bug!(),
                        }
                    }
                }

                let tag_mask = !0u128 >> (128 - ity.size().bits());
                let tag = Scalar {
                    value: Int(ity, signed),
                    valid_range: (min as u128 & tag_mask)..=(max as u128 & tag_mask),
                };
                let mut abi = Abi::Aggregate { sized: true };
                if tag.value.size(dl) == size {
                    abi = Abi::Scalar(tag.clone());
                } else {
                    // Try to use a ScalarPair for all tagged enums.
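                    // (Illustrative: `enum E { A(u32), B(u32) }` has one
                    // non-ZST field per variant, at the same offset and with
                    // the same primitive, so it becomes `ScalarPair(tag, u32)`;
                    // with `B(f32)` instead, the primitives differ and it
                    // stays `Aggregate`.)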
                    let mut common_prim = None;
                    for (field_layouts, layout_variant) in variants.iter().zip(&layout_variants) {
                        let offsets = match layout_variant.fields {
                            FieldPlacement::Arbitrary { ref offsets, .. } => offsets,
                            _ => bug!(),
                        };
                        let mut fields =
                            field_layouts.iter().zip(offsets).filter(|p| !p.0.is_zst());
                        let (field, offset) = match (fields.next(), fields.next()) {
                            (None, None) => continue,
                            (Some(pair), None) => pair,
                            _ => {
                                common_prim = None;
                                break;
                            }
                        };
                        let prim = match field.details.abi {
                            Abi::Scalar(ref scalar) => scalar.value,
                            _ => {
                                common_prim = None;
                                break;
                            }
                        };
                        if let Some(pair) = common_prim {
                            // This is pretty conservative. We could go fancier
                            // by conflating things like i32 and u32, or even
                            // realising that (u8, u8) could just cohabit with
                            // u16 or even u32.
                            if pair != (prim, offset) {
                                common_prim = None;
                                break;
                            }
                        } else {
                            common_prim = Some((prim, offset));
                        }
                    }
                    if let Some((prim, offset)) = common_prim {
                        let pair = self.scalar_pair(tag.clone(), scalar_unit(prim));
                        let pair_offsets = match pair.fields {
                            FieldPlacement::Arbitrary { ref offsets, ref memory_index } => {
                                assert_eq!(memory_index, &[0, 1]);
                                offsets
                            }
                            _ => bug!(),
                        };
                        if pair_offsets[0] == Size::ZERO
                            && pair_offsets[1] == *offset
                            && align == pair.align
                            && size == pair.size
                        {
                            // We can use `ScalarPair` only when it matches our
                            // already computed layout (including `#[repr(C)]`).
                            abi = pair.abi;
                        }
                    }
                }

                if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
                    abi = Abi::Uninhabited;
                }

                let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag.clone());

                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Multiple {
                        discr: tag,
                        discr_kind: DiscriminantKind::Tag,
                        discr_index: 0,
                        variants: layout_variants,
                    },
                    fields: FieldPlacement::Arbitrary {
                        offsets: vec![Size::ZERO],
                        memory_index: vec![0],
                    },
                    largest_niche,
                    abi,
                    align,
                    size,
                })
            }

            // Types with no meaningful known layout.
1240             ty::Projection(_) | ty::Opaque(..) => {
1241                 let normalized = tcx.normalize_erasing_regions(param_env, ty);
1242                 if ty == normalized {
1243                     return Err(LayoutError::Unknown(ty));
1244                 }
1245                 tcx.layout_raw(param_env.and(normalized))?
1246             }
1247
1248             ty::Bound(..)
1249             | ty::Placeholder(..)
1250             | ty::UnnormalizedProjection(..)
1251             | ty::GeneratorWitness(..)
1252             | ty::Infer(_) => bug!("LayoutDetails::compute: unexpected type `{}`", ty),
1253
1254             ty::Param(_) | ty::Error => {
1255                 return Err(LayoutError::Unknown(ty));
1256             }
1257         })
1258     }
1259 }
1260
1261 /// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
1262 #[derive(Clone, Debug, PartialEq)]
1263 enum SavedLocalEligibility {
1264     Unassigned,
1265     Assigned(VariantIdx),
1266     // FIXME: Use newtype_index so we aren't wasting bytes
1267     Ineligible(Option<u32>),
1268 }
1269
1270 // When laying out generators, we divide our saved local fields into two
1271 // categories: overlap-eligible and overlap-ineligible.
1272 //
1273 // Those fields which are ineligible for overlap go in a "prefix" at the
1274 // beginning of the layout, and always have space reserved for them.
1275 //
1276 // Overlap-eligible fields are only assigned to one variant, so we lay
1277 // those fields out for each variant and put them right after the
1278 // prefix.
1279 //
1280 // Finally, in the layout details, we point to the fields from the
1281 // variants they are assigned to. It is possible for some fields to be
1282 // included in multiple variants. No field ever "moves around" in the
1283 // layout; its offset is always the same.
1284 //
1285 // Also included in the layout are the upvars and the discriminant.
1286 // These are included as fields on the "outer" layout; they are not part
1287 // of any variant.
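     //
     // An illustrative picture (assuming two variants whose only eligible
     // fields are `a` and `b` respectively):
     //
     //     [ upvars | discriminant | promoted (ineligible) locals | a or b ]
     //      `---------------------- prefix ----------------------'
     //
     // `a` and `b` may share offsets, since at most one of them is live at
     // any given suspension point.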
1288 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
1289     /// Compute the eligibility and assignment of each local.
1290     fn generator_saved_local_eligibility(
1291         &self,
1292         info: &GeneratorLayout<'tcx>,
1293     ) -> (BitSet<GeneratorSavedLocal>, IndexVec<GeneratorSavedLocal, SavedLocalEligibility>) {
1294         use SavedLocalEligibility::*;
1295
1296         let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> =
1297             IndexVec::from_elem_n(Unassigned, info.field_tys.len());
1298
1299         // The saved locals not eligible for overlap. These will get
1300         // "promoted" to the prefix of our generator.
1301         let mut ineligible_locals = BitSet::new_empty(info.field_tys.len());
1302
1303         // Figure out which of our saved locals are fields in only
1304         // one variant. The rest are deemed ineligible for overlap.
1305         for (variant_index, fields) in info.variant_fields.iter_enumerated() {
1306             for local in fields {
1307                 match assignments[*local] {
1308                     Unassigned => {
1309                         assignments[*local] = Assigned(variant_index);
1310                     }
1311                     Assigned(idx) => {
1312                         // We've already seen this local at another suspension
1313                         // point, so it is no longer a candidate.
1314                         trace!(
1315                             "removing local {:?} in >1 variant ({:?}, {:?})",
1316                             local,
1317                             variant_index,
1318                             idx
1319                         );
1320                         ineligible_locals.insert(*local);
1321                         assignments[*local] = Ineligible(None);
1322                     }
1323                     Ineligible(_) => {}
1324                 }
1325             }
1326         }
1327
1328         // Next, check every pair of eligible locals to see if they
1329         // conflict.
1330         for local_a in info.storage_conflicts.rows() {
1331             let conflicts_a = info.storage_conflicts.count(local_a);
1332             if ineligible_locals.contains(local_a) {
1333                 continue;
1334             }
1335
1336             for local_b in info.storage_conflicts.iter(local_a) {
1337                 // local_a and local_b are storage-live at the same time, so they
1338                 // cannot be given overlapping offsets in the generator layout.
1339                 // That is already guaranteed if they are in the same variant, or
1340                 // if one is ineligible (and hence stored in every variant).
1341                 if ineligible_locals.contains(local_b)
1342                     || assignments[local_a] == assignments[local_b]
1343                 {
1344                     continue;
1345                 }
1346
1347                 // If they conflict, we will choose one to make ineligible.
1348                 // This is not always optimal; it's just a greedy heuristic that
1349                 // seems to produce good results most of the time.
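                     // Evicting whichever local conflicts with more others tends
                     // to unlock the most overlap among the remaining locals.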
1350                 let conflicts_b = info.storage_conflicts.count(local_b);
1351                 let (remove, other) =
1352                     if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
1353                 ineligible_locals.insert(remove);
1354                 assignments[remove] = Ineligible(None);
1355                 trace!("removing local {:?} due to conflict with {:?}", remove, other);
1356             }
1357         }
1358
1359         // Count the number of variants in use. If only one variant is in use,
1360         // then no locals can overlap in our layout. In this case it's always
1361         // better to make the remaining locals ineligible, so we can lay them
1362         // out with the other locals in the prefix and eliminate unnecessary
1363         // padding bytes.
1364         {
1365             let mut used_variants = BitSet::new_empty(info.variant_fields.len());
1366             for assignment in &assignments {
1367                 match assignment {
1368                     Assigned(idx) => {
1369                         used_variants.insert(*idx);
1370                     }
1371                     _ => {}
1372                 }
1373             }
1374             if used_variants.count() < 2 {
1375                 for assignment in assignments.iter_mut() {
1376                     *assignment = Ineligible(None);
1377                 }
1378                 ineligible_locals.insert_all();
1379             }
1380         }
1381
1382         // Write down the order of our locals that will be promoted to the prefix.
1383         {
1384             for (idx, local) in ineligible_locals.iter().enumerate() {
1385                 assignments[local] = Ineligible(Some(idx as u32));
1386             }
1387         }
1388         debug!("generator saved local assignments: {:?}", assignments);
1389
1390         (ineligible_locals, assignments)
1391     }
1392
1393     /// Compute the full generator layout.
1394     fn generator_layout(
1395         &self,
1396         ty: Ty<'tcx>,
1397         def_id: hir::def_id::DefId,
1398         substs: SubstsRef<'tcx>,
1399     ) -> Result<&'tcx LayoutDetails, LayoutError<'tcx>> {
1400         use SavedLocalEligibility::*;
1401         let tcx = self.tcx;
1402
1403         let subst_field = |ty: Ty<'tcx>| ty.subst(tcx, substs);
1404
1405         let info = tcx.generator_layout(def_id);
1406         let (ineligible_locals, assignments) = self.generator_saved_local_eligibility(&info);
1407
1408         // Build a prefix layout, including "promoting" all ineligible
1409         // locals as part of the prefix. We compute the layout of all of
1410         // these fields at once to get optimal packing.
1411         let discr_index = substs.as_generator().prefix_tys(def_id, tcx).count();
1412
1413         // `info.variant_fields` already accounts for the reserved variants, so no need to add them.
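              // Pick the smallest unsigned integer able to represent every
              // variant index, so a generator with few states gets a one-byte
              // discriminant.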
1414         let max_discr = (info.variant_fields.len() - 1) as u128;
1415         let discr_int = Integer::fit_unsigned(max_discr);
1416         let discr_int_ty = discr_int.to_ty(tcx, false);
1417         let discr = Scalar { value: Primitive::Int(discr_int, false), valid_range: 0..=max_discr };
1418         let discr_layout = self.tcx.intern_layout(LayoutDetails::scalar(self, discr.clone()));
1419         let discr_layout = TyLayout { ty: discr_int_ty, details: discr_layout };
1420
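              // Promoted (ineligible) locals get a slot in every state but are
              // only initialized in some of them, hence the `MaybeUninit` wrapper.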
1421         let promoted_layouts = ineligible_locals
1422             .iter()
1423             .map(|local| subst_field(info.field_tys[local]))
1424             .map(|ty| tcx.mk_maybe_uninit(ty))
1425             .map(|ty| self.layout_of(ty));
1426         let prefix_layouts = substs
1427             .as_generator()
1428             .prefix_tys(def_id, tcx)
1429             .map(|ty| self.layout_of(ty))
1430             .chain(iter::once(Ok(discr_layout)))
1431             .chain(promoted_layouts)
1432             .collect::<Result<Vec<_>, _>>()?;
1433         let prefix = self.univariant_uninterned(
1434             ty,
1435             &prefix_layouts,
1436             &ReprOptions::default(),
1437             StructKind::AlwaysSized,
1438         )?;
1439
1440         let (prefix_size, prefix_align) = (prefix.size, prefix.align);
1441
1442         // Split the prefix layout into the "outer" fields (upvars and
1443         // discriminant) and the "promoted" fields. Promoted fields will
1444         // get included in each variant that requested them in
1445         // GeneratorLayout.
1446         debug!("prefix = {:#?}", prefix);
1447         let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
1448             FieldPlacement::Arbitrary { mut offsets, memory_index } => {
1449                 let mut inverse_memory_index = invert_mapping(&memory_index);
1450
1451                 // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
1452                 // "outer" and "promoted" fields respectively.
1453                 let b_start = (discr_index + 1) as u32;
1454                 let offsets_b = offsets.split_off(b_start as usize);
1455                 let offsets_a = offsets;
1456
1457                 // Disentangle the "a" and "b" components of `inverse_memory_index`
1458                 // by preserving the order but keeping only one disjoint "half" each.
1459                 // FIXME(eddyb) build a better abstraction for permutations, if possible.
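                     // E.g. (illustrative): with `b_start = 2` and
                     // `inverse_memory_index = [1, 3, 0, 2]`, the "b" half becomes
                     // [3 - 2, 2 - 2] = [1, 0] and the "a" half becomes [1, 0].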
1460                 let inverse_memory_index_b: Vec<_> =
1461                     inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
1462                 inverse_memory_index.retain(|&i| i < b_start);
1463                 let inverse_memory_index_a = inverse_memory_index;
1464
1465                 // Since `inverse_memory_index_{a,b}` each refer only to their
1466                 // respective fields, they can be safely inverted.
1467                 let memory_index_a = invert_mapping(&inverse_memory_index_a);
1468                 let memory_index_b = invert_mapping(&inverse_memory_index_b);
1469
1470                 let outer_fields =
1471                     FieldPlacement::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
1472                 (outer_fields, offsets_b, memory_index_b)
1473             }
1474             _ => bug!(),
1475         };
1476
1477         let mut size = prefix.size;
1478         let mut align = prefix.align;
1479         let variants = info
1480             .variant_fields
1481             .iter_enumerated()
1482             .map(|(index, variant_fields)| {
1483                 // Only include overlap-eligible fields when we compute our variant layout.
1484                 let variant_only_tys = variant_fields
1485                     .iter()
1486                     .filter(|local| match assignments[**local] {
1487                         Unassigned => bug!(),
1488                         Assigned(v) if v == index => true,
1489                         Assigned(_) => bug!("assignment does not match variant"),
1490                         Ineligible(_) => false,
1491                     })
1492                     .map(|local| subst_field(info.field_tys[*local]));
1493
1494                 let mut variant = self.univariant_uninterned(
1495                     ty,
1496                     &variant_only_tys
1497                         .map(|ty| self.layout_of(ty))
1498                         .collect::<Result<Vec<_>, _>>()?,
1499                     &ReprOptions::default(),
1500                     StructKind::Prefixed(prefix_size, prefix_align.abi),
1501                 )?;
1502                 variant.variants = Variants::Single { index };
1503
1504                 let (offsets, memory_index) = match variant.fields {
1505                     FieldPlacement::Arbitrary { offsets, memory_index } => (offsets, memory_index),
1506                     _ => bug!(),
1507                 };
1508
1509                 // Now, stitch the promoted and variant-only fields back together in
1510                 // the order they are mentioned by our GeneratorLayout.
1511                 // Because we only use some subset (that can differ between variants)
1512                 // of the promoted fields, we can't just pick those elements of the
1513                 // `promoted_memory_index` (as we'd end up with gaps).
1514                 // So instead, we build an "inverse memory_index", as if all of the
1515                 // promoted fields were being used, but leave the elements not in the
1516                 // subset as `INVALID_FIELD_IDX`, which we can filter out later to
1517                 // obtain a valid (bijective) mapping.
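                     // (Illustrative: with two promoted fields, a variant using
                     // promoted field 1 plus one eligible field yields a combined
                     // inverse index of [!0, 0, 1]; dropping the `!0` slot and
                     // inverting gives a dense, bijective `memory_index`.)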
1518                 const INVALID_FIELD_IDX: u32 = !0;
1519                 let mut combined_inverse_memory_index =
1520                     vec![INVALID_FIELD_IDX; promoted_memory_index.len() + memory_index.len()];
1521                 let mut offsets_and_memory_index = offsets.into_iter().zip(memory_index);
1522                 let combined_offsets = variant_fields
1523                     .iter()
1524                     .enumerate()
1525                     .map(|(i, local)| {
1526                         let (offset, memory_index) = match assignments[*local] {
1527                             Unassigned => bug!(),
1528                             Assigned(_) => {
1529                                 let (offset, memory_index) =
1530                                     offsets_and_memory_index.next().unwrap();
1531                                 (offset, promoted_memory_index.len() as u32 + memory_index)
1532                             }
1533                             Ineligible(field_idx) => {
1534                                 let field_idx = field_idx.unwrap() as usize;
1535                                 (promoted_offsets[field_idx], promoted_memory_index[field_idx])
1536                             }
1537                         };
1538                         combined_inverse_memory_index[memory_index as usize] = i as u32;
1539                         offset
1540                     })
1541                     .collect();
1542
1543                 // Remove the unused slots and invert the mapping to obtain the
1544                 // combined `memory_index` (also see previous comment).
1545                 combined_inverse_memory_index.retain(|&i| i != INVALID_FIELD_IDX);
1546                 let combined_memory_index = invert_mapping(&combined_inverse_memory_index);
1547
1548                 variant.fields = FieldPlacement::Arbitrary {
1549                     offsets: combined_offsets,
1550                     memory_index: combined_memory_index,
1551                 };
1552
1553                 size = size.max(variant.size);
1554                 align = align.max(variant.align);
1555                 Ok(variant)
1556             })
1557             .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1558
1559         size = size.align_to(align.abi);
1560
1561         let abi = if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi.is_uninhabited())
1562         {
1563             Abi::Uninhabited
1564         } else {
1565             Abi::Aggregate { sized: true }
1566         };
1567
1568         let layout = tcx.intern_layout(LayoutDetails {
1569             variants: Variants::Multiple {
1570                 discr,
1571                 discr_kind: DiscriminantKind::Tag,
1572                 discr_index,
1573                 variants,
1574             },
1575             fields: outer_fields,
1576             abi,
1577             largest_niche: prefix.largest_niche,
1578             size,
1579             align,
1580         });
1581         debug!("generator layout ({:?}): {:#?}", ty, layout);
1582         Ok(layout)
1583     }
1584
1585     /// Invoked from `layout_of`, after the `layout_raw` query has computed
1586     /// a layout, to record the final layout of each type for printing.
1587     #[inline(always)]
1588     fn record_layout_for_printing(&self, layout: TyLayout<'tcx>) {
1589         // If we are running with `-Zprint-type-sizes`, maybe record layouts
1590         // for dumping later.
1591         if self.tcx.sess.opts.debugging_opts.print_type_sizes {
1592             self.record_layout_for_printing_outlined(layout)
1593         }
1594     }
1595
1596     fn record_layout_for_printing_outlined(&self, layout: TyLayout<'tcx>) {
1597         // Ignore layouts computed with non-empty parameter environments, and
1598         // non-monomorphic layouts, as the user only wants to see what results
1599         // from the final codegen session.
1600         if layout.ty.has_param_types() || !self.param_env.caller_bounds.is_empty() {
1601             return;
1602         }
1603
1604         // (delay format until we actually need it)
1605         let record = |kind, packed, opt_discr_size, variants| {
1606             let type_desc = format!("{:?}", layout.ty);
1607             self.tcx.sess.code_stats.record_type_size(
1608                 kind,
1609                 type_desc,
1610                 layout.align.abi,
1611                 layout.size,
1612                 packed,
1613                 opt_discr_size,
1614                 variants,
1615             );
1616         };
1617
1618         let adt_def = match layout.ty.kind {
1619             ty::Adt(ref adt_def, _) => {
1620                 debug!("print-type-size t: `{:?}` process adt", layout.ty);
1621                 adt_def
1622             }
1623
1624             ty::Closure(..) => {
1625                 debug!("print-type-size t: `{:?}` record closure", layout.ty);
1626                 record(DataTypeKind::Closure, false, None, vec![]);
1627                 return;
1628             }
1629
1630             _ => {
1631                 debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
1632                 return;
1633             }
1634         };
1635
1636         let adt_kind = adt_def.adt_kind();
1637         let adt_packed = adt_def.repr.pack.is_some();
1638
1639         let build_variant_info = |n: Option<Ident>, flds: &[ast::Name], layout: TyLayout<'tcx>| {
1640             let mut min_size = Size::ZERO;
1641             let field_info: Vec<_> = flds
1642                 .iter()
1643                 .enumerate()
1644                 .map(|(i, &name)| match layout.field(self, i) {
1645                     Err(err) => {
1646                         bug!("no layout found for field {}: `{:?}`", name, err);
1647                     }
1648                     Ok(field_layout) => {
1649                         let offset = layout.fields.offset(i);
1650                         let field_end = offset + field_layout.size;
1651                         if min_size < field_end {
1652                             min_size = field_end;
1653                         }
1654                         session::FieldInfo {
1655                             name: name.to_string(),
1656                             offset: offset.bytes(),
1657                             size: field_layout.size.bytes(),
1658                             align: field_layout.align.abi.bytes(),
1659                         }
1660                     }
1661                 })
1662                 .collect();
1663
1664             session::VariantInfo {
1665                 name: n.map(|n| n.to_string()),
1666                 kind: if layout.is_unsized() {
1667                     session::SizeKind::Min
1668                 } else {
1669                     session::SizeKind::Exact
1670                 },
1671                 align: layout.align.abi.bytes(),
1672                 size: if min_size.bytes() == 0 { layout.size.bytes() } else { min_size.bytes() },
1673                 fields: field_info,
1674             }
1675         };
1676
1677         match layout.variants {
1678             Variants::Single { index } => {
1679                 debug!("print-type-size `{:#?}` variant {}", layout, adt_def.variants[index].ident);
1680                 if !adt_def.variants.is_empty() {
1681                     let variant_def = &adt_def.variants[index];
1682                     let fields: Vec<_> = variant_def.fields.iter().map(|f| f.ident.name).collect();
1683                     record(
1684                         adt_kind.into(),
1685                         adt_packed,
1686                         None,
1687                         vec![build_variant_info(Some(variant_def.ident), &fields, layout)],
1688                     );
1689                 } else {
1690                     // (This case arises for *empty* enums; so give it
1691                     // zero variants.)
1692                     record(adt_kind.into(), adt_packed, None, vec![]);
1693                 }
1694             }
1695
1696             Variants::Multiple { ref discr, ref discr_kind, .. } => {
1697                 debug!(
1698                     "print-type-size `{:#?}` adt general variants def {}",
1699                     layout.ty,
1700                     adt_def.variants.len()
1701                 );
1702                 let variant_infos: Vec<_> = adt_def
1703                     .variants
1704                     .iter_enumerated()
1705                     .map(|(i, variant_def)| {
1706                         let fields: Vec<_> =
1707                             variant_def.fields.iter().map(|f| f.ident.name).collect();
1708                         build_variant_info(
1709                             Some(variant_def.ident),
1710                             &fields,
1711                             layout.for_variant(self, i),
1712                         )
1713                     })
1714                     .collect();
1715                 record(
1716                     adt_kind.into(),
1717                     adt_packed,
1718                     match discr_kind {
1719                         DiscriminantKind::Tag => Some(discr.value.size(self)),
1720                         _ => None,
1721                     },
1722                     variant_infos,
1723                 );
1724             }
1725         }
1726     }
1727 }
1728
1729 /// Type size "skeleton", i.e., the only information determining a type's size.
1730 /// While this is conservative (aside from constant sizes, only pointers,
1731 /// newtypes thereof, and null-pointer-optimized enums are allowed), it is
1732 /// enough to statically check common use cases of transmute.
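     ///
     /// For example (illustrative), `transmute::<&T, Option<&T>>` can be checked
     /// even for a generic `T`: both sides reduce to `Pointer { tail: T }`, so
     /// `same_size` holds without computing any concrete layout.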
1733 #[derive(Copy, Clone, Debug)]
1734 pub enum SizeSkeleton<'tcx> {
1735     /// Any statically computable Layout.
1736     Known(Size),
1737
1738     /// A potentially-fat pointer.
1739     Pointer {
1740         /// If true, this pointer is never null.
1741         non_zero: bool,
1742         /// The type which determines the unsized metadata, if any,
1743         /// of this pointer. Either a type parameter or a projection
1744         /// depending on one, with regions erased.
1745         tail: Ty<'tcx>,
1746     },
1747 }
1748
1749 impl<'tcx> SizeSkeleton<'tcx> {
1750     pub fn compute(
1751         ty: Ty<'tcx>,
1752         tcx: TyCtxt<'tcx>,
1753         param_env: ty::ParamEnv<'tcx>,
1754     ) -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
1755         debug_assert!(!ty.has_infer_types_or_consts());
1756
1757         // First try computing a static layout.
1758         let err = match tcx.layout_of(param_env.and(ty)) {
1759             Ok(layout) => {
1760                 return Ok(SizeSkeleton::Known(layout.size));
1761             }
1762             Err(err) => err,
1763         };
1764
1765         match ty.kind {
1766             ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
1767                 let non_zero = !ty.is_unsafe_ptr();
1768                 let tail = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
1769                 match tail.kind {
1770                     ty::Param(_) | ty::Projection(_) => {
1771                         debug_assert!(tail.has_param_types());
1772                         Ok(SizeSkeleton::Pointer { non_zero, tail: tcx.erase_regions(&tail) })
1773                     }
1774                     _ => bug!(
1775                         "SizeSkeleton::compute({}): layout errored ({}), yet \
1776                               tail `{}` is not a type parameter or a projection",
1777                         ty,
1778                         err,
1779                         tail
1780                     ),
1781                 }
1782             }
1783
1784             ty::Adt(def, substs) => {
1785                 // Only newtypes and enums w/ nullable pointer optimization.
1786                 if def.is_union() || def.variants.is_empty() || def.variants.len() > 2 {
1787                     return Err(err);
1788                 }
1789
1790                 // Get a zero-sized variant or a pointer newtype.
1791                 let zero_or_ptr_variant = |i| {
1792                     let i = VariantIdx::new(i);
1793                     let fields = def.variants[i]
1794                         .fields
1795                         .iter()
1796                         .map(|field| SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env));
1797                     let mut ptr = None;
1798                     for field in fields {
1799                         let field = field?;
1800                         match field {
1801                             SizeSkeleton::Known(size) => {
1802                                 if size.bytes() > 0 {
1803                                     return Err(err);
1804                                 }
1805                             }
1806                             SizeSkeleton::Pointer { .. } => {
1807                                 if ptr.is_some() {
1808                                     return Err(err);
1809                                 }
1810                                 ptr = Some(field);
1811                             }
1812                         }
1813                     }
1814                     Ok(ptr)
1815                 };
1816
1817                 let v0 = zero_or_ptr_variant(0)?;
1818                 // Newtype.
1819                 if def.variants.len() == 1 {
1820                     if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
1821                         return Ok(SizeSkeleton::Pointer {
1822                             non_zero: non_zero
1823                                 || match tcx.layout_scalar_valid_range(def.did) {
1824                                     (Bound::Included(start), Bound::Unbounded) => start > 0,
1825                                     (Bound::Included(start), Bound::Included(end)) => {
1826                                         0 < start && start < end
1827                                     }
1828                                     _ => false,
1829                                 },
1830                             tail,
1831                         });
1832                     } else {
1833                         return Err(err);
1834                     }
1835                 }
1836
1837                 let v1 = zero_or_ptr_variant(1)?;
1838                 // Nullable pointer enum optimization.
1839                 match (v0, v1) {
1840                     (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None)
1841                     | (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
1842                         Ok(SizeSkeleton::Pointer { non_zero: false, tail })
1843                     }
1844                     _ => Err(err),
1845                 }
1846             }
1847
1848             ty::Projection(_) | ty::Opaque(..) => {
1849                 let normalized = tcx.normalize_erasing_regions(param_env, ty);
1850                 if ty == normalized {
1851                     Err(err)
1852                 } else {
1853                     SizeSkeleton::compute(normalized, tcx, param_env)
1854                 }
1855             }
1856
1857             _ => Err(err),
1858         }
1859     }
1860
1861     pub fn same_size(self, other: SizeSkeleton<'_>) -> bool {
1862         match (self, other) {
1863             (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
1864             (SizeSkeleton::Pointer { tail: a, .. }, SizeSkeleton::Pointer { tail: b, .. }) => {
1865                 a == b
1866             }
1867             _ => false,
1868         }
1869     }
1870 }
1871
1872 pub trait HasTyCtxt<'tcx>: HasDataLayout {
1873     fn tcx(&self) -> TyCtxt<'tcx>;
1874 }
1875
1876 pub trait HasParamEnv<'tcx> {
1877     fn param_env(&self) -> ty::ParamEnv<'tcx>;
1878 }
1879
1880 impl<'tcx> HasDataLayout for TyCtxt<'tcx> {
1881     fn data_layout(&self) -> &TargetDataLayout {
1882         &self.data_layout
1883     }
1884 }
1885
1886 impl<'tcx> HasTyCtxt<'tcx> for TyCtxt<'tcx> {
1887     fn tcx(&self) -> TyCtxt<'tcx> {
1888         *self
1889     }
1890 }
1891
1892 impl<'tcx, C> HasParamEnv<'tcx> for LayoutCx<'tcx, C> {
1893     fn param_env(&self) -> ty::ParamEnv<'tcx> {
1894         self.param_env
1895     }
1896 }
1897
1898 impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
1899     fn data_layout(&self) -> &TargetDataLayout {
1900         self.tcx.data_layout()
1901     }
1902 }
1903
1904 impl<'tcx, T: HasTyCtxt<'tcx>> HasTyCtxt<'tcx> for LayoutCx<'tcx, T> {
1905     fn tcx(&self) -> TyCtxt<'tcx> {
1906         self.tcx.tcx()
1907     }
1908 }
1909
1910 pub type TyLayout<'tcx> = ::rustc_target::abi::TyLayout<'tcx, Ty<'tcx>>;
1911
1912 impl<'tcx> LayoutOf for LayoutCx<'tcx, TyCtxt<'tcx>> {
1913     type Ty = Ty<'tcx>;
1914     type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;
1915
1916     /// Computes the layout of a type. Note that this implicitly
1917     /// executes in "reveal all" mode.
1918     fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout {
1919         let param_env = self.param_env.with_reveal_all();
1920         let ty = self.tcx.normalize_erasing_regions(param_env, ty);
1921         let details = self.tcx.layout_raw(param_env.and(ty))?;
1922         let layout = TyLayout { ty, details };
1923
1924         // N.B., this recording is normally disabled; when enabled, it
1925         // can however trigger recursive invocations of `layout_of`.
1926         // Therefore, we execute it *after* the main query has
1927         // completed, to avoid problems around recursive structures
1928         // and the like. (Admittedly, I wasn't able to reproduce a problem
1929         // here, but it seems like the right thing to do. -nmatsakis)
1930         self.record_layout_for_printing(layout);
1931
1932         Ok(layout)
1933     }
1934 }
1935
1936 impl LayoutOf for LayoutCx<'tcx, ty::query::TyCtxtAt<'tcx>> {
1937     type Ty = Ty<'tcx>;
1938     type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;
1939
1940     /// Computes the layout of a type. Note that this implicitly
1941     /// executes in "reveal all" mode.
1942     fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout {
1943         let param_env = self.param_env.with_reveal_all();
1944         let ty = self.tcx.normalize_erasing_regions(param_env, ty);
1945         let details = self.tcx.layout_raw(param_env.and(ty))?;
1946         let layout = TyLayout { ty, details };
1947
1948         // N.B., this recording is normally disabled; when enabled, it
1949         // can however trigger recursive invocations of `layout_of`.
1950         // Therefore, we execute it *after* the main query has
1951         // completed, to avoid problems around recursive structures
1952         // and the like. (Admittedly, I wasn't able to reproduce a problem
1953         // here, but it seems like the right thing to do. -nmatsakis)
1954         let cx = LayoutCx { tcx: *self.tcx, param_env: self.param_env };
1955         cx.record_layout_for_printing(layout);
1956
1957         Ok(layout)
1958     }
1959 }
1960
1961 // Helper (inherent) `layout_of` methods to avoid pushing `LayoutCx` to users.
1962 impl TyCtxt<'tcx> {
1963     /// Computes the layout of a type. Note that this implicitly
1964     /// executes in "reveal all" mode.
1965     #[inline]
1966     pub fn layout_of(
1967         self,
1968         param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
1969     ) -> Result<TyLayout<'tcx>, LayoutError<'tcx>> {
1970         let cx = LayoutCx { tcx: self, param_env: param_env_and_ty.param_env };
1971         cx.layout_of(param_env_and_ty.value)
1972     }
1973 }
1974
1975 impl ty::query::TyCtxtAt<'tcx> {
1976     /// Computes the layout of a type. Note that this implicitly
1977     /// executes in "reveal all" mode.
1978     #[inline]
1979     pub fn layout_of(
1980         self,
1981         param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
1982     ) -> Result<TyLayout<'tcx>, LayoutError<'tcx>> {
1983         let cx = LayoutCx { tcx: self.at(self.span), param_env: param_env_and_ty.param_env };
1984         cx.layout_of(param_env_and_ty.value)
1985     }
1986 }
1987
1988 impl<'tcx, C> TyLayoutMethods<'tcx, C> for Ty<'tcx>
1989 where
1990     C: LayoutOf<Ty = Ty<'tcx>, TyLayout: MaybeResult<TyLayout<'tcx>>>
1991         + HasTyCtxt<'tcx>
1992         + HasParamEnv<'tcx>,
1993 {
1994     fn for_variant(this: TyLayout<'tcx>, cx: &C, variant_index: VariantIdx) -> TyLayout<'tcx> {
1995         let details = match this.variants {
1996             Variants::Single { index } if index == variant_index => this.details,
1997
1998             Variants::Single { index } => {
1999                 // Deny calling for_variant more than once for non-Single enums.
2000                 if let Ok(layout) = cx.layout_of(this.ty).to_result() {
2001                     assert_eq!(layout.variants, Variants::Single { index });
2002                 }
2003
2004                 let fields = match this.ty.kind {
2005                     ty::Adt(def, _) => def.variants[variant_index].fields.len(),
2006                     _ => bug!(),
2007                 };
2008                 let tcx = cx.tcx();
2009                 tcx.intern_layout(LayoutDetails {
2010                     variants: Variants::Single { index: variant_index },
2011                     fields: FieldPlacement::Union(fields),
2012                     abi: Abi::Uninhabited,
2013                     largest_niche: None,
2014                     align: tcx.data_layout.i8_align,
2015                     size: Size::ZERO,
2016                 })
2017             }
2018
2019             Variants::Multiple { ref variants, .. } => &variants[variant_index],
2020         };
2021
2022         assert_eq!(details.variants, Variants::Single { index: variant_index });
2023
2024         TyLayout { ty: this.ty, details }
2025     }
2026
2027     fn field(this: TyLayout<'tcx>, cx: &C, i: usize) -> C::TyLayout {
2028         let tcx = cx.tcx();
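              // Synthesizes a stand-alone layout for a discriminant "field" from
              // its tag scalar; used below for enum and generator discriminants.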
2029         let discr_layout = |discr: &Scalar| -> C::TyLayout {
2030             let layout = LayoutDetails::scalar(cx, discr.clone());
2031             MaybeResult::from(Ok(TyLayout {
2032                 details: tcx.intern_layout(layout),
2033                 ty: discr.value.to_ty(tcx),
2034             }))
2035         };
2036
2037         cx.layout_of(match this.ty.kind {
2038             ty::Bool
2039             | ty::Char
2040             | ty::Int(_)
2041             | ty::Uint(_)
2042             | ty::Float(_)
2043             | ty::FnPtr(_)
2044             | ty::Never
2045             | ty::FnDef(..)
2046             | ty::GeneratorWitness(..)
2047             | ty::Foreign(..)
2048             | ty::Dynamic(..) => bug!("TyLayout::field_type({:?}): not applicable", this),
2049
2050             // Potentially-fat pointers.
2051             ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
2052                 assert!(i < this.fields.count());
2053
2054                 // Reuse the fat `*T` type as its own thin pointer data field.
2055                 // This provides information about, e.g., DST struct pointees
2056                 // (which may have no non-DST form), and will work as long
2057                 // as the `Abi` or `FieldPlacement` is checked by users.
2058                 if i == 0 {
2059                     let nil = tcx.mk_unit();
2060                     let ptr_ty = if this.ty.is_unsafe_ptr() {
2061                         tcx.mk_mut_ptr(nil)
2062                     } else {
2063                         tcx.mk_mut_ref(tcx.lifetimes.re_static, nil)
2064                     };
2065                     return MaybeResult::from(cx.layout_of(ptr_ty).to_result().map(
2066                         |mut ptr_layout| {
2067                             ptr_layout.ty = this.ty;
2068                             ptr_layout
2069                         },
2070                     ));
2071                 }
2072
2073                 match tcx.struct_tail_erasing_lifetimes(pointee, cx.param_env()).kind {
2074                     ty::Slice(_) | ty::Str => tcx.types.usize,
2075                     ty::Dynamic(_, _) => {
2076                         tcx.mk_imm_ref(tcx.lifetimes.re_static, tcx.mk_array(tcx.types.usize, 3))
2077                         /* FIXME: use actual fn pointers
2078                         Warning: naively computing the number of entries in the
2079                         vtable by counting the methods on the trait + methods on
2080                         all parent traits does not work, because some methods can
2081                         be not object safe and thus excluded from the vtable.
2082                         Increase this counter if you tried to implement this but
2083                         failed to do it without duplicating a lot of code from
2084                         other places in the compiler: 2
2085                         tcx.mk_tup(&[
2086                             tcx.mk_array(tcx.types.usize, 3),
2087                             tcx.mk_array(Option<fn()>),
2088                         ])
2089                         */
2090                     }
2091                     _ => bug!("TyLayout::field_type({:?}): not applicable", this),
2092                 }
2093             }
2094
2095             // Arrays and slices.
2096             ty::Array(element, _) | ty::Slice(element) => element,
2097             ty::Str => tcx.types.u8,
2098
2099             // Tuples, generators and closures.
2100             ty::Closure(def_id, ref substs) => {
2101                 substs.as_closure().upvar_tys(def_id, tcx).nth(i).unwrap()
2102             }
2103
2104             ty::Generator(def_id, ref substs, _) => match this.variants {
2105                 Variants::Single { index } => substs
2106                     .as_generator()
2107                     .state_tys(def_id, tcx)
2108                     .nth(index.as_usize())
2109                     .unwrap()
2110                     .nth(i)
2111                     .unwrap(),
2112                 Variants::Multiple { ref discr, discr_index, .. } => {
2113                     if i == discr_index {
2114                         return discr_layout(discr);
2115                     }
2116                     substs.as_generator().prefix_tys(def_id, tcx).nth(i).unwrap()
2117                 }
2118             },
2119
2120             ty::Tuple(tys) => tys[i].expect_ty(),
2121
2122             // SIMD vector types.
2123             ty::Adt(def, ..) if def.repr.simd() => this.ty.simd_type(tcx),
2124
2125             // ADTs.
2126             ty::Adt(def, substs) => {
2127                 match this.variants {
2128                     Variants::Single { index } => def.variants[index].fields[i].ty(tcx, substs),
2129
2130                     // Discriminant field for enums (where applicable).
2131                     Variants::Multiple { ref discr, .. } => {
2132                         assert_eq!(i, 0);
2133                         return discr_layout(discr);
2134                     }
2135                 }
2136             }
2137
2138             ty::Projection(_)
2139             | ty::UnnormalizedProjection(..)
2140             | ty::Bound(..)
2141             | ty::Placeholder(..)
2142             | ty::Opaque(..)
2143             | ty::Param(_)
2144             | ty::Infer(_)
2145             | ty::Error => bug!("TyLayout::field_type: unexpected type `{}`", this.ty),
2146         })
2147     }
2148
2149     fn pointee_info_at(this: TyLayout<'tcx>, cx: &C, offset: Size) -> Option<PointeeInfo> {
2150         match this.ty.kind {
2151             ty::RawPtr(mt) if offset.bytes() == 0 => {
2152                 cx.layout_of(mt.ty).to_result().ok().map(|layout| PointeeInfo {
2153                     size: layout.size,
2154                     align: layout.align.abi,
2155                     safe: None,
2156                 })
2157             }
2158
2159             ty::Ref(_, ty, mt) if offset.bytes() == 0 => {
2160                 let tcx = cx.tcx();
2161                 let is_freeze = ty.is_freeze(tcx, cx.param_env(), DUMMY_SP);
2162                 let kind = match mt {
2163                     hir::Mutability::Not => {
2164                         if is_freeze {
2165                             PointerKind::Frozen
2166                         } else {
2167                             PointerKind::Shared
2168                         }
2169                     }
2170                     hir::Mutability::Mut => {
2171                         // Previously we would only emit noalias annotations for LLVM >= 6 or in
2172                         // panic=abort mode. That was deemed right, as prior versions had many bugs
2173                         // in conjunction with unwinding, but later versions didn’t seem to have
2174                         // said issues. See issue #31681.
2175                         //
2176                         // Alas, later on we encountered a case where noalias would generate wrong
2177                         // code altogether even with recent versions of LLVM in *safe* code with no
2178                         // unwinding involved. See #54462.
2179                         //
2180                         // For now, do not enable mutable_noalias by default at all, while the
2181                         // issue is being figured out.
2182                         let mutable_noalias =
2183                             tcx.sess.opts.debugging_opts.mutable_noalias.unwrap_or(false);
2184                         if mutable_noalias {
2185                             PointerKind::UniqueBorrowed
2186                         } else {
2187                             PointerKind::Shared
2188                         }
2189                     }
2190                 };
2191
2192                 cx.layout_of(ty).to_result().ok().map(|layout| PointeeInfo {
2193                     size: layout.size,
2194                     align: layout.align.abi,
2195                     safe: Some(kind),
2196                 })
2197             }
2198
2199             _ => {
2200                 let mut data_variant = match this.variants {
2201                     // Within the discriminant field, only the niche itself is
2202                     // always initialized, so we only check for a pointer at its
2203                     // offset.
2204                     //
2205                     // If the niche is a pointer, it's either valid (according
2206                     // to its type), or null (which the niche field's scalar
2207                     // validity range encodes).  This allows using
2208                     // `dereferenceable_or_null` for e.g., `Option<&T>`, and
2209                     // this will continue to work as long as we don't start
2210                     // using more niches than just null (e.g., the first page of
2211                     // the address space, or unaligned pointers).
2212                     Variants::Multiple {
2213                         discr_kind: DiscriminantKind::Niche { dataful_variant, .. },
2214                         discr_index,
2215                         ..
2216                     } if this.fields.offset(discr_index) == offset => {
2217                         Some(this.for_variant(cx, dataful_variant))
2218                     }
2219                     _ => Some(this),
2220                 };
2221
2222                 if let Some(variant) = data_variant {
2223                     // We're not interested in any unions.
2224                     if let FieldPlacement::Union(_) = variant.fields {
2225                         data_variant = None;
2226                     }
2227                 }
2228
2229                 let mut result = None;
2230
2231                 if let Some(variant) = data_variant {
2232                     let ptr_end = offset + Pointer.size(cx);
2233                     for i in 0..variant.fields.count() {
2234                         let field_start = variant.fields.offset(i);
2235                         if field_start <= offset {
2236                             let field = variant.field(cx, i);
2237                             result = field.to_result().ok().and_then(|field| {
2238                                 if ptr_end <= field_start + field.size {
2239                                     // We found the right field, look inside it.
2240                                     field.pointee_info_at(cx, offset - field_start)
2241                                 } else {
2242                                     None
2243                                 }
2244                             });
2245                             if result.is_some() {
2246                                 break;
2247                             }
2248                         }
2249                     }
2250                 }
2251
2252                 // FIXME(eddyb) This should be for `ptr::Unique<T>`, not `Box<T>`.
2253                 if let Some(ref mut pointee) = result {
2254                     if let ty::Adt(def, _) = this.ty.kind {
2255                         if def.is_box() && offset.bytes() == 0 {
2256                             pointee.safe = Some(PointerKind::UniqueOwned);
2257                         }
2258                     }
2259                 }
2260
2261                 result
2262             }
2263         }
2264     }
2265 }
2266
2267 impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for LayoutError<'tcx> {
2268     fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
2269         use crate::ty::layout::LayoutError::*;
2270         mem::discriminant(self).hash_stable(hcx, hasher);
2271
2272         match *self {
2273             Unknown(t) | SizeOverflow(t) => t.hash_stable(hcx, hasher),
2274         }
2275     }
2276 }
2277
2278 impl<'tcx> ty::Instance<'tcx> {
2279     // NOTE(eddyb) this is private to avoid using it from outside of
2280     // `FnAbi::of_instance` - any other uses are either too high-level
2281     // for `Instance` (e.g. typeck would use `Ty::fn_sig` instead),
2282     // or should go through `FnAbi` instead, to avoid losing any
2283     // adjustments `FnAbi::of_instance` might be performing.
2284     fn fn_sig_for_fn_abi(&self, tcx: TyCtxt<'tcx>) -> ty::PolyFnSig<'tcx> {
2285         let ty = self.monomorphic_ty(tcx);
2286         match ty.kind {
2287             ty::FnDef(..) |
2288             // Shims currently have type FnPtr. Not sure this should remain.
2289             ty::FnPtr(_) => {
2290                 let mut sig = ty.fn_sig(tcx);
2291                 if let ty::InstanceDef::VtableShim(..) = self.def {
2292                     // Modify `fn(self, ...)` to `fn(self: *mut Self, ...)`.
2293                     sig = sig.map_bound(|mut sig| {
2294                         let mut inputs_and_output = sig.inputs_and_output.to_vec();
2295                         inputs_and_output[0] = tcx.mk_mut_ptr(inputs_and_output[0]);
2296                         sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);
2297                         sig
2298                     });
2299                 }
2300                 sig
2301             }
2302             ty::Closure(def_id, substs) => {
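                     // Closures are called as `fn(env, args...)`: prepend the
                     // closure environment type to the plain signature's inputs.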
2303                 let sig = substs.as_closure().sig(def_id, tcx);
2304
2305                 let env_ty = tcx.closure_env_ty(def_id, substs).unwrap();
2306                 sig.map_bound(|sig| tcx.mk_fn_sig(
2307                     iter::once(*env_ty.skip_binder()).chain(sig.inputs().iter().cloned()),
2308                     sig.output(),
2309                     sig.c_variadic,
2310                     sig.unsafety,
2311                     sig.abi
2312                 ))
2313             }
2314             ty::Generator(def_id, substs, _) => {
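                     // A generator resume takes the shape
                     // `fn(Pin<&mut Self>, Resume) -> GeneratorState<Yield, Return>`,
                     // assembled below from the `Pin` and `GeneratorState` lang items.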
2315                 let sig = substs.as_generator().poly_sig(def_id, tcx);
2316
2317                 let env_region = ty::ReLateBound(ty::INNERMOST, ty::BrEnv);
2318                 let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty);
2319
2320                 let pin_did = tcx.lang_items().pin_type().unwrap();
2321                 let pin_adt_ref = tcx.adt_def(pin_did);
2322                 let pin_substs = tcx.intern_substs(&[env_ty.into()]);
2323                 let env_ty = tcx.mk_adt(pin_adt_ref, pin_substs);
2324
2325                 sig.map_bound(|sig| {
2326                     let state_did = tcx.lang_items().gen_state().unwrap();
2327                     let state_adt_ref = tcx.adt_def(state_did);
2328                     let state_substs = tcx.intern_substs(&[
2329                         sig.yield_ty.into(),
2330                         sig.return_ty.into(),
2331                     ]);
2332                     let ret_ty = tcx.mk_adt(state_adt_ref, state_substs);
2333
2334                     tcx.mk_fn_sig(
2335                         [env_ty, sig.resume_ty].iter(),
2336                         &ret_ty,
2337                         false,
2338                         hir::Unsafety::Normal,
2339                         rustc_target::spec::abi::Abi::Rust
2340                     )
2341                 })
2342             }
2343             _ => bug!("unexpected type {:?} in Instance::fn_sig", ty)
2344         }
2345     }
2346 }
2347
2348 pub trait FnAbiExt<'tcx, C>
2349 where
2350     C: LayoutOf<Ty = Ty<'tcx>, TyLayout = TyLayout<'tcx>>
2351         + HasDataLayout
2352         + HasTargetSpec
2353         + HasTyCtxt<'tcx>
2354         + HasParamEnv<'tcx>,
2355 {
2356     /// Compute a `FnAbi` suitable for indirect calls, i.e. to `fn` pointers.
2357     ///
2358     /// NB: this doesn't handle virtual calls - those should use `FnAbi::of_instance`
2359     /// instead, where the instance is a `InstanceDef::Virtual`.
2360     fn of_fn_ptr(cx: &C, sig: ty::PolyFnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self;
2361
2362     /// Compute a `FnAbi` suitable for declaring/defining an `fn` instance, and for
2363     /// direct calls to an `fn`.
2364     ///
2365     /// NB: that includes virtual calls, which are represented by "direct calls"
2366     /// to a `InstanceDef::Virtual` instance (of `<dyn Trait as Trait>::fn`).
2367     fn of_instance(cx: &C, instance: ty::Instance<'tcx>, extra_args: &[Ty<'tcx>]) -> Self;
2368
2369     fn new_internal(
2370         cx: &C,
2371         sig: ty::PolyFnSig<'tcx>,
2372         extra_args: &[Ty<'tcx>],
2373         caller_location: Option<Ty<'tcx>>,
2374         mk_arg_type: impl Fn(Ty<'tcx>, Option<usize>) -> ArgAbi<'tcx, Ty<'tcx>>,
2375     ) -> Self;
2376     fn adjust_for_abi(&mut self, cx: &C, abi: SpecAbi);
2377 }
2378
2379 impl<'tcx, C> FnAbiExt<'tcx, C> for call::FnAbi<'tcx, Ty<'tcx>>
2380 where
2381     C: LayoutOf<Ty = Ty<'tcx>, TyLayout = TyLayout<'tcx>>
2382         + HasDataLayout
2383         + HasTargetSpec
2384         + HasTyCtxt<'tcx>
2385         + HasParamEnv<'tcx>,
2386 {
2387     fn of_fn_ptr(cx: &C, sig: ty::PolyFnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
2388         call::FnAbi::new_internal(cx, sig, extra_args, None, |ty, _| ArgAbi::new(cx.layout_of(ty)))
2389     }
2390
2391     fn of_instance(cx: &C, instance: ty::Instance<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
2392         let sig = instance.fn_sig_for_fn_abi(cx.tcx());
2393
2394         let caller_location = if instance.def.requires_caller_location(cx.tcx()) {
2395             Some(cx.tcx().caller_location_ty())
2396         } else {
2397             None
2398         };
2399
2400         call::FnAbi::new_internal(cx, sig, extra_args, caller_location, |ty, arg_idx| {
2401             let mut layout = cx.layout_of(ty);
2402             // Don't pass the vtable; it's not an argument of the virtual fn.
2403             // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
2404             // or `&/&mut dyn Trait`, because this is special-cased elsewhere in codegen.
2405             if let (ty::InstanceDef::Virtual(..), Some(0)) = (&instance.def, arg_idx) {
2406                 let fat_pointer_ty = if layout.is_unsized() {
2407                     // unsized `self` is passed as a pointer to `self`
2408                     // FIXME (mikeyhew) change this to use &own if it is ever added to the language
2409                     cx.tcx().mk_mut_ptr(layout.ty)
2410                 } else {
2411                     match layout.abi {
2412                         Abi::ScalarPair(..) => (),
2413                         _ => bug!("receiver type has unsupported layout: {:?}", layout),
2414                     }
2415
2416                     // In the case of `Rc<Self>`, we need to explicitly pass a `*mut RcBox<Self>`
2417                     // with a Scalar (not ScalarPair) ABI. This is a hack that the rest of the
2418                     // compiler understands when calling a method on a `dyn Trait` receiver.
2419                     // To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes until
2420                     // we get a built-in pointer type.
2421                     let mut fat_pointer_layout = layout;
2422                     'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr()
2423                         && !fat_pointer_layout.ty.is_region_ptr()
2424                     {
2425                         for i in 0..fat_pointer_layout.fields.count() {
2426                             let field_layout = fat_pointer_layout.field(cx, i);
2427
2428                             if !field_layout.is_zst() {
2429                                 fat_pointer_layout = field_layout;
2430                                 continue 'descend_newtypes;
2431                             }
2432                         }
2433
2434                         bug!("receiver has no non-zero-sized fields {:?}", fat_pointer_layout);
2435                     }
2436
2437                     fat_pointer_layout.ty
2438                 };
2439
2440                 // We now have a type like `*mut RcBox<dyn Trait>`. Change its layout
2441                 // to that of `*mut ()`, a thin pointer, but keep the same type; this
2442                 // is understood as a special case elsewhere in the compiler.
2443                 let unit_pointer_ty = cx.tcx().mk_mut_ptr(cx.tcx().mk_unit());
2444                 layout = cx.layout_of(unit_pointer_ty);
2445                 layout.ty = fat_pointer_ty;
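                 // Net effect (sketch): for a call through `Rc<dyn Trait>`, the
                 // receiver keeps `ty = *mut RcBox<dyn Trait>` but gets the thin
                 // `Scalar` layout of `*mut ()`, so only the data pointer is
                 // passed; the vtable is handled at the call site.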
2446             }
2447             ArgAbi::new(layout)
2448         })
2449     }
2450
2451     fn new_internal(
2452         cx: &C,
2453         sig: ty::PolyFnSig<'tcx>,
2454         extra_args: &[Ty<'tcx>],
2455         caller_location: Option<Ty<'tcx>>,
2456         mk_arg_type: impl Fn(Ty<'tcx>, Option<usize>) -> ArgAbi<'tcx, Ty<'tcx>>,
2457     ) -> Self {
2458         debug!("FnAbi::new_internal({:?}, {:?})", sig, extra_args);
2459
2460         let sig = cx.tcx().normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig);
2461
2462         use rustc_target::spec::abi::Abi::*;
2463         let conv = match cx.tcx().sess.target.target.adjust_abi(sig.abi) {
2464             RustIntrinsic | PlatformIntrinsic | Rust | RustCall => Conv::Rust,
2465
2466             // It's the ABI's job to select this, not ours.
2467             System => bug!("system abi should be selected elsewhere"),
2468             EfiApi => bug!("eficall abi should be selected elsewhere"),
2469
2470             Stdcall => Conv::X86Stdcall,
2471             Fastcall => Conv::X86Fastcall,
2472             Vectorcall => Conv::X86VectorCall,
2473             Thiscall => Conv::X86ThisCall,
2474             C => Conv::C,
2475             Unadjusted => Conv::C,
2476             Win64 => Conv::X86_64Win64,
2477             SysV64 => Conv::X86_64SysV,
2478             Aapcs => Conv::ArmAapcs,
2479             PtxKernel => Conv::PtxKernel,
2480             Msp430Interrupt => Conv::Msp430Intr,
2481             X86Interrupt => Conv::X86Intr,
2482             AmdGpuKernel => Conv::AmdGpuKernel,
2483
2484             // These API constants ought to be more specific...
2485             Cdecl => Conv::C,
2486         };
2487
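         // For the "rust-call" ABI, the trailing tuple argument is flattened
         // below; e.g. (sketch) a closure whose signature is
         //
         //     extern "rust-call" fn(&mut Self, (u32, String))
         //
         // ends up with the argument list `[&mut Self, u32, String]`.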
2488         let mut inputs = sig.inputs();
2489         let extra_args = if sig.abi == RustCall {
2490             assert!(!sig.c_variadic && extra_args.is_empty());
2491
2492             if let Some(input) = sig.inputs().last() {
2493                 if let ty::Tuple(tupled_arguments) = input.kind {
2494                     inputs = &sig.inputs()[0..sig.inputs().len() - 1];
2495                     tupled_arguments.iter().map(|k| k.expect_ty()).collect()
2496                 } else {
2497                     bug!(
2498                         "argument to function with \"rust-call\" ABI \
2499                             is not a tuple"
2500                     );
2501                 }
2502             } else {
2503                 bug!(
2504                     "function with \"rust-call\" ABI \
2505                         has no tupled argument to untuple"
2506                 );
2507             }
2508         } else {
2509             assert!(sig.c_variadic || extra_args.is_empty());
2510             extra_args.to_vec()
2511         };
2512
2513         let target = &cx.tcx().sess.target.target;
2514         let target_env_gnu_like = matches!(&target.target_env[..], "gnu" | "musl");
2515         let win_x64_gnu =
2516             target.target_os == "windows" && target.arch == "x86_64" && target.target_env == "gnu";
2517         let linux_s390x_gnu_like =
2518             target.target_os == "linux" && target.arch == "s390x" && target_env_gnu_like;
2519         let linux_sparc64_gnu_like =
2520             target.target_os == "linux" && target.arch == "sparc64" && target_env_gnu_like;
2521         let linux_powerpc_gnu_like =
2522             target.target_os == "linux" && target.arch == "powerpc" && target_env_gnu_like;
2523         let rust_abi = match sig.abi {
2524             RustIntrinsic | PlatformIntrinsic | Rust | RustCall => true,
2525             _ => false,
2526         };
2527
2528         // Handle safe Rust thin and fat pointers.
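         // Rough summary (sketch) of the attributes the closure below emits
         // for a safe pointer:
         //
         //     &T (freeze)    -> NonNull + NoAlias + ReadOnly      (Frozen)
         //     &T (!freeze)   -> NonNull                           (Shared)
         //     &mut T         -> NonNull + NoAlias                 (UniqueBorrowed)
         //     Box<T>         -> NonNull + NoAlias, pointee_size=0 (UniqueOwned)
         //
         // where the `NoAlias`/`ReadOnly` of `Frozen` and `UniqueBorrowed`
         // apply only to arguments, not to the return place.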
2529         let adjust_for_rust_scalar = |attrs: &mut ArgAttributes,
2530                                       scalar: &Scalar,
2531                                       layout: TyLayout<'tcx>,
2532                                       offset: Size,
2533                                       is_return: bool| {
2534             // Booleans are always an i1 that needs to be zero-extended.
2535             if scalar.is_bool() {
2536                 attrs.set(ArgAttribute::ZExt);
2537                 return;
2538             }
2539
2540             // Only pointer types are handled below.
2541             if scalar.value != Pointer {
2542                 return;
2543             }
2544
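             // A non-wrapping valid range that excludes 0 means the pointer
             // can never be null.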
2545             if scalar.valid_range.start() < scalar.valid_range.end() {
2546                 if *scalar.valid_range.start() > 0 {
2547                     attrs.set(ArgAttribute::NonNull);
2548                 }
2549             }
2550
2551             if let Some(pointee) = layout.pointee_info_at(cx, offset) {
2552                 if let Some(kind) = pointee.safe {
2553                     attrs.pointee_align = Some(pointee.align);
2554
2555                     // `Box` pointers (`UniqueOwned`) are not necessarily dereferenceable
2556                     // for the entire duration of the function, as they can be deallocated
2557                     // at any time. Set their valid size to 0.
2558                     attrs.pointee_size = match kind {
2559                         PointerKind::UniqueOwned => Size::ZERO,
2560                         _ => pointee.size,
2561                     };
2562
2563                     // `Box` pointer parameters never alias, because ownership is
2564                     // transferred. `&mut` pointer parameters never alias other
2565                     // parameters or mutable global data.
2566                     //
2567                     // `&T` where `T` contains no `UnsafeCell<U>` is immutable,
2568                     // and can be marked as both `readonly` and `noalias`, as
2569                     // LLVM's definition of `noalias` is based solely on memory
2570                     // dependencies rather than pointer equality.
2571                     let no_alias = match kind {
2572                         PointerKind::Shared => false,
2573                         PointerKind::UniqueOwned => true,
2574                         PointerKind::Frozen | PointerKind::UniqueBorrowed => !is_return,
2575                     };
2576                     if no_alias {
2577                         attrs.set(ArgAttribute::NoAlias);
2578                     }
2579
2580                     if kind == PointerKind::Frozen && !is_return {
2581                         attrs.set(ArgAttribute::ReadOnly);
2582                     }
2583                 }
2584             }
2585         };
2586
2587         let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| {
2588             let is_return = arg_idx.is_none();
2589             let mut arg = mk_arg_type(ty, arg_idx);
2590             if arg.layout.is_zst() {
2591                 // For some forsaken reason, x86_64-pc-windows-gnu
2592                 // doesn't ignore zero-sized struct arguments.
2593                 // The same is true for {s390x,sparc64,powerpc}-unknown-linux-{gnu,musl}.
2594                 if is_return
2595                     || rust_abi
2596                     || (!win_x64_gnu
2597                         && !linux_s390x_gnu_like
2598                         && !linux_sparc64_gnu_like
2599                         && !linux_powerpc_gnu_like)
2600                 {
2601                     arg.mode = PassMode::Ignore;
2602                 }
2603             }
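             // (Sketch: a zero-sized argument under `extern "C"` is thus still
             // passed on x86_64-pc-windows-gnu, while the Rust ABI always
             // ignores it.)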
2604
2605             // FIXME(eddyb) other ABIs don't have logic for scalar pairs.
2606             if !is_return && rust_abi {
2607                 if let Abi::ScalarPair(ref a, ref b) = arg.layout.abi {
2608                     let mut a_attrs = ArgAttributes::new();
2609                     let mut b_attrs = ArgAttributes::new();
2610                     adjust_for_rust_scalar(&mut a_attrs, a, arg.layout, Size::ZERO, false);
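                         // The second scalar starts at the first scalar's size,
                         // rounded up to the second scalar's alignment.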
2611                     adjust_for_rust_scalar(
2612                         &mut b_attrs,
2613                         b,
2614                         arg.layout,
2615                         a.value.size(cx).align_to(b.value.align(cx).abi),
2616                         false,
2617                     );
2618                     arg.mode = PassMode::Pair(a_attrs, b_attrs);
2619                     return arg;
2620                 }
2621             }
2622
2623             if let Abi::Scalar(ref scalar) = arg.layout.abi {
2624                 if let PassMode::Direct(ref mut attrs) = arg.mode {
2625                     adjust_for_rust_scalar(attrs, scalar, arg.layout, Size::ZERO, is_return);
2626                 }
2627             }
2628
2629             arg
2630         };
2631
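         // The final argument list is: the declared inputs, then any untupled
         // "rust-call" or C-variadic extra arguments, then (for `#[track_caller]`
         // functions) a hidden `&'static Location` argument.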
2632         let mut fn_abi = FnAbi {
2633             ret: arg_of(sig.output(), None),
2634             args: inputs
2635                 .iter()
2636                 .cloned()
2637                 .chain(extra_args)
2638                 .chain(caller_location)
2639                 .enumerate()
2640                 .map(|(i, ty)| arg_of(ty, Some(i)))
2641                 .collect(),
2642             c_variadic: sig.c_variadic,
2643             fixed_count: inputs.len(),
2644             conv,
2645         };
2646         fn_abi.adjust_for_abi(cx, sig.abi);
2647         fn_abi
2648     }
2649
2650     fn adjust_for_abi(&mut self, cx: &C, abi: SpecAbi) {
2651         if abi == SpecAbi::Unadjusted {
2652             return;
2653         }
2654
2655         if abi == SpecAbi::Rust
2656             || abi == SpecAbi::RustCall
2657             || abi == SpecAbi::RustIntrinsic
2658             || abi == SpecAbi::PlatformIntrinsic
2659         {
2660             let fixup = |arg: &mut ArgAbi<'tcx, Ty<'tcx>>| {
2661                 if arg.is_ignore() {
2662                     return;
2663                 }
2664
2665                 match arg.layout.abi {
2666                     Abi::Aggregate { .. } => {}
2667
2668                     // This is a fun case! The gist of what this is doing is
2669                     // that we want callers and callees to always agree on the
2670                     // ABI of how they pass SIMD arguments. If we were to *not*
2671                     // make these arguments indirect, then they'd be immediates
2672                     // in LLVM, which means that they'd use whatever the
2673                     // appropriate ABI is for the callee and the caller. That
2674                     // means, for example, if the caller doesn't have AVX
2675                     // enabled but the callee does, then passing an AVX argument
2676                     // across this boundary would cause corrupt data to show up.
2677                     //
2678                     // This problem is fixed by unconditionally passing SIMD
2679                     // arguments through memory between callers and callees
2680                     // which should get them all to agree on ABI regardless of
2681                     // target feature sets. Some more information about this
2682                     // issue can be found in #44367.
2683                     //
2684                     // Note that the platform intrinsic ABI is exempt here, as
2685                     // that's how we connect up to LLVM; it's unstable anyway,
2686                     // and we control all calls to it in libstd.
2687                     Abi::Vector { .. }
2688                         if abi != SpecAbi::PlatformIntrinsic
2689                             && cx.tcx().sess.target.target.options.simd_types_indirect =>
2690                     {
2691                         arg.make_indirect();
2692                         return;
2693                     }
2694
2695                     _ => return,
2696                 }
2697
2698                 let size = arg.layout.size;
2699                 if arg.layout.is_unsized() || size > Pointer.size(cx) {
2700                     arg.make_indirect();
2701                 } else {
2702                     // We want to pass small aggregates as immediates, but using
2703                     // an LLVM aggregate type for this leads to bad optimizations,
2704                     // so we pick an appropriately sized integer type instead.
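                         // (Sketch: on a 64-bit target a 2-byte aggregate is cast
                         // to an `i16`-sized integer; anything larger than a
                         // pointer was already made indirect above.)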
2705                     arg.cast_to(Reg { kind: RegKind::Integer, size });
2706                 }
2707             };
2708             fixup(&mut self.ret);
2709             for arg in &mut self.args {
2710                 fixup(arg);
2711             }
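             // An indirect return is marked `StructRet` (LLVM's `sret`): the
             // caller allocates the return slot and passes a pointer to it as
             // a hidden argument.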
2712             if let PassMode::Indirect(ref mut attrs, _) = self.ret.mode {
2713                 attrs.set(ArgAttribute::StructRet);
2714             }
2715             return;
2716         }
2717
2718         if let Err(msg) = self.adjust_for_cabi(cx, abi) {
2719             cx.tcx().sess.fatal(&msg);
2720         }
2721     }
2722 }