use crate::session::{self, DataTypeKind};
use crate::ty::{self, Ty, TyCtxt, TypeFoldable, ReprOptions};

use syntax::ast::{self, Ident, IntTy, UintTy};
use syntax::attr;
use syntax_pos::DUMMY_SP;

use std::cmp;
use std::fmt;
use std::i128;
use std::iter;
use std::mem;
use std::ops::Bound;

use crate::hir;
use crate::ich::StableHashingContext;
use crate::mir::{GeneratorLayout, GeneratorSavedLocal};
use crate::ty::GeneratorSubsts;
use crate::ty::subst::Subst;
use rustc_data_structures::bit_set::BitSet;
use rustc_data_structures::indexed_vec::{IndexVec, Idx};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher,
                                           StableHasherResult};

pub use rustc_target::abi::*;
use rustc_target::spec::{HasTargetSpec, abi::Abi as SpecAbi};
use rustc_target::abi::call::{
    ArgAttribute, ArgAttributes, ArgType, Conv, FnType, IgnoreMode, PassMode, Reg, RegKind
};

pub trait IntegerExt {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx>;
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
    fn repr_discr<'tcx>(
        tcx: TyCtxt<'tcx>,
        ty: Ty<'tcx>,
        repr: &ReprOptions,
        min: i128,
        max: i128,
    ) -> (Integer, bool);
}

impl IntegerExt for Integer {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx> {
        match (*self, signed) {
            (I8, false) => tcx.types.u8,
            (I16, false) => tcx.types.u16,
            (I32, false) => tcx.types.u32,
            (I64, false) => tcx.types.u64,
            (I128, false) => tcx.types.u128,
            (I8, true) => tcx.types.i8,
            (I16, true) => tcx.types.i16,
            (I32, true) => tcx.types.i32,
            (I64, true) => tcx.types.i64,
            (I128, true) => tcx.types.i128,
        }
    }

    /// Gets the `Integer` type from an `attr::IntType`.
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
        let dl = cx.data_layout();

        match ity {
            attr::SignedInt(IntTy::I8) | attr::UnsignedInt(UintTy::U8) => I8,
            attr::SignedInt(IntTy::I16) | attr::UnsignedInt(UintTy::U16) => I16,
            attr::SignedInt(IntTy::I32) | attr::UnsignedInt(UintTy::U32) => I32,
            attr::SignedInt(IntTy::I64) | attr::UnsignedInt(UintTy::U64) => I64,
            attr::SignedInt(IntTy::I128) | attr::UnsignedInt(UintTy::U128) => I128,
            attr::SignedInt(IntTy::Isize) | attr::UnsignedInt(UintTy::Usize) => {
                dl.ptr_sized_integer()
            }
        }
    }

    /// Finds the appropriate `Integer` type and signedness for the given
    /// signed discriminant range and `#[repr]` attribute.
    /// N.B.: u128 values above i128::MAX will be treated as signed, but
    /// that shouldn't affect anything, other than maybe debuginfo.
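    ///
    /// For example (illustrative values): a discriminant range of `0..=255`
    /// with no `#[repr]` hint yields `(I8, false)`, while `-1..=128` yields
    /// `(I16, true)`, since any negative value rules out the unsigned types
    /// and `I8` cannot hold `128` when signed.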
    fn repr_discr<'tcx>(
        tcx: TyCtxt<'tcx>,
        ty: Ty<'tcx>,
        repr: &ReprOptions,
        min: i128,
        max: i128,
    ) -> (Integer, bool) {
        // Theoretically, negative values could be larger in unsigned representation
        // than the unsigned representation of the signed minimum. However, if there
        // are any negative values, the only valid unsigned representation is u128
        // which can fit all i128 values, so the result remains unaffected.
        let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
        let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));

        let mut min_from_extern = None;
        let min_default = I8;

        if let Some(ity) = repr.int {
            let discr = Integer::from_attr(&tcx, ity);
            let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
            if discr < fit {
                bug!("Integer::repr_discr: `#[repr]` hint too small for \
                      discriminant range of enum `{}`", ty)
            }
            return (discr, ity.is_signed());
        }

        if repr.c() {
            match &tcx.sess.target.target.arch[..] {
                // WARNING: the ARM EABI has two variants; the one corresponding
                // to `at_least == I32` appears to be used on Linux and NetBSD,
                // but some systems may use the variant corresponding to no
                // lower bound. However, we don't run on those yet...?
                "arm" => min_from_extern = Some(I32),
                _ => min_from_extern = Some(I32),
            }
        }

        let at_least = min_from_extern.unwrap_or(min_default);

        // If there are no negative values, we can use the unsigned fit.
        if min >= 0 {
            (cmp::max(unsigned_fit, at_least), false)
        } else {
            (cmp::max(signed_fit, at_least), true)
        }
    }
}

pub trait PrimitiveExt {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
}

impl PrimitiveExt for Primitive {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
        match *self {
            Int(i, signed) => i.to_ty(tcx, signed),
            Float(FloatTy::F32) => tcx.types.f32,
            Float(FloatTy::F64) => tcx.types.f64,
            Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
        }
    }
}

/// The first half of a fat pointer.
///
/// - For a trait object, this is the address of the box.
/// - For a slice, this is the base address.
pub const FAT_PTR_ADDR: usize = 0;

/// The second half of a fat pointer.
///
/// - For a trait object, this is the address of the vtable.
/// - For a slice, this is the length.
pub const FAT_PTR_EXTRA: usize = 1;
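
// For example, `&[u8]` is represented as a (data pointer, length) pair:
// field `FAT_PTR_ADDR` is the `*const u8` and field `FAT_PTR_EXTRA` is the
// `usize` length. For `&dyn Trait`, the second field is the vtable pointer.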

#[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable)]
pub enum LayoutError<'tcx> {
    Unknown(Ty<'tcx>),
    SizeOverflow(Ty<'tcx>)
}

impl<'tcx> fmt::Display for LayoutError<'tcx> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match *self {
            LayoutError::Unknown(ty) => {
                write!(f, "the type `{:?}` has an unknown layout", ty)
            }
            LayoutError::SizeOverflow(ty) => {
                write!(f, "the type `{:?}` is too big for the current architecture", ty)
            }
        }
    }
}

fn layout_raw<'tcx>(
    tcx: TyCtxt<'tcx>,
    query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
) -> Result<&'tcx LayoutDetails, LayoutError<'tcx>> {
    ty::tls::with_related_context(tcx, move |icx| {
        let rec_limit = *tcx.sess.recursion_limit.get();
        let (param_env, ty) = query.into_parts();

        if icx.layout_depth > rec_limit {
            tcx.sess.fatal(
                &format!("overflow representing the type `{}`", ty));
        }

        // Update the ImplicitCtxt to increase the layout_depth
        let icx = ty::tls::ImplicitCtxt {
            layout_depth: icx.layout_depth + 1,
            ..icx.clone()
        };

        ty::tls::enter_context(&icx, |_| {
            let cx = LayoutCx { tcx, param_env };
            let layout = cx.layout_raw_uncached(ty);
            // Type-level uninhabitedness should always imply ABI uninhabitedness.
            if let Ok(layout) = layout {
                if ty.conservative_is_privately_uninhabited(tcx) {
                    assert!(layout.abi.is_uninhabited());
                }
            }
            layout
        })
    })
}

pub fn provide(providers: &mut ty::query::Providers<'_>) {
    *providers = ty::query::Providers {
        layout_raw,
        ..*providers
    };
}

pub struct LayoutCx<'tcx, C> {
    pub tcx: C,
    pub param_env: ty::ParamEnv<'tcx>,
}

#[derive(Copy, Clone, Debug)]
enum StructKind {
    /// A tuple, closure, or univariant which cannot be coerced to unsized.
    AlwaysSized,
    /// A univariant, the last field of which may be coerced to unsized.
    MaybeUnsized,
    /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
    Prefixed(Size, Align),
}

// Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
// This is used to go between `memory_index` (source field order to memory order)
// and `inverse_memory_index` (memory order to source field order).
// See also `FieldPlacement::Arbitrary::memory_index` for more details.
// FIXME(eddyb) build a better abstraction for permutations, if possible.
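// For example (illustrative): `invert_mapping(&[2, 0, 1])` returns
// `vec![1, 2, 0]`: `map[0] == 2` places source field 0 at memory slot 2,
// so the inverse maps slot 2 back to field 0.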
fn invert_mapping(map: &[u32]) -> Vec<u32> {
    let mut inverse = vec![0; map.len()];
    for i in 0..map.len() {
        inverse[map[i] as usize] = i as u32;
    }
    inverse
}

impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
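    // A worked example of `scalar_pair` (illustrative, assuming a target where
    // `u32` is 4-byte aligned and the aggregate alignment doesn't exceed it):
    // pairing a `u8` scalar with a `u32` scalar yields `b_offset = 4` (1 byte
    // aligned up to 4) and `size = 8`, i.e. the layout of `(u8, u32)` with
    // 3 bytes of padding after the `u8`.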
    fn scalar_pair(&self, a: Scalar, b: Scalar) -> LayoutDetails {
        let dl = self.data_layout();
        let b_align = b.value.align(dl);
        let align = a.value.align(dl).max(b_align).max(dl.aggregate_align);
        let b_offset = a.value.size(dl).align_to(b_align.abi);
        let size = (b_offset + b.value.size(dl)).align_to(align.abi);

        // HACK(nox): We iter on `b` and then `a` because `max_by_key`
        // returns the last maximum.
        let largest_niche = Niche::from_scalar(dl, b_offset, b.clone())
            .into_iter()
            .chain(Niche::from_scalar(dl, Size::ZERO, a.clone()))
            .max_by_key(|niche| niche.available(dl));

        LayoutDetails {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldPlacement::Arbitrary {
                offsets: vec![Size::ZERO, b_offset],
                memory_index: vec![0, 1]
            },
            abi: Abi::ScalarPair(a, b),
            largest_niche,
            align,
            size
        }
    }

    fn univariant_uninterned(&self,
                             ty: Ty<'tcx>,
                             fields: &[TyLayout<'_>],
                             repr: &ReprOptions,
                             kind: StructKind) -> Result<LayoutDetails, LayoutError<'tcx>> {
        let dl = self.data_layout();
        let packed = repr.packed();
        if packed && repr.align > 0 {
            bug!("struct cannot be packed and aligned");
        }

        let pack = Align::from_bytes(repr.pack as u64).unwrap();

        let mut align = if packed {
            dl.i8_align
        } else {
            dl.aggregate_align
        };

        let mut sized = true;
        let mut offsets = vec![Size::ZERO; fields.len()];
        let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();

        let mut optimize = !repr.inhibit_struct_field_reordering_opt();
        if let StructKind::Prefixed(_, align) = kind {
            optimize &= align.bytes() == 1;
        }

        if optimize {
            let end = if let StructKind::MaybeUnsized = kind {
                fields.len() - 1
            } else {
                fields.len()
            };
            let optimizing = &mut inverse_memory_index[..end];
            let field_align = |f: &TyLayout<'_>| {
                if packed { f.align.abi.min(pack) } else { f.align.abi }
            };
            match kind {
                StructKind::AlwaysSized |
                StructKind::MaybeUnsized => {
                    optimizing.sort_by_key(|&x| {
                        // Place ZSTs first to avoid "interesting offsets",
                        // especially with only one or two non-ZST fields.
                        let f = &fields[x as usize];
                        (!f.is_zst(), cmp::Reverse(field_align(f)))
                    });
                }
                StructKind::Prefixed(..) => {
                    optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
                }
            }
        }

        // inverse_memory_index holds field indices by increasing memory offset.
        // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
        // We now write field offsets to the corresponding offset slot;
        // field 5 with offset 0 puts 0 in offsets[5].
        // At the bottom of this function, we invert `inverse_memory_index` to
        // produce `memory_index` (see `invert_mapping`).
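        // For example (hypothetical field set): for fields `[u8, u32, u8]`
        // the sort above moves the `u32` first, giving
        // `inverse_memory_index = [1, 0, 2]`; this particular permutation is
        // its own inverse, so `memory_index` is also `[1, 0, 2]`.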

        let mut offset = Size::ZERO;
        let mut largest_niche = None;
        let mut largest_niche_available = 0;

        if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
            let prefix_align = if packed {
                prefix_align.min(pack)
            } else {
                prefix_align
            };
            align = align.max(AbiAndPrefAlign::new(prefix_align));
            offset = prefix_size.align_to(prefix_align);
        }

        for &i in &inverse_memory_index {
            let field = fields[i as usize];
            if !sized {
                bug!("univariant: field #{} of `{}` comes after unsized field",
                     offsets.len(), ty);
            }

            if field.is_unsized() {
                sized = false;
            }

            // Invariant: offset < dl.obj_size_bound() <= 1<<61
            let field_align = if packed {
                field.align.min(AbiAndPrefAlign::new(pack))
            } else {
                field.align
            };
            offset = offset.align_to(field_align.abi);
            align = align.max(field_align);

            debug!("univariant offset: {:?} field: {:#?}", offset, field);
            offsets[i as usize] = offset;

            if let Some(mut niche) = field.largest_niche.clone() {
                let available = niche.available(dl);
                if available > largest_niche_available {
                    largest_niche_available = available;
                    niche.offset += offset;
                    largest_niche = Some(niche);
                }
            }

            offset = offset.checked_add(field.size, dl)
                .ok_or(LayoutError::SizeOverflow(ty))?;
        }

        if repr.align > 0 {
            let repr_align = repr.align as u64;
            align = align.max(AbiAndPrefAlign::new(Align::from_bytes(repr_align).unwrap()));
            debug!("univariant repr_align: {:?}", repr_align);
        }

        debug!("univariant min_size: {:?}", offset);
        let min_size = offset;

        // As stated above, inverse_memory_index holds field indices by increasing offset.
        // This makes it an already-sorted view of the offsets vec.
        // To invert it, consider:
        // if field 5 has offset 0, then inverse_memory_index[0] is 5 (field 5
        // comes first in memory), and the inverse, memory_index[5], should be 0.
        // Note: if we didn't optimize, inverse_memory_index is the identity
        // permutation and is already its own inverse.

        let memory_index;
        if optimize {
            memory_index = invert_mapping(&inverse_memory_index);
        } else {
            memory_index = inverse_memory_index;
        }

        let size = min_size.align_to(align.abi);
        let mut abi = Abi::Aggregate { sized };

        // Unpack newtype ABIs and find scalar pairs.
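        // (Illustrative instances of what this finds: `struct Wrapper(u64)`
        // unpacks to `Abi::Scalar`, and a struct whose two non-ZST fields are
        // scalars at the matching offsets becomes `Abi::ScalarPair`, provided
        // the computed size and alignment agree.)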
        if sized && size.bytes() > 0 {
            // All other fields must be ZSTs, and we need them to all start at 0.
            let mut zst_offsets =
                offsets.iter().enumerate().filter(|&(i, _)| fields[i].is_zst());
            if zst_offsets.all(|(_, o)| o.bytes() == 0) {
                let mut non_zst_fields =
                    fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());

                match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
                    // We have exactly one non-ZST field.
                    (Some((i, field)), None, None) => {
                        // Field fills the struct and it has a scalar or scalar pair ABI.
                        if offsets[i].bytes() == 0 &&
                           align.abi == field.align.abi &&
                           size == field.size {
                            match field.abi {
                                // For plain scalars, or vectors of them, we can't unpack
                                // newtypes for `#[repr(C)]`, as that affects C ABIs.
                                Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
                                    abi = field.abi.clone();
                                }
                                // But scalar pairs are Rust-specific and get
                                // treated as aggregates by C ABIs anyway.
                                Abi::ScalarPair(..) => {
                                    abi = field.abi.clone();
                                }
                                _ => {}
                            }
                        }
                    }

                    // Two non-ZST fields, and they're both scalars.
                    (Some((i, &TyLayout {
                        details: &LayoutDetails { abi: Abi::Scalar(ref a), .. }, ..
                    })), Some((j, &TyLayout {
                        details: &LayoutDetails { abi: Abi::Scalar(ref b), .. }, ..
                    })), None) => {
                        // Order by the memory placement, not source order.
                        let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
                            ((i, a), (j, b))
                        } else {
                            ((j, b), (i, a))
                        };
                        let pair = self.scalar_pair(a.clone(), b.clone());
                        let pair_offsets = match pair.fields {
                            FieldPlacement::Arbitrary {
                                ref offsets,
                                ref memory_index
                            } => {
                                assert_eq!(memory_index, &[0, 1]);
                                offsets
                            }
                            _ => bug!()
                        };
                        if offsets[i] == pair_offsets[0] &&
                           offsets[j] == pair_offsets[1] &&
                           align == pair.align &&
                           size == pair.size {
                            // We can use `ScalarPair` only when it matches our
                            // already computed layout (including `#[repr(C)]`).
                            abi = pair.abi;
                        }
                    }

                    _ => {}
                }
            }
        }

        if sized && fields.iter().any(|f| f.abi.is_uninhabited()) {
            abi = Abi::Uninhabited;
        }

        Ok(LayoutDetails {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldPlacement::Arbitrary {
                offsets,
                memory_index
            },
            abi,
            largest_niche,
            align,
            size
        })
    }

    fn layout_raw_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx LayoutDetails, LayoutError<'tcx>> {
        let tcx = self.tcx;
        let param_env = self.param_env;
        let dl = self.data_layout();
        let scalar_unit = |value: Primitive| {
            let bits = value.size(dl).bits();
            assert!(bits <= 128);
            Scalar {
                value,
                valid_range: 0..=(!0 >> (128 - bits))
            }
        };
        let scalar = |value: Primitive| {
            tcx.intern_layout(LayoutDetails::scalar(self, scalar_unit(value)))
        };

        let univariant = |fields: &[TyLayout<'_>], repr: &ReprOptions, kind| {
            Ok(tcx.intern_layout(self.univariant_uninterned(ty, fields, repr, kind)?))
        };
        debug_assert!(!ty.has_infer_types());

        Ok(match ty.sty {
            // Basic scalars.
            ty::Bool => {
                tcx.intern_layout(LayoutDetails::scalar(self, Scalar {
                    value: Int(I8, false),
                    valid_range: 0..=1
                }))
            }
            ty::Char => {
                tcx.intern_layout(LayoutDetails::scalar(self, Scalar {
                    value: Int(I32, false),
                    valid_range: 0..=0x10FFFF
                }))
            }
            ty::Int(ity) => {
                scalar(Int(Integer::from_attr(dl, attr::SignedInt(ity)), true))
            }
            ty::Uint(ity) => {
                scalar(Int(Integer::from_attr(dl, attr::UnsignedInt(ity)), false))
            }
            ty::Float(fty) => scalar(Float(fty)),
            ty::FnPtr(_) => {
                let mut ptr = scalar_unit(Pointer);
                ptr.valid_range = 1..=*ptr.valid_range.end();
                tcx.intern_layout(LayoutDetails::scalar(self, ptr))
            }

            // The never type.
            ty::Never => {
                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldPlacement::Union(0),
                    abi: Abi::Uninhabited,
                    largest_niche: None,
                    align: dl.i8_align,
                    size: Size::ZERO
                })
            }

            // Potentially-fat pointers.
            ty::Ref(_, pointee, _) |
            ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                let mut data_ptr = scalar_unit(Pointer);
                if !ty.is_unsafe_ptr() {
                    data_ptr.valid_range = 1..=*data_ptr.valid_range.end();
                }

                let pointee = tcx.normalize_erasing_regions(param_env, pointee);
                if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
                    return Ok(tcx.intern_layout(LayoutDetails::scalar(self, data_ptr)));
                }

                let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
                let metadata = match unsized_part.sty {
                    ty::Foreign(..) => {
                        return Ok(tcx.intern_layout(LayoutDetails::scalar(self, data_ptr)));
                    }
                    ty::Slice(_) | ty::Str => {
                        scalar_unit(Int(dl.ptr_sized_integer(), false))
                    }
                    ty::Dynamic(..) => {
                        let mut vtable = scalar_unit(Pointer);
                        vtable.valid_range = 1..=*vtable.valid_range.end();
                        vtable
                    }
                    _ => return Err(LayoutError::Unknown(unsized_part))
                };

                // Effectively a (ptr, meta) tuple.
                tcx.intern_layout(self.scalar_pair(data_ptr, metadata))
            }

            // Arrays and slices.
            ty::Array(element, mut count) => {
                if count.has_projections() {
                    count = tcx.normalize_erasing_regions(param_env, count);
                    if count.has_projections() {
                        return Err(LayoutError::Unknown(ty));
                    }
                }

                let count = count.try_eval_usize(tcx, param_env).ok_or(LayoutError::Unknown(ty))?;
                let element = self.layout_of(element)?;
                let size = element.size.checked_mul(count, dl)
                    .ok_or(LayoutError::SizeOverflow(ty))?;

                let abi = if count != 0 && ty.conservative_is_privately_uninhabited(tcx) {
                    Abi::Uninhabited
                } else {
                    Abi::Aggregate { sized: true }
                };

                let largest_niche = if count != 0 {
                    element.largest_niche.clone()
                } else {
                    None
                };

                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldPlacement::Array {
                        stride: element.size,
                        count
                    },
                    abi,
                    largest_niche,
                    align: element.align,
                    size
                })
            }
            ty::Slice(element) => {
                let element = self.layout_of(element)?;
                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldPlacement::Array {
                        stride: element.size,
                        count: 0
                    },
                    abi: Abi::Aggregate { sized: false },
                    largest_niche: None,
                    align: element.align,
                    size: Size::ZERO
                })
            }
            ty::Str => {
                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldPlacement::Array {
                        stride: Size::from_bytes(1),
                        count: 0
                    },
                    abi: Abi::Aggregate { sized: false },
                    largest_niche: None,
                    align: dl.i8_align,
                    size: Size::ZERO
                })
            }

            // Odd unit types.
            ty::FnDef(..) => {
                univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?
            }
            ty::Dynamic(..) | ty::Foreign(..) => {
                let mut unit = self.univariant_uninterned(ty, &[], &ReprOptions::default(),
                  StructKind::AlwaysSized)?;
                match unit.abi {
                    Abi::Aggregate { ref mut sized } => *sized = false,
                    _ => bug!()
                }
                tcx.intern_layout(unit)
            }

            ty::Generator(def_id, substs, _) => self.generator_layout(ty, def_id, &substs)?,

            ty::Closure(def_id, ref substs) => {
                let tys = substs.upvar_tys(def_id, tcx);
                univariant(&tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    StructKind::AlwaysSized)?
            }

            ty::Tuple(tys) => {
                let kind = if tys.len() == 0 {
                    StructKind::AlwaysSized
                } else {
                    StructKind::MaybeUnsized
                };

                univariant(&tys.iter().map(|k| {
                    self.layout_of(k.expect_ty())
                }).collect::<Result<Vec<_>, _>>()?, &ReprOptions::default(), kind)?
            }

            // SIMD vector types.
            ty::Adt(def, ..) if def.repr.simd() => {
                let element = self.layout_of(ty.simd_type(tcx))?;
                let count = ty.simd_size(tcx) as u64;
                assert!(count > 0);
                let scalar = match element.abi {
                    Abi::Scalar(ref scalar) => scalar.clone(),
                    _ => {
                        tcx.sess.fatal(&format!("monomorphising SIMD type `{}` with \
                                                 a non-machine element type `{}`",
                                                ty, element.ty));
                    }
                };
                let size = element.size.checked_mul(count, dl)
                    .ok_or(LayoutError::SizeOverflow(ty))?;
                let align = dl.vector_align(size);
                let size = size.align_to(align.abi);

                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldPlacement::Array {
                        stride: element.size,
                        count
                    },
                    abi: Abi::Vector {
                        element: scalar,
                        count
                    },
                    largest_niche: element.largest_niche.clone(),
                    size,
                    align,
                })
            }

            // ADTs.
            ty::Adt(def, substs) => {
                // Cache the field layouts.
                let variants = def.variants.iter().map(|v| {
                    v.fields.iter().map(|field| {
                        self.layout_of(field.ty(tcx, substs))
                    }).collect::<Result<Vec<_>, _>>()
                }).collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                if def.is_union() {
                    let packed = def.repr.packed();
                    if packed && def.repr.align > 0 {
                        bug!("Union cannot be packed and aligned");
                    }

                    let pack = Align::from_bytes(def.repr.pack as u64).unwrap();

                    let mut align = if packed {
                        dl.i8_align
                    } else {
                        dl.aggregate_align
                    };

                    if def.repr.align > 0 {
                        let repr_align = def.repr.align as u64;
                        align = align.max(
                            AbiAndPrefAlign::new(Align::from_bytes(repr_align).unwrap()));
                    }

                    let optimize = !def.repr.inhibit_union_abi_opt();
                    let mut size = Size::ZERO;
                    let mut abi = Abi::Aggregate { sized: true };
                    let index = VariantIdx::new(0);
                    for field in &variants[index] {
                        assert!(!field.is_unsized());

                        let field_align = if packed {
                            field.align.min(AbiAndPrefAlign::new(pack))
                        } else {
                            field.align
                        };
                        align = align.max(field_align);

                        // If all non-ZST fields have the same ABI, forward this ABI
                        if optimize && !field.is_zst() {
                            // Normalize scalar_unit to the maximal valid range
                            let field_abi = match &field.abi {
                                Abi::Scalar(x) => Abi::Scalar(scalar_unit(x.value)),
                                Abi::ScalarPair(x, y) => {
                                    Abi::ScalarPair(
                                        scalar_unit(x.value),
                                        scalar_unit(y.value),
                                    )
                                }
                                Abi::Vector { element: x, count } => {
                                    Abi::Vector {
                                        element: scalar_unit(x.value),
                                        count: *count,
                                    }
                                }
                                Abi::Uninhabited |
                                Abi::Aggregate { .. } => Abi::Aggregate { sized: true },
                            };

                            if size == Size::ZERO {
                                // First non-ZST field: initialize `abi`.
                                abi = field_abi;
                            } else if abi != field_abi {
                                // Different fields have different ABIs: reset to Aggregate.
                                abi = Abi::Aggregate { sized: true };
                            }
                        }

                        size = cmp::max(size, field.size);
                    }

                    return Ok(tcx.intern_layout(LayoutDetails {
                        variants: Variants::Single { index },
                        fields: FieldPlacement::Union(variants[index].len()),
                        abi,
                        largest_niche: None,
                        align,
                        size: size.align_to(align.abi)
                    }));
                }

                // A variant is absent if it's uninhabited and only has ZST fields.
                // Present uninhabited variants only require space for their fields,
                // but *not* an encoding of the discriminant (e.g., a tag value).
                // See issue #49298 for more details on the need to leave space
                // for non-ZST uninhabited data (mostly partial initialization).
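                // (For instance, in `enum E { A(u32, Infallible), B(Infallible) }`
                // the variant `B` is absent, since `Infallible` is an uninhabited
                // ZST, while `A` is uninhabited but still present because its
                // `u32` needs space. Illustrative; `Infallible` stands in for any
                // empty type.)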
                let absent = |fields: &[TyLayout<'_>]| {
                    let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
                    let is_zst = fields.iter().all(|f| f.is_zst());
                    uninhabited && is_zst
                };
                let (present_first, present_second) = {
                    let mut present_variants = variants.iter_enumerated().filter_map(|(i, v)| {
                        if absent(v) {
                            None
                        } else {
                            Some(i)
                        }
                    });
                    (present_variants.next(), present_variants.next())
                };
                if present_first.is_none() {
                    // Uninhabited because it has no variants, or only absent ones.
                    return tcx.layout_raw(param_env.and(tcx.types.never));
                }

                let is_struct = !def.is_enum() ||
                    // Only one variant is present.
                    (present_second.is_none() &&
                    // Representation optimizations are allowed.
                    !def.repr.inhibit_enum_layout_opt());
                if is_struct {
                    // Struct, or univariant enum equivalent to a struct.
                    // (Typechecking will reject discriminant-sizing attrs.)

                    let v = present_first.unwrap();
                    let kind = if def.is_enum() || variants[v].len() == 0 {
                        StructKind::AlwaysSized
                    } else {
                        let param_env = tcx.param_env(def.did);
                        let last_field = def.variants[v].fields.last().unwrap();
                        let always_sized = tcx.type_of(last_field.did)
                                              .is_sized(tcx.at(DUMMY_SP), param_env);
                        if !always_sized { StructKind::MaybeUnsized }
                        else { StructKind::AlwaysSized }
                    };

                    let mut st = self.univariant_uninterned(ty, &variants[v], &def.repr, kind)?;
                    st.variants = Variants::Single { index: v };
                    let (start, end) = self.tcx.layout_scalar_valid_range(def.did);
                    match st.abi {
                        Abi::Scalar(ref mut scalar) |
                        Abi::ScalarPair(ref mut scalar, _) => {
                            // the asserts ensure that we are not using the
                            // `#[rustc_layout_scalar_valid_range(n)]`
                            // attribute to widen the range of anything as that would probably
                            // result in UB somewhere
                            // FIXME(eddyb) the asserts are probably not needed,
                            // as larger validity ranges would result in missed
                            // optimizations, *not* wrongly assuming the inner
                            // value is valid. e.g. unions enlarge validity ranges,
                            // because the values may be uninitialized.
                            if let Bound::Included(start) = start {
                                // FIXME(eddyb) this might be incorrect - it doesn't
                                // account for wrap-around (end < start) ranges.
                                assert!(*scalar.valid_range.start() <= start);
                                scalar.valid_range = start..=*scalar.valid_range.end();
                            }
                            if let Bound::Included(end) = end {
                                // FIXME(eddyb) this might be incorrect - it doesn't
                                // account for wrap-around (end < start) ranges.
                                assert!(*scalar.valid_range.end() >= end);
                                scalar.valid_range = *scalar.valid_range.start()..=end;
                            }

                            // Update `largest_niche` if we have introduced a larger niche.
                            let niche = Niche::from_scalar(dl, Size::ZERO, scalar.clone());
                            if let Some(niche) = niche {
                                match &st.largest_niche {
                                    Some(largest_niche) => {
                                        // Replace the existing niche even if they're equal,
                                        // because this one is at a lower offset.
                                        if largest_niche.available(dl) <= niche.available(dl) {
                                            st.largest_niche = Some(niche);
                                        }
                                    }
                                    None => st.largest_niche = Some(niche),
                                }
                            }
                        }
                        _ => assert!(
                            start == Bound::Unbounded && end == Bound::Unbounded,
                            "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
                            def,
                            st,
                        ),
                    }

                    return Ok(tcx.intern_layout(st));
                }

                // The current code for niche-filling relies on variant indices
                // instead of actual discriminants, so dataful enums with
                // explicit discriminants (RFC #2363) would misbehave.
                let no_explicit_discriminants = def.variants.iter_enumerated()
                    .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i.as_u32()));

                // Niche-filling enum optimization.
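                // (The canonical instance: `Option<&T>` has `Some` as the
                // dataful variant and `None` as its only niche variant; `None`
                // is encoded as the pointer value 0, so the whole enum is
                // pointer-sized with no separate tag.)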
                if !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants {
                    let mut dataful_variant = None;
                    let mut niche_variants = VariantIdx::MAX..=VariantIdx::new(0);

                    // Find one non-ZST variant.
                    'variants: for (v, fields) in variants.iter_enumerated() {
                        if absent(fields) {
                            continue 'variants;
                        }
                        for f in fields {
                            if !f.is_zst() {
                                if dataful_variant.is_none() {
                                    dataful_variant = Some(v);
                                    continue 'variants;
                                } else {
                                    dataful_variant = None;
                                    break 'variants;
                                }
                            }
                        }
                        niche_variants = *niche_variants.start().min(&v)..=v;
                    }

                    if niche_variants.start() > niche_variants.end() {
                        dataful_variant = None;
                    }

                    if let Some(i) = dataful_variant {
                        let count = (
                            niche_variants.end().as_u32() - niche_variants.start().as_u32() + 1
                        ) as u128;
                        // FIXME(#62691) use the largest niche across all fields,
                        // not just the first one.
                        for (field_index, &field) in variants[i].iter().enumerate() {
                            let niche = match &field.largest_niche {
                                Some(niche) => niche,
                                _ => continue,
                            };
                            let (niche_start, niche_scalar) = match niche.reserve(self, count) {
                                Some(pair) => pair,
                                None => continue,
                            };

                            let mut align = dl.aggregate_align;
                            let st = variants.iter_enumerated().map(|(j, v)| {
                                let mut st = self.univariant_uninterned(ty, v,
                                    &def.repr, StructKind::AlwaysSized)?;
                                st.variants = Variants::Single { index: j };

                                align = align.max(st.align);

                                Ok(st)
                            }).collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                            let offset = st[i].fields.offset(field_index) + niche.offset;
                            let size = st[i].size;

                            let mut abi = match st[i].abi {
                                Abi::Scalar(_) => Abi::Scalar(niche_scalar.clone()),
                                Abi::ScalarPair(ref first, ref second) => {
                                    // We need to use scalar_unit to reset the
                                    // valid range to the maximal one for that
                                    // primitive, because only the niche is
                                    // guaranteed to be initialised, not the
                                    // other primitive.
                                    if offset.bytes() == 0 {
                                        Abi::ScalarPair(
                                            niche_scalar.clone(),
                                            scalar_unit(second.value),
                                        )
                                    } else {
                                        Abi::ScalarPair(
                                            scalar_unit(first.value),
                                            niche_scalar.clone(),
                                        )
                                    }
                                }
                                _ => Abi::Aggregate { sized: true },
                            };

                            if st.iter().all(|v| v.abi.is_uninhabited()) {
                                abi = Abi::Uninhabited;
                            }

                            let largest_niche =
                                Niche::from_scalar(dl, offset, niche_scalar.clone());

                            return Ok(tcx.intern_layout(LayoutDetails {
                                variants: Variants::Multiple {
                                    discr: niche_scalar,
                                    discr_kind: DiscriminantKind::Niche {
                                        dataful_variant: i,
                                        niche_variants,
                                        niche_start,
                                    },
                                    discr_index: 0,
                                    variants: st,
                                },
                                fields: FieldPlacement::Arbitrary {
                                    offsets: vec![offset],
                                    memory_index: vec![0]
                                },
                                abi,
                                largest_niche,
                                size,
                                align,
                            }));
                        }
                    }
                }

                let (mut min, mut max) = (i128::max_value(), i128::min_value());
                let discr_type = def.repr.discr_type();
                let bits = Integer::from_attr(self, discr_type).size().bits();
                for (i, discr) in def.discriminants(tcx) {
                    if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
                        continue;
                    }
                    let mut x = discr.val as i128;
                    if discr_type.is_signed() {
                        // sign extend the raw representation to be an i128
                        x = (x << (128 - bits)) >> (128 - bits);
                    }
                    if x < min { min = x; }
                    if x > max { max = x; }
                }
                // We might have no inhabited variants, so pretend there's at least one.
                if (min, max) == (i128::max_value(), i128::min_value()) {
                    min = 0;
                    max = 0;
                }
                assert!(min <= max, "discriminant range is {}...{}", min, max);
                let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max);

                let mut align = dl.aggregate_align;
                let mut size = Size::ZERO;

                // We're interested in the smallest alignment, so start large.
                let mut start_align = Align::from_bytes(256).unwrap();
                assert_eq!(Integer::for_align(dl, start_align), None);

                // repr(C) on an enum tells us to make a (tag, union) layout,
                // so we need to grow the prefix alignment to be at least
                // the alignment of the union. (This value is used both for
                // determining the alignment of the overall enum, and for
                // determining the alignment of the payload after the tag.)
                let mut prefix_align = min_ity.align(dl).abi;
                if def.repr.c() {
                    for fields in &variants {
                        for field in fields {
                            prefix_align = prefix_align.max(field.align.abi);
                        }
                    }
                }

                // Create the set of structs that represent each variant.
                let mut layout_variants = variants.iter_enumerated().map(|(i, field_layouts)| {
                    let mut st = self.univariant_uninterned(ty, &field_layouts,
                        &def.repr, StructKind::Prefixed(min_ity.size(), prefix_align))?;
                    st.variants = Variants::Single { index: i };
                    // Find the first field we can't move later
                    // to make room for a larger discriminant.
                    for field in st.fields.index_by_increasing_offset().map(|j| field_layouts[j]) {
                        if !field.is_zst() || field.align.abi.bytes() != 1 {
                            start_align = start_align.min(field.align.abi);
                            break;
                        }
                    }
                    size = cmp::max(size, st.size);
                    align = align.max(st.align);
                    Ok(st)
                }).collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                // Align the maximum variant size to the largest alignment.
                size = size.align_to(align.abi);

                if size.bytes() >= dl.obj_size_bound() {
                    return Err(LayoutError::SizeOverflow(ty));
                }

                let typeck_ity = Integer::from_attr(dl, def.repr.discr_type());
                if typeck_ity < min_ity {
                    // It is a bug if layout decided on a greater discriminant size than typeck did
                    // at this point (based on the values the discriminant can take on). This
                    // matters because the discriminant will be loaded and then stored into a
                    // variable of the type computed by typeck. Consider the buggy case: typeck
                    // decided on a byte-sized discriminant, but layout thinks we need 16 bits to
                    // store all discriminant values. Then, in codegen, storing this 16-bit
                    // discriminant into an 8-bit temporary would have to discard some of the
                    // space needed to represent it (or layout is wrong in thinking it needs
                    // 16 bits).
                    bug!("layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
                         min_ity, typeck_ity);
                    // However, it is fine to make the discriminant type however large (as an
                    // optimisation) after this point; we'll just truncate the value we load in
                    // codegen.
                }

                // Check to see if we should use a different type for the
                // discriminant. We can safely use a type with the same size
                // as the alignment of the first field of each variant.
                // We increase the size of the discriminant to avoid LLVM copying
                // padding when it doesn't need to. This normally causes unaligned
                // load/stores and excessive memcpy/memset operations. By using a
                // bigger integer size, LLVM can be sure about its contents and
                // won't be so conservative.
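                // (E.g., illustratively, for `enum E { A(u64), B }` the value
                // range only needs an `I8` tag, but the first field's 8-byte
                // alignment lets the tag widen to `I64` without growing the
                // enum.)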

                // Use the initial field alignment
                let mut ity = if def.repr.c() || def.repr.int.is_some() {
                    min_ity
                } else {
                    Integer::for_align(dl, start_align).unwrap_or(min_ity)
                };

                // If the alignment is not larger than the chosen discriminant size,
                // don't use the alignment as the final size.
                if ity <= min_ity {
                    ity = min_ity;
                } else {
                    // Patch up the variants' first few fields.
                    let old_ity_size = min_ity.size();
                    let new_ity_size = ity.size();
                    for variant in &mut layout_variants {
                        match variant.fields {
                            FieldPlacement::Arbitrary { ref mut offsets, .. } => {
                                for i in offsets {
                                    if *i <= old_ity_size {
                                        assert_eq!(*i, old_ity_size);
                                        *i = new_ity_size;
                                    }
                                }
                                // We might be making the struct larger.
                                if variant.size <= old_ity_size {
                                    variant.size = new_ity_size;
                                }
                            }
                            _ => bug!()
                        }
                    }
                }

                let tag_mask = !0u128 >> (128 - ity.size().bits());
                let tag = Scalar {
                    value: Int(ity, signed),
                    valid_range: (min as u128 & tag_mask)..=(max as u128 & tag_mask),
                };
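                // (Note that the masking permits wrap-around ranges: a signed
                // `I8` tag covering discriminants -1..=0 gets
                // `valid_range = 255..=0`, which is interpreted as a wrapping
                // range. Illustrative values.)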
                let mut abi = Abi::Aggregate { sized: true };
                if tag.value.size(dl) == size {
                    abi = Abi::Scalar(tag.clone());
                } else {
                    // Try to use a ScalarPair for all tagged enums.
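                    // (Illustratively, `Result<u32, u32>` qualifies: each
                    // variant has exactly one non-ZST field, a `u32`, at the
                    // same offset, so the enum can become
                    // `ScalarPair(tag, u32)`.)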
1167                     let mut common_prim = None;
1168                     for (field_layouts, layout_variant) in variants.iter().zip(&layout_variants) {
1169                         let offsets = match layout_variant.fields {
1170                             FieldPlacement::Arbitrary { ref offsets, .. } => offsets,
1171                             _ => bug!(),
1172                         };
1173                         let mut fields = field_layouts
1174                             .iter()
1175                             .zip(offsets)
1176                             .filter(|p| !p.0.is_zst());
1177                         let (field, offset) = match (fields.next(), fields.next()) {
1178                             (None, None) => continue,
1179                             (Some(pair), None) => pair,
1180                             _ => {
1181                                 common_prim = None;
1182                                 break;
1183                             }
1184                         };
1185                         let prim = match field.details.abi {
1186                             Abi::Scalar(ref scalar) => scalar.value,
1187                             _ => {
1188                                 common_prim = None;
1189                                 break;
1190                             }
1191                         };
1192                         if let Some(pair) = common_prim {
1193                             // This is pretty conservative. We could go fancier
1194                             // by conflating things like i32 and u32, or even
1195                             // realising that (u8, u8) could just cohabit with
1196                             // u16 or even u32.
1197                             if pair != (prim, offset) {
1198                                 common_prim = None;
1199                                 break;
1200                             }
1201                         } else {
1202                             common_prim = Some((prim, offset));
1203                         }
1204                     }
1205                     if let Some((prim, offset)) = common_prim {
1206                         let pair = self.scalar_pair(tag.clone(), scalar_unit(prim));
1207                         let pair_offsets = match pair.fields {
1208                             FieldPlacement::Arbitrary {
1209                                 ref offsets,
1210                                 ref memory_index
1211                             } => {
1212                                 assert_eq!(memory_index, &[0, 1]);
1213                                 offsets
1214                             }
1215                             _ => bug!()
1216                         };
1217                         if pair_offsets[0] == Size::ZERO &&
1218                             pair_offsets[1] == *offset &&
1219                             align == pair.align &&
1220                             size == pair.size {
1221                             // We can use `ScalarPair` only when it matches our
1222                             // already computed layout (including `#[repr(C)]`).
1223                             abi = pair.abi;
1224                         }
1225                     }
1226                 }
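                     // For instance, `enum E { A(u32), B(u32) }` typically has its
                     // payload at the same offset, with the same primitive (u32), in
                     // both variants, so its ABI is promoted from `Aggregate` to
                     // `ScalarPair(tag, u32)`.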
1227
1228                 if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
1229                     abi = Abi::Uninhabited;
1230                 }
1231
1232                 let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag.clone());
1233
1234                 tcx.intern_layout(LayoutDetails {
1235                     variants: Variants::Multiple {
1236                         discr: tag,
1237                         discr_kind: DiscriminantKind::Tag,
1238                         discr_index: 0,
1239                         variants: layout_variants,
1240                     },
1241                     fields: FieldPlacement::Arbitrary {
1242                         offsets: vec![Size::ZERO],
1243                         memory_index: vec![0]
1244                     },
1245                     largest_niche,
1246                     abi,
1247                     align,
1248                     size
1249                 })
1250             }
1251
1252             // Types with no meaningful known layout.
1253             ty::Projection(_) | ty::Opaque(..) => {
1254                 let normalized = tcx.normalize_erasing_regions(param_env, ty);
1255                 if ty == normalized {
1256                     return Err(LayoutError::Unknown(ty));
1257                 }
1258                 tcx.layout_raw(param_env.and(normalized))?
1259             }
1260
1261             ty::Bound(..) |
1262             ty::Placeholder(..) |
1263             ty::UnnormalizedProjection(..) |
1264             ty::GeneratorWitness(..) |
1265             ty::Infer(_) => {
1266                 bug!("LayoutDetails::compute: unexpected type `{}`", ty)
1267             }
1268
1269             ty::Param(_) | ty::Error => {
1270                 return Err(LayoutError::Unknown(ty));
1271             }
1272         })
1273     }
1274 }
1275
1276 /// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
1277 #[derive(Clone, Debug, PartialEq)]
1278 enum SavedLocalEligibility {
1279     Unassigned,
1280     Assigned(VariantIdx),
1281     // FIXME: Use newtype_index so we aren't wasting bytes
1282     Ineligible(Option<u32>),
1283 }
1284
1285 // When laying out generators, we divide our saved local fields into two
1286 // categories: overlap-eligible and overlap-ineligible.
1287 //
1288 // Those fields which are ineligible for overlap go in a "prefix" at the
1289 // beginning of the layout, and always have space reserved for them.
1290 //
1291 // Overlap-eligible fields are only assigned to one variant, so we lay
1292 // those fields out for each variant and put them right after the
1293 // prefix.
1294 //
1295 // Finally, in the layout details, we point to the fields from the
1296 // variants they are assigned to. It is possible for some fields to be
1297 // included in multiple variants. No field ever "moves around" in the
1298 // layout; its offset is always the same.
1299 //
1300 // Also included in the layout are the upvars and the discriminant.
1301 // These are included as fields on the "outer" layout; they are not part
1302 // of any variant.
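     // For example, consider a generator with saved locals `a`, `b` and `c`,
     // where `a` is live across two suspension points while `b` and `c` each
     // appear in a single variant. Then `a` is ineligible and lands in the
     // prefix (after the upvars and the discriminant), while `b` and `c` are
     // overlap-eligible and may be laid out at the same offset past the end
     // of the prefix.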
1303 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
1304     /// Computes the eligibility and assignment of each local.
1305     fn generator_saved_local_eligibility(&self, info: &GeneratorLayout<'tcx>)
1306     -> (BitSet<GeneratorSavedLocal>, IndexVec<GeneratorSavedLocal, SavedLocalEligibility>) {
1307         use SavedLocalEligibility::*;
1308
1309         let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> =
1310             IndexVec::from_elem_n(Unassigned, info.field_tys.len());
1311
1312         // The saved locals not eligible for overlap. These will get
1313         // "promoted" to the prefix of our generator.
1314         let mut ineligible_locals = BitSet::new_empty(info.field_tys.len());
1315
1316         // Figure out which of our saved locals are fields in only
1317         // one variant. The rest are deemed ineligible for overlap.
1318         for (variant_index, fields) in info.variant_fields.iter_enumerated() {
1319             for local in fields {
1320                 match assignments[*local] {
1321                     Unassigned => {
1322                         assignments[*local] = Assigned(variant_index);
1323                     }
1324                     Assigned(idx) => {
1325                         // We've already seen this local at another suspension
1326                         // point, so it is no longer a candidate.
1327                         trace!("removing local {:?} in >1 variant ({:?}, {:?})",
1328                                local, variant_index, idx);
1329                         ineligible_locals.insert(*local);
1330                         assignments[*local] = Ineligible(None);
1331                     }
1332                     Ineligible(_) => {},
1333                 }
1334             }
1335         }
1336
1337         // Next, check every pair of eligible locals to see if they
1338         // conflict.
1339         for local_a in info.storage_conflicts.rows() {
1340             let conflicts_a = info.storage_conflicts.count(local_a);
1341             if ineligible_locals.contains(local_a) {
1342                 continue;
1343             }
1344
1345             for local_b in info.storage_conflicts.iter(local_a) {
1346                 // `local_a` and `local_b` are storage-live at the same time, so
1347                 // they cannot overlap in the generator layout. The only way to
1348                 // guarantee that is if they are in the same variant, or if one
1349                 // is ineligible (and hence stored in every variant).
1350                 if ineligible_locals.contains(local_b) ||
1351                     assignments[local_a] == assignments[local_b]
1352                 {
1353                     continue;
1354                 }
1355
1356                 // If they conflict, we will choose one to make ineligible.
1357                 // This is not always optimal; it's just a greedy heuristic that
1358                 // seems to produce good results most of the time.
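                // For instance, if `x` conflicts with both `y` and `z` while
                // `y` and `z` do not conflict with each other, `x` has the
                // higher conflict count and is the one made ineligible,
                // leaving `y` and `z` free to overlap.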
1359                 let conflicts_b = info.storage_conflicts.count(local_b);
1360                 let (remove, other) = if conflicts_a > conflicts_b {
1361                     (local_a, local_b)
1362                 } else {
1363                     (local_b, local_a)
1364                 };
1365                 ineligible_locals.insert(remove);
1366                 assignments[remove] = Ineligible(None);
1367                 trace!("removing local {:?} due to conflict with {:?}", remove, other);
1368             }
1369         }
1370
1371         // Write down the order of our locals that will be promoted to the prefix.
1372         {
1373             let mut idx = 0u32;
1374             for local in ineligible_locals.iter() {
1375                 assignments[local] = Ineligible(Some(idx));
1376                 idx += 1;
1377             }
1378         }
1379         debug!("generator saved local assignments: {:?}", assignments);
1380
1381         (ineligible_locals, assignments)
1382     }
1383
1384     /// Computes the full generator layout.
1385     fn generator_layout(
1386         &self,
1387         ty: Ty<'tcx>,
1388         def_id: hir::def_id::DefId,
1389         substs: &GeneratorSubsts<'tcx>,
1390     ) -> Result<&'tcx LayoutDetails, LayoutError<'tcx>> {
1391         use SavedLocalEligibility::*;
1392         let tcx = self.tcx;
1393
1394         let subst_field = |ty: Ty<'tcx>| { ty.subst(tcx, substs.substs) };
1395
1396         let info = tcx.generator_layout(def_id);
1397         let (ineligible_locals, assignments) = self.generator_saved_local_eligibility(&info);
1398
1399         // Build a prefix layout, including "promoting" all ineligible
1400         // locals as part of the prefix. We compute the layout of all of
1401         // these fields at once to get optimal packing.
1402         let discr_index = substs.prefix_tys(def_id, tcx).count();
1403         // FIXME(eddyb) set the correct validity range for the discriminant.
1404         let discr_layout = self.layout_of(substs.discr_ty(tcx))?;
1405         let discr = match &discr_layout.abi {
1406             Abi::Scalar(s) => s.clone(),
1407             _ => bug!(),
1408         };
1409         // FIXME(eddyb) wrap each promoted type in `MaybeUninit` so that they
1410         // don't poison the `largest_niche` or `abi` fields of `prefix`.
1411         let promoted_layouts = ineligible_locals.iter()
1412             .map(|local| subst_field(info.field_tys[local]))
1413             .map(|ty| self.layout_of(ty));
1414         let prefix_layouts = substs.prefix_tys(def_id, tcx)
1415             .map(|ty| self.layout_of(ty))
1416             .chain(iter::once(Ok(discr_layout)))
1417             .chain(promoted_layouts)
1418             .collect::<Result<Vec<_>, _>>()?;
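             // The prefix is thus upvars ++ [discriminant] ++ promoted locals,
             // in that order, which is why `discr_index` above is simply the
             // number of upvars.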
1419         let mut prefix = self.univariant_uninterned(
1420             ty,
1421             &prefix_layouts,
1422             &ReprOptions::default(),
1423             StructKind::AlwaysSized,
1424         )?;
1425         // FIXME(eddyb) need `MaybeUninit` around promoted types (see above).
1426         prefix.largest_niche = None;
1427
1428         let (prefix_size, prefix_align) = (prefix.size, prefix.align);
1429
1430         // Split the prefix layout into the "outer" fields (upvars and
1431         // discriminant) and the "promoted" fields. Promoted fields will
1432         // get included in each variant that requested them in
1433         // GeneratorLayout.
1434         debug!("prefix = {:#?}", prefix);
1435         let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
1436             FieldPlacement::Arbitrary { mut offsets, memory_index } => {
1437                 let mut inverse_memory_index = invert_mapping(&memory_index);
1438
1439                 // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
1440                 // "outer" and "promoted" fields respectively.
1441                 let b_start = (discr_index + 1) as u32;
1442                 let offsets_b = offsets.split_off(b_start as usize);
1443                 let offsets_a = offsets;
1444
1445                 // Disentangle the "a" and "b" components of `inverse_memory_index`
1446                 // by preserving the order but keeping only one disjoint "half" each.
1447                 // FIXME(eddyb) build a better abstraction for permutations, if possible.
1448                 let inverse_memory_index_b: Vec<_> =
1449                     inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
1450                 inverse_memory_index.retain(|&i| i < b_start);
1451                 let inverse_memory_index_a = inverse_memory_index;
1452
1453                 // Since `inverse_memory_index_{a,b}` each refer only to their
1454                 // respective fields, they can be safely inverted.
1455                 let memory_index_a = invert_mapping(&inverse_memory_index_a);
1456                 let memory_index_b = invert_mapping(&inverse_memory_index_b);
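                     // For instance, with `memory_index = [2, 0, 3, 1]` and
                     // `b_start = 2`: the inverse is `[1, 3, 0, 2]`; keeping only
                     // entries `< 2` gives `inverse_memory_index_a = [1, 0]`, and
                     // subtracting `b_start` from the rest gives
                     // `inverse_memory_index_b = [1, 0]`; inverting those back
                     // yields `memory_index_a = [1, 0]` and `memory_index_b = [1, 0]`.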
1457
1458                 let outer_fields = FieldPlacement::Arbitrary {
1459                     offsets: offsets_a,
1460                     memory_index: memory_index_a,
1461                 };
1462                 (outer_fields, offsets_b, memory_index_b)
1463             }
1464             _ => bug!(),
1465         };
1466
1467         let mut size = prefix.size;
1468         let mut align = prefix.align;
1469         let variants = info.variant_fields.iter_enumerated().map(|(index, variant_fields)| {
1470             // Only include overlap-eligible fields when we compute our variant layout.
1471             let variant_only_tys = variant_fields
1472                 .iter()
1473                 .filter(|local| {
1474                     match assignments[**local] {
1475                         Unassigned => bug!(),
1476                         Assigned(v) if v == index => true,
1477                         Assigned(_) => bug!("assignment does not match variant"),
1478                         Ineligible(_) => false,
1479                     }
1480                 })
1481                 .map(|local| subst_field(info.field_tys[*local]));
1482
1483             let mut variant = self.univariant_uninterned(
1484                 ty,
1485                 &variant_only_tys
1486                     .map(|ty| self.layout_of(ty))
1487                     .collect::<Result<Vec<_>, _>>()?,
1488                 &ReprOptions::default(),
1489                 StructKind::Prefixed(prefix_size, prefix_align.abi))?;
1490             variant.variants = Variants::Single { index };
1491
1492             let (offsets, memory_index) = match variant.fields {
1493                 FieldPlacement::Arbitrary { offsets, memory_index } => {
1494                     (offsets, memory_index)
1495                 }
1496                 _ => bug!(),
1497             };
1498
1499             // Now, stitch the promoted and variant-only fields back together in
1500             // the order they are mentioned by our GeneratorLayout.
1501             // Because we only use some subset (that can differ between variants)
1502             // of the promoted fields, we can't just pick those elements of the
1503             // `promoted_memory_index` (as we'd end up with gaps).
1504             // So instead, we build an "inverse memory_index", as if all of the
1505             // promoted fields were being used, but leave the elements not in the
1506             // subset as `INVALID_FIELD_IDX`, which we can filter out later to
1507             // obtain a valid (bijective) mapping.
1508             const INVALID_FIELD_IDX: u32 = !0;
1509             let mut combined_inverse_memory_index =
1510                 vec![INVALID_FIELD_IDX; promoted_memory_index.len() + memory_index.len()];
1511             let mut offsets_and_memory_index = offsets.into_iter().zip(memory_index);
1512             let combined_offsets = variant_fields.iter().enumerate().map(|(i, local)| {
1513                 let (offset, memory_index) = match assignments[*local] {
1514                     Unassigned => bug!(),
1515                     Assigned(_) => {
1516                         let (offset, memory_index) = offsets_and_memory_index.next().unwrap();
1517                         (offset, promoted_memory_index.len() as u32 + memory_index)
1518                     }
1519                     Ineligible(field_idx) => {
1520                         let field_idx = field_idx.unwrap() as usize;
1521                         (promoted_offsets[field_idx], promoted_memory_index[field_idx])
1522                     }
1523                 };
1524                 combined_inverse_memory_index[memory_index as usize] = i as u32;
1525                 offset
1526             }).collect();
1527
1528             // Remove the unused slots and invert the mapping to obtain the
1529             // combined `memory_index` (also see previous comment).
1530             combined_inverse_memory_index.retain(|&i| i != INVALID_FIELD_IDX);
1531             let combined_memory_index = invert_mapping(&combined_inverse_memory_index);
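                 // For instance, if this variant uses only the second of two
                 // promoted fields, the combined inverse index is built as if
                 // both were present; the slot belonging to the unused first
                 // field keeps `INVALID_FIELD_IDX` and was filtered out by the
                 // `retain` above.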
1532
1533             variant.fields = FieldPlacement::Arbitrary {
1534                 offsets: combined_offsets,
1535                 memory_index: combined_memory_index,
1536             };
1537
1538             size = size.max(variant.size);
1539             align = align.max(variant.align);
1540             Ok(variant)
1541         }).collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1542
1543         size = size.align_to(align.abi);
1544
1545         let abi = if prefix.abi.is_uninhabited() ||
1546                      variants.iter().all(|v| v.abi.is_uninhabited()) {
1547             Abi::Uninhabited
1548         } else {
1549             Abi::Aggregate { sized: true }
1550         };
1551
1552         let layout = tcx.intern_layout(LayoutDetails {
1553             variants: Variants::Multiple {
1554                 discr,
1555                 discr_kind: DiscriminantKind::Tag,
1556                 discr_index,
1557                 variants,
1558             },
1559             fields: outer_fields,
1560             abi,
1561             largest_niche: prefix.largest_niche,
1562             size,
1563             align,
1564         });
1565         debug!("generator layout ({:?}): {:#?}", ty, layout);
1566         Ok(layout)
1567     }
1568
1569     /// Invoked from `layout_of` (once the `layout_raw` query has
1570     /// completed) to record the final layout of each type.
1571     #[inline(always)]
1572     fn record_layout_for_printing(&self, layout: TyLayout<'tcx>) {
1573         // If we are running with `-Zprint-type-sizes`, maybe record layouts
1574         // for dumping later.
1575         if self.tcx.sess.opts.debugging_opts.print_type_sizes {
1576             self.record_layout_for_printing_outlined(layout)
1577         }
1578     }
1579
1580     fn record_layout_for_printing_outlined(&self, layout: TyLayout<'tcx>) {
1581         // Ignore layouts computed in non-empty parameter environments, as
1582         // well as non-monomorphic layouts, as the user only wants to see
1583         // what results from the final codegen session.
1584         if
1585             layout.ty.has_param_types() ||
1586             layout.ty.has_self_ty() ||
1587             !self.param_env.caller_bounds.is_empty()
1588         {
1589             return;
1590         }
1591
1592         // (delay format until we actually need it)
1593         let record = |kind, packed, opt_discr_size, variants| {
1594             let type_desc = format!("{:?}", layout.ty);
1595             self.tcx.sess.code_stats.borrow_mut().record_type_size(kind,
1596                                                                    type_desc,
1597                                                                    layout.align.abi,
1598                                                                    layout.size,
1599                                                                    packed,
1600                                                                    opt_discr_size,
1601                                                                    variants);
1602         };
1603
1604         let adt_def = match layout.ty.sty {
1605             ty::Adt(ref adt_def, _) => {
1606                 debug!("print-type-size t: `{:?}` process adt", layout.ty);
1607                 adt_def
1608             }
1609
1610             ty::Closure(..) => {
1611                 debug!("print-type-size t: `{:?}` record closure", layout.ty);
1612                 record(DataTypeKind::Closure, false, None, vec![]);
1613                 return;
1614             }
1615
1616             _ => {
1617                 debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
1618                 return;
1619             }
1620         };
1621
1622         let adt_kind = adt_def.adt_kind();
1623         let adt_packed = adt_def.repr.packed();
1624
1625         let build_variant_info = |n: Option<Ident>,
1626                                   flds: &[ast::Name],
1627                                   layout: TyLayout<'tcx>| {
1628             let mut min_size = Size::ZERO;
1629             let field_info: Vec<_> = flds.iter().enumerate().map(|(i, &name)| {
1630                 match layout.field(self, i) {
1631                     Err(err) => {
1632                         bug!("no layout found for field {}: `{:?}`", name, err);
1633                     }
1634                     Ok(field_layout) => {
1635                         let offset = layout.fields.offset(i);
1636                         let field_end = offset + field_layout.size;
1637                         if min_size < field_end {
1638                             min_size = field_end;
1639                         }
1640                         session::FieldInfo {
1641                             name: name.to_string(),
1642                             offset: offset.bytes(),
1643                             size: field_layout.size.bytes(),
1644                             align: field_layout.align.abi.bytes(),
1645                         }
1646                     }
1647                 }
1648             }).collect();
1649
1650             session::VariantInfo {
1651                 name: n.map(|n| n.to_string()),
1652                 kind: if layout.is_unsized() {
1653                     session::SizeKind::Min
1654                 } else {
1655                     session::SizeKind::Exact
1656                 },
1657                 align: layout.align.abi.bytes(),
1658                 size: if min_size.bytes() == 0 {
1659                     layout.size.bytes()
1660                 } else {
1661                     min_size.bytes()
1662                 },
1663                 fields: field_info,
1664             }
1665         };
1666
1667         match layout.variants {
1668             Variants::Single { index } => {
1669                 debug!("print-type-size `{:#?}` variant {}",
1670                        layout, adt_def.variants[index].ident);
1671                 if !adt_def.variants.is_empty() {
1672                     let variant_def = &adt_def.variants[index];
1673                     let fields: Vec<_> =
1674                         variant_def.fields.iter().map(|f| f.ident.name).collect();
1675                     record(adt_kind.into(),
1676                            adt_packed,
1677                            None,
1678                            vec![build_variant_info(Some(variant_def.ident),
1679                                                    &fields,
1680                                                    layout)]);
1681                 } else {
1682                     // (This case arises for *empty* enums, so record it
1683                     // with zero variants.)
1684                     record(adt_kind.into(), adt_packed, None, vec![]);
1685                 }
1686             }
1687
1688             Variants::Multiple { ref discr, ref discr_kind, .. } => {
1689                 debug!("print-type-size `{:#?}` adt general variants def {}",
1690                        layout.ty, adt_def.variants.len());
1691                 let variant_infos: Vec<_> =
1692                     adt_def.variants.iter_enumerated().map(|(i, variant_def)| {
1693                         let fields: Vec<_> =
1694                             variant_def.fields.iter().map(|f| f.ident.name).collect();
1695                         build_variant_info(Some(variant_def.ident),
1696                                            &fields,
1697                                            layout.for_variant(self, i))
1698                     })
1699                     .collect();
1700                 record(adt_kind.into(), adt_packed, match discr_kind {
1701                     DiscriminantKind::Tag => Some(discr.value.size(self)),
1702                     _ => None
1703                 }, variant_infos);
1704             }
1705         }
1706     }
1707 }
1708
1709 /// Type size "skeleton", i.e., the only information determining a type's size.
1710 /// While this is conservative (aside from constant sizes, only pointers,
1711 /// newtypes thereof, and null-pointer-optimized enums are allowed), it is
1712 /// enough to statically check common use cases of `transmute`.
1713 #[derive(Copy, Clone, Debug)]
1714 pub enum SizeSkeleton<'tcx> {
1715     /// Any statically computable Layout.
1716     Known(Size),
1717
1718     /// A potentially-fat pointer.
1719     Pointer {
1720         /// If true, this pointer is never null.
1721         non_zero: bool,
1722         /// The type which determines the unsized metadata, if any,
1723         /// of this pointer. Either a type parameter or a projection
1724         /// depending on one, with regions erased.
1725         tail: Ty<'tcx>
1726     }
1727 }
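     // For example, `&T` for a still-generic type parameter `T` has no
     // statically known layout, but its skeleton is
     // `Pointer { non_zero: true, tail: T }`; two such skeletons with equal
     // tails are guaranteed to describe matching sizes, which is what
     // `same_size` below relies on.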
1728
1729 impl<'tcx> SizeSkeleton<'tcx> {
1730     pub fn compute(
1731         ty: Ty<'tcx>,
1732         tcx: TyCtxt<'tcx>,
1733         param_env: ty::ParamEnv<'tcx>,
1734     ) -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
1735         debug_assert!(!ty.has_infer_types());
1736
1737         // First try computing a static layout.
1738         let err = match tcx.layout_of(param_env.and(ty)) {
1739             Ok(layout) => {
1740                 return Ok(SizeSkeleton::Known(layout.size));
1741             }
1742             Err(err) => err
1743         };
1744
1745         match ty.sty {
1746             ty::Ref(_, pointee, _) |
1747             ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
1748                 let non_zero = !ty.is_unsafe_ptr();
1749                 let tail = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
1750                 match tail.sty {
1751                     ty::Param(_) | ty::Projection(_) => {
1752                         debug_assert!(tail.has_param_types() || tail.has_self_ty());
1753                         Ok(SizeSkeleton::Pointer {
1754                             non_zero,
1755                             tail: tcx.erase_regions(&tail)
1756                         })
1757                     }
1758                     _ => {
1759                         bug!("SizeSkeleton::compute({}): layout errored ({}), yet \
1760                               tail `{}` is not a type parameter or a projection",
1761                              ty, err, tail)
1762                     }
1763                 }
1764             }
1765
1766             ty::Adt(def, substs) => {
1767                 // Only newtypes and enums with the nullable pointer optimization.
1768                 if def.is_union() || def.variants.is_empty() || def.variants.len() > 2 {
1769                     return Err(err);
1770                 }
1771
1772                 // Get a zero-sized variant or a pointer newtype.
1773                 let zero_or_ptr_variant = |i| {
1774                     let i = VariantIdx::new(i);
1775                     let fields = def.variants[i].fields.iter().map(|field| {
1776                         SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env)
1777                     });
1778                     let mut ptr = None;
1779                     for field in fields {
1780                         let field = field?;
1781                         match field {
1782                             SizeSkeleton::Known(size) => {
1783                                 if size.bytes() > 0 {
1784                                     return Err(err);
1785                                 }
1786                             }
1787                             SizeSkeleton::Pointer {..} => {
1788                                 if ptr.is_some() {
1789                                     return Err(err);
1790                                 }
1791                                 ptr = Some(field);
1792                             }
1793                         }
1794                     }
1795                     Ok(ptr)
1796                 };
1797
1798                 let v0 = zero_or_ptr_variant(0)?;
1799                 // Newtype.
1800                 if def.variants.len() == 1 {
1801                     if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
1802                         return Ok(SizeSkeleton::Pointer {
1803                             non_zero: non_zero || match tcx.layout_scalar_valid_range(def.did) {
1804                                 (Bound::Included(start), Bound::Unbounded) => start > 0,
1805                                 (Bound::Included(start), Bound::Included(end)) =>
1806                                     0 < start && start < end,
1807                                 _ => false,
1808                             },
1809                             tail,
1810                         });
1811                     } else {
1812                         return Err(err);
1813                     }
1814                 }
1815
1816                 let v1 = zero_or_ptr_variant(1)?;
1817                 // Nullable pointer enum optimization.
1818                 match (v0, v1) {
1819                     (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None) |
1820                     (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
1821                         Ok(SizeSkeleton::Pointer {
1822                             non_zero: false,
1823                             tail,
1824                         })
1825                     }
1826                     _ => Err(err)
1827                 }
1828             }
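            // For instance, `Option<&T>` has a zero-sized variant (`None`) and
            // a non-null pointer variant (`Some`), so it collapses to
            // `Pointer { non_zero: false, tail: T }`: the null value is free to
            // encode `None`.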
1829
1830             ty::Projection(_) | ty::Opaque(..) => {
1831                 let normalized = tcx.normalize_erasing_regions(param_env, ty);
1832                 if ty == normalized {
1833                     Err(err)
1834                 } else {
1835                     SizeSkeleton::compute(normalized, tcx, param_env)
1836                 }
1837             }
1838
1839             _ => Err(err)
1840         }
1841     }
1842
1843     pub fn same_size(self, other: SizeSkeleton<'_>) -> bool {
1844         match (self, other) {
1845             (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
1846             (SizeSkeleton::Pointer { tail: a, .. },
1847              SizeSkeleton::Pointer { tail: b, .. }) => a == b,
1848             _ => false
1849         }
1850     }
1851 }
1852
1853 pub trait HasTyCtxt<'tcx>: HasDataLayout {
1854     fn tcx(&self) -> TyCtxt<'tcx>;
1855 }
1856
1857 pub trait HasParamEnv<'tcx> {
1858     fn param_env(&self) -> ty::ParamEnv<'tcx>;
1859 }
1860
1861 impl<'tcx> HasDataLayout for TyCtxt<'tcx> {
1862     fn data_layout(&self) -> &TargetDataLayout {
1863         &self.data_layout
1864     }
1865 }
1866
1867 impl<'tcx> HasTyCtxt<'tcx> for TyCtxt<'tcx> {
1868     fn tcx(&self) -> TyCtxt<'tcx> {
1869         self.global_tcx()
1870     }
1871 }
1872
1873 impl<'tcx, C> HasParamEnv<'tcx> for LayoutCx<'tcx, C> {
1874     fn param_env(&self) -> ty::ParamEnv<'tcx> {
1875         self.param_env
1876     }
1877 }
1878
1879 impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
1880     fn data_layout(&self) -> &TargetDataLayout {
1881         self.tcx.data_layout()
1882     }
1883 }
1884
1885 impl<'tcx, T: HasTyCtxt<'tcx>> HasTyCtxt<'tcx> for LayoutCx<'tcx, T> {
1886     fn tcx(&self) -> TyCtxt<'tcx> {
1887         self.tcx.tcx()
1888     }
1889 }
1890
1891 pub trait MaybeResult<T> {
1892     type Error;
1893
1894     fn from(x: Result<T, Self::Error>) -> Self;
1895     fn to_result(self) -> Result<T, Self::Error>;
1896 }
1897
1898 impl<T> MaybeResult<T> for T {
1899     type Error = !;
1900
1901     fn from(x: Result<T, Self::Error>) -> Self {
1902         let Ok(x) = x;
1903         x
1904     }
1905     fn to_result(self) -> Result<T, Self::Error> {
1906         Ok(self)
1907     }
1908 }
1909
1910 impl<T, E> MaybeResult<T> for Result<T, E> {
1911     type Error = E;
1912
1913     fn from(x: Result<T, Self::Error>) -> Self {
1914         x
1915     }
1916     fn to_result(self) -> Result<T, Self::Error> {
1917         self
1918     }
1919 }
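     // `MaybeResult` lets the same layout-walking code serve both infallible
     // contexts, where `C::TyLayout` is `TyLayout<'tcx>` itself and the error
     // type is `!`, and fallible ones, where it is
     // `Result<TyLayout<'tcx>, LayoutError<'tcx>>`; see the `to_result` calls
     // in `for_variant` and `pointee_info_at` below.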
1920
1921 pub type TyLayout<'tcx> = ::rustc_target::abi::TyLayout<'tcx, Ty<'tcx>>;
1922
1923 impl<'tcx> LayoutOf for LayoutCx<'tcx, TyCtxt<'tcx>> {
1924     type Ty = Ty<'tcx>;
1925     type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;
1926
1927     /// Computes the layout of a type. Note that this implicitly
1928     /// executes in "reveal all" mode.
1929     fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout {
1930         let param_env = self.param_env.with_reveal_all();
1931         let ty = self.tcx.normalize_erasing_regions(param_env, ty);
1932         let details = self.tcx.layout_raw(param_env.and(ty))?;
1933         let layout = TyLayout {
1934             ty,
1935             details
1936         };
1937
1938         // N.B., this recording is normally disabled; when enabled, it
1939         // can however trigger recursive invocations of `layout_of`.
1940         // Therefore, we execute it *after* the main query has
1941         // completed, to avoid problems around recursive structures
1942         // and the like. (Admittedly, I wasn't able to reproduce a problem
1943         // here, but it seems like the right thing to do. -nmatsakis)
1944         self.record_layout_for_printing(layout);
1945
1946         Ok(layout)
1947     }
1948 }
1949
1950 impl LayoutOf for LayoutCx<'tcx, ty::query::TyCtxtAt<'tcx>> {
1951     type Ty = Ty<'tcx>;
1952     type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;
1953
1954     /// Computes the layout of a type. Note that this implicitly
1955     /// executes in "reveal all" mode.
1956     fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout {
1957         let param_env = self.param_env.with_reveal_all();
1958         let ty = self.tcx.normalize_erasing_regions(param_env, ty);
1959         let details = self.tcx.layout_raw(param_env.and(ty))?;
1960         let layout = TyLayout {
1961             ty,
1962             details
1963         };
1964
1965         // N.B., this recording is normally disabled; when enabled, it
1966         // can however trigger recursive invocations of `layout_of`.
1967         // Therefore, we execute it *after* the main query has
1968         // completed, to avoid problems around recursive structures
1969         // and the like. (Admittedly, I wasn't able to reproduce a problem
1970         // here, but it seems like the right thing to do. -nmatsakis)
1971         let cx = LayoutCx {
1972             tcx: *self.tcx,
1973             param_env: self.param_env
1974         };
1975         cx.record_layout_for_printing(layout);
1976
1977         Ok(layout)
1978     }
1979 }
1980
1981 // Helper (inherent) `layout_of` methods to avoid pushing `LayoutCx` to users.
1982 impl TyCtxt<'tcx> {
1983     /// Computes the layout of a type. Note that this implicitly
1984     /// executes in "reveal all" mode.
1985     #[inline]
1986     pub fn layout_of(self, param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
1987                      -> Result<TyLayout<'tcx>, LayoutError<'tcx>> {
1988         let cx = LayoutCx {
1989             tcx: self.global_tcx(),
1990             param_env: param_env_and_ty.param_env
1991         };
1992         cx.layout_of(param_env_and_ty.value)
1993     }
1994 }
1995
1996 impl ty::query::TyCtxtAt<'tcx> {
1997     /// Computes the layout of a type. Note that this implicitly
1998     /// executes in "reveal all" mode.
1999     #[inline]
2000     pub fn layout_of(self, param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
2001                      -> Result<TyLayout<'tcx>, LayoutError<'tcx>> {
2002         let cx = LayoutCx {
2003             tcx: self.global_tcx().at(self.span),
2004             param_env: param_env_and_ty.param_env
2005         };
2006         cx.layout_of(param_env_and_ty.value)
2007     }
2008 }
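     // With these helpers a caller can write, e.g.,
     // `tcx.layout_of(param_env.and(ty))?` without constructing a `LayoutCx`
     // by hand; both methods are thin wrappers around the `LayoutOf`
     // implementations above.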
2009
2010 impl<'tcx, C> TyLayoutMethods<'tcx, C> for Ty<'tcx>
2011 where
2012     C: LayoutOf<Ty = Ty<'tcx>> + HasTyCtxt<'tcx>,
2013     C::TyLayout: MaybeResult<TyLayout<'tcx>>,
2014     C: HasParamEnv<'tcx>,
2015 {
2016     fn for_variant(this: TyLayout<'tcx>, cx: &C, variant_index: VariantIdx) -> TyLayout<'tcx> {
2017         let details = match this.variants {
2018             Variants::Single { index } if index == variant_index => this.details,
2019
2020             Variants::Single { index } => {
2021                 // Deny calling for_variant more than once for non-Single enums.
2022                 if let Ok(layout) = cx.layout_of(this.ty).to_result() {
2023                     assert_eq!(layout.variants, Variants::Single { index });
2024                 }
2025
2026                 let fields = match this.ty.sty {
2027                     ty::Adt(def, _) => def.variants[variant_index].fields.len(),
2028                     _ => bug!()
2029                 };
2030                 let tcx = cx.tcx();
2031                 tcx.intern_layout(LayoutDetails {
2032                     variants: Variants::Single { index: variant_index },
2033                     fields: FieldPlacement::Union(fields),
2034                     abi: Abi::Uninhabited,
2035                     largest_niche: None,
2036                     align: tcx.data_layout.i8_align,
2037                     size: Size::ZERO
2038                 })
2039             }
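            // Any other variant of a `Variants::Single` layout (e.g. a variant
            // that was collapsed away as uninhabited) thus gets a synthetic
            // zero-sized, uninhabited layout, since no value of that variant
            // can exist at runtime.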
2040
2041             Variants::Multiple { ref variants, .. } => {
2042                 &variants[variant_index]
2043             }
2044         };
2045
2046         assert_eq!(details.variants, Variants::Single { index: variant_index });
2047
2048         TyLayout {
2049             ty: this.ty,
2050             details
2051         }
2052     }
2053
2054     fn field(this: TyLayout<'tcx>, cx: &C, i: usize) -> C::TyLayout {
2055         let tcx = cx.tcx();
2056         let discr_layout = |discr: &Scalar| -> C::TyLayout {
2057             let layout = LayoutDetails::scalar(cx, discr.clone());
2058             MaybeResult::from(Ok(TyLayout {
2059                 details: tcx.intern_layout(layout),
2060                 ty: discr.value.to_ty(tcx),
2061             }))
2062         };
2063
2064         cx.layout_of(match this.ty.sty {
2065             ty::Bool |
2066             ty::Char |
2067             ty::Int(_) |
2068             ty::Uint(_) |
2069             ty::Float(_) |
2070             ty::FnPtr(_) |
2071             ty::Never |
2072             ty::FnDef(..) |
2073             ty::GeneratorWitness(..) |
2074             ty::Foreign(..) |
2075             ty::Dynamic(..) => {
2076                 bug!("TyLayout::field_type({:?}): not applicable", this)
2077             }
2078
2079             // Potentially-fat pointers.
2080             ty::Ref(_, pointee, _) |
2081             ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
2082                 assert!(i < this.fields.count());
2083
2084                 // Reuse the fat *T type as its own thin pointer data field.
2085                 // This provides information about e.g., DST struct pointees
2086                 // (which may have no non-DST form), and will work as long
2087                 // as the `Abi` or `FieldPlacement` is checked by users.
2088                 if i == 0 {
2089                     let nil = tcx.mk_unit();
2090                     let ptr_ty = if this.ty.is_unsafe_ptr() {
2091                         tcx.mk_mut_ptr(nil)
2092                     } else {
2093                         tcx.mk_mut_ref(tcx.lifetimes.re_static, nil)
2094                     };
2095                     return MaybeResult::from(cx.layout_of(ptr_ty).to_result().map(|mut ptr_layout| {
2096                         ptr_layout.ty = this.ty;
2097                         ptr_layout
2098                     }));
2099                 }
2100
2101                 match tcx.struct_tail_erasing_lifetimes(pointee, cx.param_env()).sty {
2102                     ty::Slice(_) |
2103                     ty::Str => tcx.types.usize,
2104                     ty::Dynamic(_, _) => {
2105                         tcx.mk_imm_ref(
2106                             tcx.lifetimes.re_static,
2107                             tcx.mk_array(tcx.types.usize, 3),
2108                         )
2109                         /* FIXME: use actual fn pointers
2110                         Warning: naively computing the number of entries in the
2111                         vtable by counting the methods on the trait + methods on
2112                         all parent traits does not work, because some methods can
2113                         be not object safe and thus excluded from the vtable.
2114                         Increase this counter if you tried to implement this but
2115                         failed to do it without duplicating a lot of code from
2116                         other places in the compiler: 2
2117                         tcx.mk_tup(&[
2118                             tcx.mk_array(tcx.types.usize, 3),
2119                             tcx.mk_array(Option<fn()>),
2120                         ])
2121                         */
2122                     }
2123                     _ => bug!("TyLayout::field_type({:?}): not applicable", this)
2124                 }
2125             }
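            // For instance, `&[u8]` has two fields here: field 0 reuses the
            // fat-pointer type itself as the thin data pointer, per the
            // comment above, and field 1 is the `usize` length. For
            // `&dyn Trait`, field 1 is instead the vtable pointer,
            // approximated in the `Dynamic` arm above as `&'static [usize; 3]`.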
2126
2127             // Arrays and slices.
2128             ty::Array(element, _) |
2129             ty::Slice(element) => element,
2130             ty::Str => tcx.types.u8,
2131
2132             // Tuples, generators and closures.
2133             ty::Closure(def_id, ref substs) => {
2134                 substs.upvar_tys(def_id, tcx).nth(i).unwrap()
2135             }
2136
2137             ty::Generator(def_id, ref substs, _) => {
2138                 match this.variants {
2139                     Variants::Single { index } => {
2140                         substs.state_tys(def_id, tcx)
2141                             .nth(index.as_usize()).unwrap()
2142                             .nth(i).unwrap()
2143                     }
2144                     Variants::Multiple { ref discr, discr_index, .. } => {
2145                         if i == discr_index {
2146                             return discr_layout(discr);
2147                         }
2148                         substs.prefix_tys(def_id, tcx).nth(i).unwrap()
2149                     }
2150                 }
2151             }
2152
2153             ty::Tuple(tys) => tys[i].expect_ty(),
2154
2155             // SIMD vector types.
2156             ty::Adt(def, ..) if def.repr.simd() => {
2157                 this.ty.simd_type(tcx)
2158             }
2159
2160             // ADTs.
2161             ty::Adt(def, substs) => {
2162                 match this.variants {
2163                     Variants::Single { index } => {
2164                         def.variants[index].fields[i].ty(tcx, substs)
2165                     }
2166
2167                     // Discriminant field for enums (where applicable).
2168                     Variants::Multiple { ref discr, .. } => {
2169                         assert_eq!(i, 0);
2170                         return discr_layout(discr);
2171                     }
2172                 }
2173             }
2174
2175             ty::Projection(_) | ty::UnnormalizedProjection(..) | ty::Bound(..) |
2176             ty::Placeholder(..) | ty::Opaque(..) | ty::Param(_) | ty::Infer(_) |
2177             ty::Error => {
2178                 bug!("TyLayout::field_type: unexpected type `{}`", this.ty)
2179             }
2180         })
2181     }
2182
2183     fn pointee_info_at(
2184         this: TyLayout<'tcx>,
2185         cx: &C,
2186         offset: Size,
2187     ) -> Option<PointeeInfo> {
2188         match this.ty.sty {
2189             ty::RawPtr(mt) if offset.bytes() == 0 => {
2190                 cx.layout_of(mt.ty).to_result().ok()
2191                     .map(|layout| PointeeInfo {
2192                         size: layout.size,
2193                         align: layout.align.abi,
2194                         safe: None,
2195                     })
2196             }
2197
2198             ty::Ref(_, ty, mt) if offset.bytes() == 0 => {
2199                 let tcx = cx.tcx();
2200                 let is_freeze = ty.is_freeze(tcx, cx.param_env(), DUMMY_SP);
2201                 let kind = match mt {
2202                     hir::MutImmutable => if is_freeze {
2203                         PointerKind::Frozen
2204                     } else {
2205                         PointerKind::Shared
2206                     },
2207                     hir::MutMutable => {
2208                         // Previously we would only emit noalias annotations for LLVM >= 6 or in
2209                         // panic=abort mode. That was deemed right, as prior versions had many bugs
2210                         // in conjunction with unwinding, but later versions didn’t seem to have
2211                         // said issues. See issue #31681.
2212                         //
2213                         // Alas, later on we encountered a case where noalias would generate wrong
2214                         // code altogether even with recent versions of LLVM in *safe* code with no
2215                         // unwinding involved. See #54462.
2216                         //
2217                         // For now, do not enable mutable_noalias by default at all, while the
2218                         // issue is being figured out.
2219                         let mutable_noalias = tcx.sess.opts.debugging_opts.mutable_noalias
2220                             .unwrap_or(false);
2221                         if mutable_noalias {
2222                             PointerKind::UniqueBorrowed
2223                         } else {
2224                             PointerKind::Shared
2225                         }
2226                     }
2227                 };
2228
2229                 cx.layout_of(ty).to_result().ok()
2230                     .map(|layout| PointeeInfo {
2231                         size: layout.size,
2232                         align: layout.align.abi,
2233                         safe: Some(kind),
2234                     })
2235             }
2236
2237             _ => {
2238                 let mut data_variant = match this.variants {
2239                     // Within the discriminant field, only the niche itself is
2240                     // always initialized, so we only check for a pointer at its
2241                     // offset.
2242                     //
2243                     // If the niche is a pointer, it's either valid (according
2244                     // to its type), or null (which the niche field's scalar
2245                     // validity range encodes).  This allows using
2246                     // `dereferenceable_or_null` for e.g., `Option<&T>`, and
2247                     // this will continue to work as long as we don't start
2248                     // using more niches than just null (e.g., the first page of
2249                     // the address space, or unaligned pointers).
2250                     Variants::Multiple {
2251                         discr_kind: DiscriminantKind::Niche {
2252                             dataful_variant,
2253                             ..
2254                         },
2255                         discr_index,
2256                         ..
2257                     } if this.fields.offset(discr_index) == offset =>
2258                         Some(this.for_variant(cx, dataful_variant)),
2259                     _ => Some(this),
2260                 };
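                // For instance, for `Option<&T>` the niche discriminant is the
                // pointer itself at offset 0, so a query at offset 0 inspects
                // the pointer inside the dataful `Some` variant.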
2261
2262                 if let Some(variant) = data_variant {
2263                     // We're not interested in any unions.
2264                     if let FieldPlacement::Union(_) = variant.fields {
2265                         data_variant = None;
2266                     }
2267                 }
2268
2269                 let mut result = None;
2270
2271                 if let Some(variant) = data_variant {
2272                     let ptr_end = offset + Pointer.size(cx);
2273                     for i in 0..variant.fields.count() {
2274                         let field_start = variant.fields.offset(i);
2275                         if field_start <= offset {
2276                             let field = variant.field(cx, i);
2277                             result = field.to_result().ok()
2278                                 .and_then(|field| {
2279                                     if ptr_end <= field_start + field.size {
2280                                         // We found the right field, look inside it.
2281                                         field.pointee_info_at(cx, offset - field_start)
2282                                     } else {
2283                                         None
2284                                     }
2285                                 });
2286                             if result.is_some() {
2287                                 break;
2288                             }
2289                         }
2290                     }
2291                 }
2292
2293                 // FIXME(eddyb) This should be for `ptr::Unique<T>`, not `Box<T>`.
2294                 if let Some(ref mut pointee) = result {
2295                     if let ty::Adt(def, _) = this.ty.sty {
2296                         if def.is_box() && offset.bytes() == 0 {
2297                             pointee.safe = Some(PointerKind::UniqueOwned);
2298                         }
2299                     }
2300                 }
2301
2302                 result
2303             }
2304         }
2305     }
2306 }
2307
2308 impl<'a> HashStable<StableHashingContext<'a>> for Variants {
2309     fn hash_stable<W: StableHasherResult>(&self,
2310                                           hcx: &mut StableHashingContext<'a>,
2311                                           hasher: &mut StableHasher<W>) {
2312         use crate::ty::layout::Variants::*;
2313         mem::discriminant(self).hash_stable(hcx, hasher);
2314
2315         match *self {
2316             Single { index } => {
2317                 index.hash_stable(hcx, hasher);
2318             }
2319             Multiple {
2320                 ref discr,
2321                 ref discr_kind,
2322                 discr_index,
2323                 ref variants,
2324             } => {
2325                 discr.hash_stable(hcx, hasher);
2326                 discr_kind.hash_stable(hcx, hasher);
2327                 discr_index.hash_stable(hcx, hasher);
2328                 variants.hash_stable(hcx, hasher);
2329             }
2330         }
2331     }
2332 }
2333
2334 impl<'a> HashStable<StableHashingContext<'a>> for DiscriminantKind {
2335     fn hash_stable<W: StableHasherResult>(&self,
2336                                           hcx: &mut StableHashingContext<'a>,
2337                                           hasher: &mut StableHasher<W>) {
2338         use crate::ty::layout::DiscriminantKind::*;
2339         mem::discriminant(self).hash_stable(hcx, hasher);
2340
2341         match *self {
2342             Tag => {}
2343             Niche {
2344                 dataful_variant,
2345                 ref niche_variants,
2346                 niche_start,
2347             } => {
2348                 dataful_variant.hash_stable(hcx, hasher);
2349                 niche_variants.start().hash_stable(hcx, hasher);
2350                 niche_variants.end().hash_stable(hcx, hasher);
2351                 niche_start.hash_stable(hcx, hasher);
2352             }
2353         }
2354     }
2355 }
2356
2357 impl<'a> HashStable<StableHashingContext<'a>> for FieldPlacement {
2358     fn hash_stable<W: StableHasherResult>(&self,
2359                                           hcx: &mut StableHashingContext<'a>,
2360                                           hasher: &mut StableHasher<W>) {
2361         use crate::ty::layout::FieldPlacement::*;
2362         mem::discriminant(self).hash_stable(hcx, hasher);
2363
2364         match *self {
2365             Union(count) => {
2366                 count.hash_stable(hcx, hasher);
2367             }
2368             Array { count, stride } => {
2369                 count.hash_stable(hcx, hasher);
2370                 stride.hash_stable(hcx, hasher);
2371             }
2372             Arbitrary { ref offsets, ref memory_index } => {
2373                 offsets.hash_stable(hcx, hasher);
2374                 memory_index.hash_stable(hcx, hasher);
2375             }
2376         }
2377     }
2378 }
2379
2380 impl<'a> HashStable<StableHashingContext<'a>> for VariantIdx {
2381     fn hash_stable<W: StableHasherResult>(
2382         &self,
2383         hcx: &mut StableHashingContext<'a>,
2384         hasher: &mut StableHasher<W>,
2385     ) {
2386         self.as_u32().hash_stable(hcx, hasher)
2387     }
2388 }
2389
2390 impl<'a> HashStable<StableHashingContext<'a>> for Abi {
2391     fn hash_stable<W: StableHasherResult>(&self,
2392                                           hcx: &mut StableHashingContext<'a>,
2393                                           hasher: &mut StableHasher<W>) {
2394         use crate::ty::layout::Abi::*;
2395         mem::discriminant(self).hash_stable(hcx, hasher);
2396
2397         match *self {
2398             Uninhabited => {}
2399             Scalar(ref value) => {
2400                 value.hash_stable(hcx, hasher);
2401             }
2402             ScalarPair(ref a, ref b) => {
2403                 a.hash_stable(hcx, hasher);
2404                 b.hash_stable(hcx, hasher);
2405             }
2406             Vector { ref element, count } => {
2407                 element.hash_stable(hcx, hasher);
2408                 count.hash_stable(hcx, hasher);
2409             }
2410             Aggregate { sized } => {
2411                 sized.hash_stable(hcx, hasher);
2412             }
2413         }
2414     }
2415 }
2416
2417 impl<'a> HashStable<StableHashingContext<'a>> for Scalar {
2418     fn hash_stable<W: StableHasherResult>(&self,
2419                                           hcx: &mut StableHashingContext<'a>,
2420                                           hasher: &mut StableHasher<W>) {
2421         let Scalar { value, ref valid_range } = *self;
2422         value.hash_stable(hcx, hasher);
2423         valid_range.start().hash_stable(hcx, hasher);
2424         valid_range.end().hash_stable(hcx, hasher);
2425     }
2426 }
2427
2428 impl_stable_hash_for!(struct crate::ty::layout::Niche {
2429     offset,
2430     scalar
2431 });
2432
2433 impl_stable_hash_for!(struct crate::ty::layout::LayoutDetails {
2434     variants,
2435     fields,
2436     abi,
2437     largest_niche,
2438     size,
2439     align
2440 });
2441
2442 impl_stable_hash_for!(enum crate::ty::layout::Integer {
2443     I8,
2444     I16,
2445     I32,
2446     I64,
2447     I128
2448 });
2449
2450 impl_stable_hash_for!(enum crate::ty::layout::Primitive {
2451     Int(integer, signed),
2452     Float(fty),
2453     Pointer
2454 });
2455
2456 impl_stable_hash_for!(struct crate::ty::layout::AbiAndPrefAlign {
2457     abi,
2458     pref
2459 });
2460
2461 impl<'tcx> HashStable<StableHashingContext<'tcx>> for Align {
2462     fn hash_stable<W: StableHasherResult>(
2463         &self,
2464         hcx: &mut StableHashingContext<'tcx>,
2465         hasher: &mut StableHasher<W>,
2466     ) {
2467         self.bytes().hash_stable(hcx, hasher);
2468     }
2469 }
2470
2471 impl<'tcx> HashStable<StableHashingContext<'tcx>> for Size {
2472     fn hash_stable<W: StableHasherResult>(
2473         &self,
2474         hcx: &mut StableHashingContext<'tcx>,
2475         hasher: &mut StableHasher<W>,
2476     ) {
2477         self.bytes().hash_stable(hcx, hasher);
2478     }
2479 }
2480
2481 impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for LayoutError<'tcx> {
2482     fn hash_stable<W: StableHasherResult>(&self,
2483                                           hcx: &mut StableHashingContext<'a>,
2484                                           hasher: &mut StableHasher<W>) {
2485         use crate::ty::layout::LayoutError::*;
2486         mem::discriminant(self).hash_stable(hcx, hasher);
2487
2488         match *self {
2489             Unknown(t) |
2490             SizeOverflow(t) => t.hash_stable(hcx, hasher)
2491         }
2492     }
2493 }
2494
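     // `call::FnType` itself is defined in `rustc_target`, which cannot depend
     // on the type system, so its `ty`-aware constructors live in this
     // extension trait instead.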
2495 pub trait FnTypeExt<'tcx, C>
2496 where
2497     C: LayoutOf<Ty = Ty<'tcx>, TyLayout = TyLayout<'tcx>>
2498         + HasDataLayout
2499         + HasTargetSpec
2500         + HasTyCtxt<'tcx>
2501         + HasParamEnv<'tcx>,
2502 {
2503     fn of_instance(cx: &C, instance: &ty::Instance<'tcx>) -> Self;
2504     fn new(cx: &C, sig: ty::FnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self;
2505     fn new_vtable(cx: &C, sig: ty::FnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self;
2506     fn new_internal(
2507         cx: &C,
2508         sig: ty::FnSig<'tcx>,
2509         extra_args: &[Ty<'tcx>],
2510         mk_arg_type: impl Fn(Ty<'tcx>, Option<usize>) -> ArgType<'tcx, Ty<'tcx>>,
2511     ) -> Self;
2512     fn adjust_for_abi(&mut self, cx: &C, abi: SpecAbi);
2513 }
2514
2515 impl<'tcx, C> FnTypeExt<'tcx, C> for call::FnType<'tcx, Ty<'tcx>>
2516 where
2517     C: LayoutOf<Ty = Ty<'tcx>, TyLayout = TyLayout<'tcx>>
2518         + HasDataLayout
2519         + HasTargetSpec
2520         + HasTyCtxt<'tcx>
2521         + HasParamEnv<'tcx>,
2522 {
2523     fn of_instance(cx: &C, instance: &ty::Instance<'tcx>) -> Self {
2524         let sig = instance.fn_sig(cx.tcx());
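             // The ABI does not depend on lifetimes, so erase all late-bound
             // regions (and normalize away projections) before computing layouts.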
2525         let sig = cx
2526             .tcx()
2527             .normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig);
2528         call::FnType::new(cx, sig, &[])
2529     }
2530
2531     fn new(cx: &C, sig: ty::FnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
2532         call::FnType::new_internal(cx, sig, extra_args, |ty, _| ArgType::new(cx.layout_of(ty)))
2533     }
2534
2535     fn new_vtable(cx: &C, sig: ty::FnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
2536         FnTypeExt::new_internal(cx, sig, extra_args, |ty, arg_idx| {
2537             let mut layout = cx.layout_of(ty);
2538             // Don't pass the vtable; it's not an argument of the virtual fn.
2539             // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
2540             // or `&/&mut dyn Trait`, because this is special-cased elsewhere in codegen.
2541             if arg_idx == Some(0) {
2542                 let fat_pointer_ty = if layout.is_unsized() {
2543                     // Unsized `self` is passed as a pointer to `self`.
2544                     // FIXME(mikeyhew): change this to use `&own` if it is ever added to the language.
2545                     cx.tcx().mk_mut_ptr(layout.ty)
2546                 } else {
2547                     match layout.abi {
2548                         Abi::ScalarPair(..) => (),
2549                         _ => bug!("receiver type has unsupported layout: {:?}", layout),
2550                     }
2551
2552                     // In the case of `Rc<Self>`, we need to explicitly pass a `*mut RcBox<Self>`
2553                     // with a Scalar (not ScalarPair) ABI. This is a hack that is understood
2554                     // elsewhere in the compiler as a method on a `dyn Trait`.
2555                     // To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes until we
2556                     // get a built-in pointer type.
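                         // Illustrative example (field layout as in liballoc):
                         // `Rc<Self>` wraps a `NonNull<RcBox<Self>>`, which wraps a
                         // `*const RcBox<Self>`; the loop below stops at that raw pointer.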
2557                     let mut fat_pointer_layout = layout;
2558                     'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr()
2559                         && !fat_pointer_layout.ty.is_region_ptr()
2560                     {
2561                         'iter_fields: for i in 0..fat_pointer_layout.fields.count() {
2562                             let field_layout = fat_pointer_layout.field(cx, i);
2563
2564                             if !field_layout.is_zst() {
2565                                 fat_pointer_layout = field_layout;
2566                                 continue 'descend_newtypes;
2567                             }
2568                         }
2569
2570                         bug!(
2571                             "receiver has no non-zero-sized fields {:?}",
2572                             fat_pointer_layout
2573                         );
2574                     }
2575
2576                     fat_pointer_layout.ty
2577                 };
2578
2579                 // We now have a type like `*mut RcBox<dyn Trait>`. Change its layout
2580                 // to that of `*mut ()`, a thin pointer, but keep the same type; this
2581                 // combination is understood as a special case elsewhere in the compiler.
2582                 let unit_pointer_ty = cx.tcx().mk_mut_ptr(cx.tcx().mk_unit());
2583                 layout = cx.layout_of(unit_pointer_ty);
2584                 layout.ty = fat_pointer_ty;
2585             }
2586             ArgType::new(layout)
2587         })
2588     }
2589
2590     fn new_internal(
2591         cx: &C,
2592         sig: ty::FnSig<'tcx>,
2593         extra_args: &[Ty<'tcx>],
2594         mk_arg_type: impl Fn(Ty<'tcx>, Option<usize>) -> ArgType<'tcx, Ty<'tcx>>,
2595     ) -> Self {
2596         debug!("FnType::new_internal({:?}, {:?})", sig, extra_args);
2597
2598         use rustc_target::spec::abi::Abi::*;
2599         let conv = match cx.tcx().sess.target.target.adjust_abi(sig.abi) {
2600             RustIntrinsic | PlatformIntrinsic | Rust | RustCall => Conv::C,
2601
2602             // It's the ABI's job to select this, not ours.
2603             System => bug!("system abi should be selected elsewhere"),
2604
2605             Stdcall => Conv::X86Stdcall,
2606             Fastcall => Conv::X86Fastcall,
2607             Vectorcall => Conv::X86VectorCall,
2608             Thiscall => Conv::X86ThisCall,
2609             C => Conv::C,
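                 // `Unadjusted` also lowers to the C convention here; its real
                 // effect is the early return in `adjust_for_abi` below, which
                 // skips every argument fix-up.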
2610             Unadjusted => Conv::C,
2611             Win64 => Conv::X86_64Win64,
2612             SysV64 => Conv::X86_64SysV,
2613             Aapcs => Conv::ArmAapcs,
2614             PtxKernel => Conv::PtxKernel,
2615             Msp430Interrupt => Conv::Msp430Intr,
2616             X86Interrupt => Conv::X86Intr,
2617             AmdGpuKernel => Conv::AmdGpuKernel,
2618
2619             // These API constants ought to be more specific...
2620             Cdecl => Conv::C,
2621         };
2622
2623         let mut inputs = sig.inputs();
2624         let extra_args = if sig.abi == RustCall {
2625             assert!(!sig.c_variadic && extra_args.is_empty());
2626
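                 // "rust-call" functions receive their arguments as a trailing
                 // tuple, which is untupled here. Illustrative example: for some
                 // `F: FnOnce(u8, u16)`, `call_once` has the signature
                 //     extern "rust-call" fn(F, (u8, u16)) -> F::Output
                 // and is lowered as if it were `fn(F, u8, u16) -> F::Output`.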
2627             match sig.inputs().last().unwrap().sty {
2628                 ty::Tuple(tupled_arguments) => {
2629                     inputs = &sig.inputs()[0..sig.inputs().len() - 1];
2630                     tupled_arguments.iter().map(|k| k.expect_ty()).collect()
2631                 }
2632                 _ => {
2633                     bug!(
2634                         "argument to function with \"rust-call\" ABI \
2635                          is not a tuple"
2636                     );
2637                 }
2638             }
2639         } else {
2640             assert!(sig.c_variadic || extra_args.is_empty());
2641             extra_args.to_vec()
2642         };
2643
2644         let target = &cx.tcx().sess.target.target;
2645         let win_x64_gnu =
2646             target.target_os == "windows" && target.arch == "x86_64" && target.target_env == "gnu";
2647         let linux_s390x =
2648             target.target_os == "linux" && target.arch == "s390x" && target.target_env == "gnu";
2649         let linux_sparc64 =
2650             target.target_os == "linux" && target.arch == "sparc64" && target.target_env == "gnu";
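             // For the Rust ABIs we control both the caller and the callee, so
             // we are free to pick unstable conventions such as the scalar-pair
             // `PassMode` used below.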
2651         let rust_abi = match sig.abi {
2652             RustIntrinsic | PlatformIntrinsic | Rust | RustCall => true,
2653             _ => false,
2654         };
2655
2656         // Handle safe Rust thin and fat pointers.
2657         let adjust_for_rust_scalar = |attrs: &mut ArgAttributes,
2658                                       scalar: &Scalar,
2659                                       layout: TyLayout<'tcx>,
2660                                       offset: Size,
2661                                       is_return: bool| {
2662             // Booleans are always an i1 that needs to be zero-extended.
2663             if scalar.is_bool() {
2664                 attrs.set(ArgAttribute::ZExt);
2665                 return;
2666             }
2667
2668             // Only pointer types are handled below.
2669             if scalar.value != Pointer {
2670                 return;
2671             }
2672
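                 // A forward (non-wrapping) range that starts above 0 proves the
                 // pointer is never null; a wrap-around range (start > end) may
                 // include 0 and gets no attribute.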
2673             if scalar.valid_range.start() < scalar.valid_range.end() {
2674                 if *scalar.valid_range.start() > 0 {
2675                     attrs.set(ArgAttribute::NonNull);
2676                 }
2677             }
2678
2679             if let Some(pointee) = layout.pointee_info_at(cx, offset) {
2680                 if let Some(kind) = pointee.safe {
2681                     attrs.pointee_size = pointee.size;
2682                     attrs.pointee_align = Some(pointee.align);
2683
2684                     // `Box` pointer parameters never alias because ownership is transferred.
2685                     // `&mut` pointer parameters never alias other parameters
2686                     // or mutable global data.
2687                     //
2688                     // `&T` where `T` contains no `UnsafeCell<U>` is immutable,
2689                     // and can be marked as both `readonly` and `noalias`, as
2690                     // LLVM's definition of `noalias` is based solely on memory
2691                     // dependencies rather than pointer equality
2692                     let no_alias = match kind {
2693                         PointerKind::Shared => false,
2694                         PointerKind::UniqueOwned => true,
2695                         PointerKind::Frozen | PointerKind::UniqueBorrowed => !is_return,
2696                     };
2697                     if no_alias {
2698                         attrs.set(ArgAttribute::NoAlias);
2699                     }
2700
2701                     if kind == PointerKind::Frozen && !is_return {
2702                         attrs.set(ArgAttribute::ReadOnly);
2703                     }
2704                 }
2705             }
2706         };
2707
2708         // Store the index of the last argument. This is useful for working with
2709         // C-compatible variadic arguments.
2710         let last_arg_idx = if sig.inputs().is_empty() {
2711             None
2712         } else {
2713             Some(sig.inputs().len() - 1)
2714         };
2715
2716         let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| {
2717             let is_return = arg_idx.is_none();
2718             let mut arg = mk_arg_type(ty, arg_idx);
2719             if arg.layout.is_zst() {
2720                 // For some forsaken reason, x86_64-pc-windows-gnu
2721                 // doesn't ignore zero-sized struct arguments.
2722                 // The same is true for s390x-unknown-linux-gnu
2723                 // and sparc64-unknown-linux-gnu.
2724                 if is_return || rust_abi || (!win_x64_gnu && !linux_s390x && !linux_sparc64) {
2725                     arg.mode = PassMode::Ignore(IgnoreMode::Zst);
2726                 }
2727             }
2728
2729             // If this is a C-variadic function, this is not the return value,
2730             // and there are one or more fixed arguments; ensure that the `VaListImpl`
2731             // is ignored as an argument.
2732             if sig.c_variadic {
2733                 match (last_arg_idx, arg_idx) {
2734                     (Some(last_idx), Some(cur_idx)) if last_idx == cur_idx => {
2735                         let va_list_did = match cx.tcx().lang_items().va_list() {
2736                             Some(did) => did,
2737                             None => bug!("`va_list` lang item required for C-variadic functions"),
2738                         };
2739                         match ty.sty {
2740                             ty::Adt(def, _) if def.did == va_list_did => {
2741                                 // This is the "spoofed" `VaListImpl`. Set the argument's mode
2742                                 // so that it will be ignored.
2743                                 arg.mode = PassMode::Ignore(IgnoreMode::CVarArgs);
2744                             }
2745                             _ => (),
2746                         }
2747                     }
2748                     _ => {}
2749                 }
2750             }
2751
2752             // FIXME(eddyb) other ABIs don't have logic for scalar pairs.
2753             if !is_return && rust_abi {
2754                 if let Abi::ScalarPair(ref a, ref b) = arg.layout.abi {
2755                     let mut a_attrs = ArgAttributes::new();
2756                     let mut b_attrs = ArgAttributes::new();
2757                     adjust_for_rust_scalar(&mut a_attrs, a, arg.layout, Size::ZERO, false);
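                         // The second element of the pair starts at `a`'s size,
                         // rounded up to `b`'s ABI alignment.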
2758                     adjust_for_rust_scalar(
2759                         &mut b_attrs,
2760                         b,
2761                         arg.layout,
2762                         a.value.size(cx).align_to(b.value.align(cx).abi),
2763                         false,
2764                     );
2765                     arg.mode = PassMode::Pair(a_attrs, b_attrs);
2766                     return arg;
2767                 }
2768             }
2769
2770             if let Abi::Scalar(ref scalar) = arg.layout.abi {
2771                 if let PassMode::Direct(ref mut attrs) = arg.mode {
2772                     adjust_for_rust_scalar(attrs, scalar, arg.layout, Size::ZERO, is_return);
2773                 }
2774             }
2775
2776             arg
2777         };
2778
2779         let mut fn_ty = FnType {
2780             ret: arg_of(sig.output(), None),
2781             args: inputs
2782                 .iter()
2783                 .cloned()
2784                 .chain(extra_args)
2785                 .enumerate()
2786                 .map(|(i, ty)| arg_of(ty, Some(i)))
2787                 .collect(),
2788             c_variadic: sig.c_variadic,
2789             conv,
2790         };
2791         fn_ty.adjust_for_abi(cx, sig.abi);
2792         fn_ty
2793     }
2794
2795     fn adjust_for_abi(&mut self, cx: &C, abi: SpecAbi) {
2796         if abi == SpecAbi::Unadjusted {
2797             return;
2798         }
2799
2800         if abi == SpecAbi::Rust
2801             || abi == SpecAbi::RustCall
2802             || abi == SpecAbi::RustIntrinsic
2803             || abi == SpecAbi::PlatformIntrinsic
2804         {
2805             let fixup = |arg: &mut ArgType<'tcx, Ty<'tcx>>| {
2806                 if arg.is_ignore() {
2807                     return;
2808                 }
2809
2810                 match arg.layout.abi {
2811                     Abi::Aggregate { .. } => {}
2812
2813                     // This is a fun case! The gist of what this is doing is
2814                     // that we want callers and callees to always agree on the
2815                     // ABI of how they pass SIMD arguments. If we were to *not*
2816                     // make these arguments indirect then they'd be immediates
2817                     // in LLVM, which means that they'd use whatever the
2818                     // appropriate ABI is for the callee and the caller. That
2819                     // means, for example, if the caller doesn't have AVX
2820                     // enabled but the callee does, then passing an AVX argument
2821                     // across this boundary would cause corrupt data to show up.
2822                     //
2823                     // This problem is fixed by unconditionally passing SIMD
2824                     // arguments through memory between callers and callees
2825                     // which should get them all to agree on ABI regardless of
2826                     // target feature sets. Some more information about this
2827                     // issue can be found in #44367.
2828                     //
2829                     // Note that the platform intrinsic ABI is exempt here, as
2830                     // that's how we connect up to LLVM and it's unstable
2831                     // anyway; we control all calls to it in libstd.
2832                     Abi::Vector { .. }
2833                         if abi != SpecAbi::PlatformIntrinsic
2834                             && cx.tcx().sess.target.target.options.simd_types_indirect =>
2835                     {
2836                         arg.make_indirect();
2837                         return;
2838                     }
2839
2840                     _ => return,
2841                 }
2842
2843                 let size = arg.layout.size;
2844                 if arg.layout.is_unsized() || size > Pointer.size(cx) {
2845                     arg.make_indirect();
2846                 } else {
2847                     // We want to pass small aggregates as immediates, but using
2848                     // an LLVM aggregate type for this leads to bad optimizations,
2849                     // so we pick an appropriately sized integer type instead.
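                         // E.g. (illustrative): a 4-byte aggregate such as
                         // `[u8; 4]` becomes a single 4-byte integer register
                         // instead of an LLVM `[4 x i8]`.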
2850                     arg.cast_to(Reg {
2851                         kind: RegKind::Integer,
2852                         size,
2853                     });
2854                 }
2855             };
2856             fixup(&mut self.ret);
2857             for arg in &mut self.args {
2858                 fixup(arg);
2859             }
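                 // An indirect return is written through a caller-provided
                 // buffer; `StructRet` marks that hidden out-pointer (LLVM's
                 // `sret` attribute).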
2860             if let PassMode::Indirect(ref mut attrs, _) = self.ret.mode {
2861                 attrs.set(ArgAttribute::StructRet);
2862             }
2863             return;
2864         }
2865
2866         if let Err(msg) = self.adjust_for_cabi(cx, abi) {
2867             cx.tcx().sess.fatal(&msg);
2868         }
2869     }
2870 }