use crate::session::{self, DataTypeKind};
use crate::ty::{self, Ty, TyCtxt, TypeFoldable, ReprOptions, subst::SubstsRef};

use syntax::ast::{self, Ident, IntTy, UintTy};
use syntax::attr;
use syntax_pos::DUMMY_SP;

use std::cmp;
use std::fmt;
use std::i128;
use std::iter;
use std::mem;
use std::ops::Bound;

use crate::hir;
use crate::ich::StableHashingContext;
use crate::mir::{GeneratorLayout, GeneratorSavedLocal};
use crate::ty::subst::Subst;
use rustc_index::bit_set::BitSet;
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_index::vec::{IndexVec, Idx};

pub use rustc_target::abi::*;
use rustc_target::spec::{HasTargetSpec, abi::Abi as SpecAbi};
use rustc_target::abi::call::{
    ArgAttribute, ArgAttributes, ArgAbi, Conv, FnAbi, PassMode, Reg, RegKind
};

pub trait IntegerExt {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx>;
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
    fn repr_discr<'tcx>(
        tcx: TyCtxt<'tcx>,
        ty: Ty<'tcx>,
        repr: &ReprOptions,
        min: i128,
        max: i128,
    ) -> (Integer, bool);
}

impl IntegerExt for Integer {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx> {
        match (*self, signed) {
            (I8, false) => tcx.types.u8,
            (I16, false) => tcx.types.u16,
            (I32, false) => tcx.types.u32,
            (I64, false) => tcx.types.u64,
            (I128, false) => tcx.types.u128,
            (I8, true) => tcx.types.i8,
            (I16, true) => tcx.types.i16,
            (I32, true) => tcx.types.i32,
            (I64, true) => tcx.types.i64,
            (I128, true) => tcx.types.i128,
        }
    }

    /// Gets the Integer type from an attr::IntType.
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
        let dl = cx.data_layout();

        match ity {
            attr::SignedInt(IntTy::I8) | attr::UnsignedInt(UintTy::U8) => I8,
            attr::SignedInt(IntTy::I16) | attr::UnsignedInt(UintTy::U16) => I16,
            attr::SignedInt(IntTy::I32) | attr::UnsignedInt(UintTy::U32) => I32,
            attr::SignedInt(IntTy::I64) | attr::UnsignedInt(UintTy::U64) => I64,
            attr::SignedInt(IntTy::I128) | attr::UnsignedInt(UintTy::U128) => I128,
            attr::SignedInt(IntTy::Isize) | attr::UnsignedInt(UintTy::Usize) => {
                dl.ptr_sized_integer()
            }
        }
    }

    /// Finds the appropriate Integer type and signedness for the given
    /// signed discriminant range and #[repr] attribute.
    /// N.B.: u128 values above i128::MAX will be treated as signed, but
    /// that shouldn't affect anything, other than maybe debuginfo.
    fn repr_discr<'tcx>(
        tcx: TyCtxt<'tcx>,
        ty: Ty<'tcx>,
        repr: &ReprOptions,
        min: i128,
        max: i128,
    ) -> (Integer, bool) {
        // Theoretically, negative values could be larger in unsigned representation
        // than the unsigned representation of the signed minimum. However, if there
        // are any negative values, the only valid unsigned representation is u128
        // which can fit all i128 values, so the result remains unaffected.
        let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
        let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));

        let mut min_from_extern = None;
        let min_default = I8;

        if let Some(ity) = repr.int {
            let discr = Integer::from_attr(&tcx, ity);
            let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
            if discr < fit {
                bug!("Integer::repr_discr: `#[repr]` hint too small for \
                      discriminant range of enum `{}`", ty)
            }
            return (discr, ity.is_signed());
        }

        if repr.c() {
            match &tcx.sess.target.target.arch[..] {
                // WARNING: the ARM EABI has two variants; the one corresponding
                // to `at_least == I32` appears to be used on Linux and NetBSD,
                // but some systems may use the variant corresponding to no
                // lower bound. However, we don't run on those yet...?
                "arm" => min_from_extern = Some(I32),
                _ => min_from_extern = Some(I32),
            }
        }

        let at_least = min_from_extern.unwrap_or(min_default);

        // If there are no negative values, we can use the unsigned fit.
        if min >= 0 {
            (cmp::max(unsigned_fit, at_least), false)
        } else {
            (cmp::max(signed_fit, at_least), true)
        }
    }
}
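
// Illustrative sketch (not compiler code): for `enum E { A = -1, B = 100 }`
// with no explicit `#[repr]`, `repr_discr` sees `min = -1` and `max = 100`;
// both fit in a signed byte, so it returns `(I8, true)`. Adding `#[repr(C)]`
// raises the lower bound via `min_from_extern`, giving `(I32, true)` instead.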

pub trait PrimitiveExt {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
    fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
}

impl PrimitiveExt for Primitive {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
        match *self {
            Int(i, signed) => i.to_ty(tcx, signed),
            F32 => tcx.types.f32,
            F64 => tcx.types.f64,
            Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
        }
    }

    /// Returns an *integer* type matching this primitive.
    /// Useful in particular when dealing with enum discriminants.
    fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
        match *self {
            Int(i, signed) => i.to_ty(tcx, signed),
            Pointer => tcx.types.usize,
            F32 | F64 => bug!("floats do not have an int type"),
        }
    }
}

/// The first half of a fat pointer.
///
/// - For a trait object, this is the address of the box.
/// - For a slice, this is the base address.
pub const FAT_PTR_ADDR: usize = 0;

/// The second half of a fat pointer.
///
/// - For a trait object, this is the address of the vtable.
/// - For a slice, this is the length.
pub const FAT_PTR_EXTRA: usize = 1;
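
// For example, a `&[u8]` fat pointer consists of two pointer-sized parts:
// part `FAT_PTR_ADDR` is the base address of the data and part `FAT_PTR_EXTRA`
// is the length; for `&dyn Trait`, the second part is the vtable address.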

#[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable)]
pub enum LayoutError<'tcx> {
    Unknown(Ty<'tcx>),
    SizeOverflow(Ty<'tcx>)
}

impl<'tcx> fmt::Display for LayoutError<'tcx> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match *self {
            LayoutError::Unknown(ty) => {
                write!(f, "the type `{:?}` has an unknown layout", ty)
            }
            LayoutError::SizeOverflow(ty) => {
                write!(f, "the type `{:?}` is too big for the current architecture", ty)
            }
        }
    }
}

fn layout_raw<'tcx>(
    tcx: TyCtxt<'tcx>,
    query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
) -> Result<&'tcx LayoutDetails, LayoutError<'tcx>> {
    ty::tls::with_related_context(tcx, move |icx| {
        let rec_limit = *tcx.sess.recursion_limit.get();
        let (param_env, ty) = query.into_parts();

        if icx.layout_depth > rec_limit {
            tcx.sess.fatal(
                &format!("overflow representing the type `{}`", ty));
        }

        // Update the ImplicitCtxt to increase the layout_depth.
        let icx = ty::tls::ImplicitCtxt {
            layout_depth: icx.layout_depth + 1,
            ..icx.clone()
        };

        ty::tls::enter_context(&icx, |_| {
            let cx = LayoutCx { tcx, param_env };
            let layout = cx.layout_raw_uncached(ty);
            // Type-level uninhabitedness should always imply ABI uninhabitedness.
            if let Ok(layout) = layout {
                if ty.conservative_is_privately_uninhabited(tcx) {
                    assert!(layout.abi.is_uninhabited());
                }
            }
            layout
        })
    })
}

pub fn provide(providers: &mut ty::query::Providers<'_>) {
    *providers = ty::query::Providers {
        layout_raw,
        ..*providers
    };
}

pub struct LayoutCx<'tcx, C> {
    pub tcx: C,
    pub param_env: ty::ParamEnv<'tcx>,
}

#[derive(Copy, Clone, Debug)]
enum StructKind {
    /// A tuple, closure, or univariant which cannot be coerced to unsized.
    AlwaysSized,
    /// A univariant, the last field of which may be coerced to unsized.
    MaybeUnsized,
    /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
    Prefixed(Size, Align),
}

// Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
// This is used to go between `memory_index` (source field order to memory order)
// and `inverse_memory_index` (memory order to source field order).
// See also `FieldPlacement::Arbitrary::memory_index` for more details.
// FIXME(eddyb) build a better abstraction for permutations, if possible.
fn invert_mapping(map: &[u32]) -> Vec<u32> {
    let mut inverse = vec![0; map.len()];
    for i in 0..map.len() {
        inverse[map[i] as usize] = i as u32;
    }
    inverse
}
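
// Illustrative example (a usage sketch, not compiler code): if
// `memory_index = [1, 2, 0]`, i.e. source field 0 is placed second in memory,
// then `invert_mapping(&[1, 2, 0])` returns `[2, 0, 1]`, so memory slot 0
// holds source field 2.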

impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
    fn scalar_pair(&self, a: Scalar, b: Scalar) -> LayoutDetails {
        let dl = self.data_layout();
        let b_align = b.value.align(dl);
        let align = a.value.align(dl).max(b_align).max(dl.aggregate_align);
        let b_offset = a.value.size(dl).align_to(b_align.abi);
        let size = (b_offset + b.value.size(dl)).align_to(align.abi);

        // HACK(nox): We iter on `b` and then `a` because `max_by_key`
        // returns the last maximum.
        let largest_niche = Niche::from_scalar(dl, b_offset, b.clone())
            .into_iter()
            .chain(Niche::from_scalar(dl, Size::ZERO, a.clone()))
            .max_by_key(|niche| niche.available(dl));

        LayoutDetails {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldPlacement::Arbitrary {
                offsets: vec![Size::ZERO, b_offset],
                memory_index: vec![0, 1]
            },
            abi: Abi::ScalarPair(a, b),
            largest_niche,
            align,
            size
        }
    }
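
    // Worked example (illustrative, assuming a typical 64-bit target): pairing
    // a `u8` scalar with a `u32` scalar gives `b_offset = Size(1).align_to(4)
    // = 4`, `size = (4 + 4).align_to(4) = 8`, and an ABI alignment of 4, the
    // same layout a `(u8, u32)` pair would get.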

    fn univariant_uninterned(&self,
                             ty: Ty<'tcx>,
                             fields: &[TyLayout<'_>],
                             repr: &ReprOptions,
                             kind: StructKind) -> Result<LayoutDetails, LayoutError<'tcx>> {
        let dl = self.data_layout();
        let pack = repr.pack;
        if pack.is_some() && repr.align.is_some() {
            bug!("struct cannot be packed and aligned");
        }

        let mut align = if pack.is_some() {
            dl.i8_align
        } else {
            dl.aggregate_align
        };

        let mut sized = true;
        let mut offsets = vec![Size::ZERO; fields.len()];
        let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();

        let mut optimize = !repr.inhibit_struct_field_reordering_opt();
        if let StructKind::Prefixed(_, align) = kind {
            optimize &= align.bytes() == 1;
        }

        if optimize {
            let end = if let StructKind::MaybeUnsized = kind {
                fields.len() - 1
            } else {
                fields.len()
            };
            let optimizing = &mut inverse_memory_index[..end];
            let field_align = |f: &TyLayout<'_>| {
                if let Some(pack) = pack { f.align.abi.min(pack) } else { f.align.abi }
            };
            match kind {
                StructKind::AlwaysSized |
                StructKind::MaybeUnsized => {
                    optimizing.sort_by_key(|&x| {
                        // Place ZSTs first to avoid "interesting offsets",
                        // especially with only one or two non-ZST fields.
                        let f = &fields[x as usize];
                        (!f.is_zst(), cmp::Reverse(field_align(f)))
                    });
                }
                StructKind::Prefixed(..) => {
                    optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
                }
            }
        }

        // inverse_memory_index holds field indices by increasing memory offset.
        // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
        // We now write field offsets to the corresponding offset slot;
        // field 5 with offset 0 puts 0 in offsets[5].
        // At the bottom of this function, we invert `inverse_memory_index` to
        // produce `memory_index` (see `invert_mapping`).
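
        // Illustrative example: for `struct S { a: u8, b: u32, c: u16 }` with
        // no `#[repr(C)]`, the sort above orders non-ZST fields by decreasing
        // alignment, so inverse_memory_index becomes [1, 2, 0] (b, then c,
        // then a) and the final memory_index is [2, 0, 1].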

        let mut offset = Size::ZERO;
        let mut largest_niche = None;
        let mut largest_niche_available = 0;

        if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
            let prefix_align = if let Some(pack) = pack {
                prefix_align.min(pack)
            } else {
                prefix_align
            };
            align = align.max(AbiAndPrefAlign::new(prefix_align));
            offset = prefix_size.align_to(prefix_align);
        }

        for &i in &inverse_memory_index {
            let field = fields[i as usize];
            if !sized {
                bug!("univariant: field #{} of `{}` comes after unsized field",
                     offsets.len(), ty);
            }

            if field.is_unsized() {
                sized = false;
            }

            // Invariant: offset < dl.obj_size_bound() <= 1<<61
            let field_align = if let Some(pack) = pack {
                field.align.min(AbiAndPrefAlign::new(pack))
            } else {
                field.align
            };
            offset = offset.align_to(field_align.abi);
            align = align.max(field_align);

            debug!("univariant offset: {:?} field: {:#?}", offset, field);
            offsets[i as usize] = offset;

            if let Some(mut niche) = field.largest_niche.clone() {
                let available = niche.available(dl);
                if available > largest_niche_available {
                    largest_niche_available = available;
                    niche.offset += offset;
                    largest_niche = Some(niche);
                }
            }

            offset = offset.checked_add(field.size, dl)
                .ok_or(LayoutError::SizeOverflow(ty))?;
        }

        if let Some(repr_align) = repr.align {
            align = align.max(AbiAndPrefAlign::new(repr_align));
        }

        debug!("univariant min_size: {:?}", offset);
        let min_size = offset;

        // As stated above, inverse_memory_index holds field indices by increasing offset.
        // This makes it an already-sorted view of the offsets vec.
        // To invert it, consider: if field 5 has offset 0, then
        // inverse_memory_index[0] is 5, and memory_index[5] should be 0.
        // Note: if we didn't optimize, the mapping is the identity and is
        // already right.

        let memory_index;
        if optimize {
            memory_index = invert_mapping(&inverse_memory_index);
        } else {
            memory_index = inverse_memory_index;
        }

        let size = min_size.align_to(align.abi);
        let mut abi = Abi::Aggregate { sized };

        // Unpack newtype ABIs and find scalar pairs.
        if sized && size.bytes() > 0 {
            // All other fields must be ZSTs, and we need them to all start at 0.
            let mut zst_offsets =
                offsets.iter().enumerate().filter(|&(i, _)| fields[i].is_zst());
            if zst_offsets.all(|(_, o)| o.bytes() == 0) {
                let mut non_zst_fields =
                    fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());

                match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
                    // We have exactly one non-ZST field.
                    (Some((i, field)), None, None) => {
                        // Field fills the struct and it has a scalar or scalar pair ABI.
                        if offsets[i].bytes() == 0 &&
                           align.abi == field.align.abi &&
                           size == field.size {
                            match field.abi {
                                // For plain scalars, or vectors of them, we can't unpack
                                // newtypes for `#[repr(C)]`, as that affects C ABIs.
                                Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
                                    abi = field.abi.clone();
                                }
                                // But scalar pairs are Rust-specific and get
                                // treated as aggregates by C ABIs anyway.
                                Abi::ScalarPair(..) => {
                                    abi = field.abi.clone();
                                }
                                _ => {}
                            }
                        }
                    }

                    // Two non-ZST fields, and they're both scalars.
                    (Some((i, &TyLayout {
                        details: &LayoutDetails { abi: Abi::Scalar(ref a), .. }, ..
                    })), Some((j, &TyLayout {
                        details: &LayoutDetails { abi: Abi::Scalar(ref b), .. }, ..
                    })), None) => {
                        // Order by the memory placement, not source order.
                        let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
                            ((i, a), (j, b))
                        } else {
                            ((j, b), (i, a))
                        };
                        let pair = self.scalar_pair(a.clone(), b.clone());
                        let pair_offsets = match pair.fields {
                            FieldPlacement::Arbitrary {
                                ref offsets,
                                ref memory_index
                            } => {
                                assert_eq!(memory_index, &[0, 1]);
                                offsets
                            }
                            _ => bug!()
                        };
                        if offsets[i] == pair_offsets[0] &&
                           offsets[j] == pair_offsets[1] &&
                           align == pair.align &&
                           size == pair.size {
                            // We can use `ScalarPair` only when it matches our
                            // already computed layout (including `#[repr(C)]`).
                            abi = pair.abi;
                        }
                    }

                    _ => {}
                }
            }
        }

        if sized && fields.iter().any(|f| f.abi.is_uninhabited()) {
            abi = Abi::Uninhabited;
        }

        Ok(LayoutDetails {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldPlacement::Arbitrary {
                offsets,
                memory_index
            },
            abi,
            largest_niche,
            align,
            size
        })
    }
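
    // For illustration (not compiler code): `struct Wrapper(u64);` has exactly
    // one non-ZST field, starting at offset 0 and filling the struct, so the
    // match above forwards the field's `Abi::Scalar`; similarly, a two-scalar
    // struct like `(u32, u32)` is promoted to `Abi::ScalarPair` whenever
    // `scalar_pair` reproduces the offsets, alignment, and size computed here.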

    fn layout_raw_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx LayoutDetails, LayoutError<'tcx>> {
        let tcx = self.tcx;
        let param_env = self.param_env;
        let dl = self.data_layout();
        let scalar_unit = |value: Primitive| {
            let bits = value.size(dl).bits();
            assert!(bits <= 128);
            Scalar {
                value,
                valid_range: 0..=(!0 >> (128 - bits))
            }
        };
        let scalar = |value: Primitive| {
            tcx.intern_layout(LayoutDetails::scalar(self, scalar_unit(value)))
        };

        let univariant = |fields: &[TyLayout<'_>], repr: &ReprOptions, kind| {
            Ok(tcx.intern_layout(self.univariant_uninterned(ty, fields, repr, kind)?))
        };
        debug_assert!(!ty.has_infer_types());

        Ok(match ty.kind {
            // Basic scalars.
            ty::Bool => {
                tcx.intern_layout(LayoutDetails::scalar(self, Scalar {
                    value: Int(I8, false),
                    valid_range: 0..=1
                }))
            }
            ty::Char => {
                tcx.intern_layout(LayoutDetails::scalar(self, Scalar {
                    value: Int(I32, false),
                    valid_range: 0..=0x10FFFF
                }))
            }
            ty::Int(ity) => {
                scalar(Int(Integer::from_attr(dl, attr::SignedInt(ity)), true))
            }
            ty::Uint(ity) => {
                scalar(Int(Integer::from_attr(dl, attr::UnsignedInt(ity)), false))
            }
            ty::Float(fty) => scalar(match fty {
                ast::FloatTy::F32 => F32,
                ast::FloatTy::F64 => F64,
            }),
            ty::FnPtr(_) => {
                let mut ptr = scalar_unit(Pointer);
                ptr.valid_range = 1..=*ptr.valid_range.end();
                tcx.intern_layout(LayoutDetails::scalar(self, ptr))
            }

            // The never type.
            ty::Never => {
                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldPlacement::Union(0),
                    abi: Abi::Uninhabited,
                    largest_niche: None,
                    align: dl.i8_align,
                    size: Size::ZERO
                })
            }

            // Potentially-fat pointers.
            ty::Ref(_, pointee, _) |
            ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                let mut data_ptr = scalar_unit(Pointer);
                if !ty.is_unsafe_ptr() {
                    data_ptr.valid_range = 1..=*data_ptr.valid_range.end();
                }

                let pointee = tcx.normalize_erasing_regions(param_env, pointee);
                if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
                    return Ok(tcx.intern_layout(LayoutDetails::scalar(self, data_ptr)));
                }

                let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
                let metadata = match unsized_part.kind {
                    ty::Foreign(..) => {
                        return Ok(tcx.intern_layout(LayoutDetails::scalar(self, data_ptr)));
                    }
                    ty::Slice(_) | ty::Str => {
                        scalar_unit(Int(dl.ptr_sized_integer(), false))
                    }
                    ty::Dynamic(..) => {
                        let mut vtable = scalar_unit(Pointer);
                        vtable.valid_range = 1..=*vtable.valid_range.end();
                        vtable
                    }
                    _ => return Err(LayoutError::Unknown(unsized_part))
                };

                // Effectively a (ptr, meta) tuple.
                tcx.intern_layout(self.scalar_pair(data_ptr, metadata))
            }
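
            // Illustrative examples: `&u8` and pointers to extern types stay
            // thin scalars, `&[u8]` becomes a (pointer, usize length) scalar
            // pair, and `&dyn Trait` a (pointer, non-null vtable pointer) pair.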

            // Arrays and slices.
            ty::Array(element, mut count) => {
                if count.has_projections() {
                    count = tcx.normalize_erasing_regions(param_env, count);
                    if count.has_projections() {
                        return Err(LayoutError::Unknown(ty));
                    }
                }

                let count = count.try_eval_usize(tcx, param_env).ok_or(LayoutError::Unknown(ty))?;
                let element = self.layout_of(element)?;
                let size = element.size.checked_mul(count, dl)
                    .ok_or(LayoutError::SizeOverflow(ty))?;

                let abi = if count != 0 && ty.conservative_is_privately_uninhabited(tcx) {
                    Abi::Uninhabited
                } else {
                    Abi::Aggregate { sized: true }
                };

                let largest_niche = if count != 0 {
                    element.largest_niche.clone()
                } else {
                    None
                };

                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldPlacement::Array {
                        stride: element.size,
                        count
                    },
                    abi,
                    largest_niche,
                    align: element.align,
                    size
                })
            }
            ty::Slice(element) => {
                let element = self.layout_of(element)?;
                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldPlacement::Array {
                        stride: element.size,
                        count: 0
                    },
                    abi: Abi::Aggregate { sized: false },
                    largest_niche: None,
                    align: element.align,
                    size: Size::ZERO
                })
            }
            ty::Str => {
                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldPlacement::Array {
                        stride: Size::from_bytes(1),
                        count: 0
                    },
                    abi: Abi::Aggregate { sized: false },
                    largest_niche: None,
                    align: dl.i8_align,
                    size: Size::ZERO
                })
            }

            // Odd unit types.
            ty::FnDef(..) => {
                univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?
            }
            ty::Dynamic(..) | ty::Foreign(..) => {
                let mut unit = self.univariant_uninterned(ty, &[], &ReprOptions::default(),
                  StructKind::AlwaysSized)?;
                match unit.abi {
                    Abi::Aggregate { ref mut sized } => *sized = false,
                    _ => bug!()
                }
                tcx.intern_layout(unit)
            }

            ty::Generator(def_id, substs, _) => self.generator_layout(ty, def_id, substs)?,

            ty::Closure(def_id, ref substs) => {
                let tys = substs.as_closure().upvar_tys(def_id, tcx);
                univariant(&tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    StructKind::AlwaysSized)?
            }

            ty::Tuple(tys) => {
                let kind = if tys.len() == 0 {
                    StructKind::AlwaysSized
                } else {
                    StructKind::MaybeUnsized
                };

                univariant(&tys.iter().map(|k| {
                    self.layout_of(k.expect_ty())
                }).collect::<Result<Vec<_>, _>>()?, &ReprOptions::default(), kind)?
            }

            // SIMD vector types.
            ty::Adt(def, ..) if def.repr.simd() => {
                let element = self.layout_of(ty.simd_type(tcx))?;
                let count = ty.simd_size(tcx) as u64;
                assert!(count > 0);
                let scalar = match element.abi {
                    Abi::Scalar(ref scalar) => scalar.clone(),
                    _ => {
                        tcx.sess.fatal(&format!("monomorphising SIMD type `{}` with \
                                                 a non-machine element type `{}`",
                                                ty, element.ty));
                    }
                };
                let size = element.size.checked_mul(count, dl)
                    .ok_or(LayoutError::SizeOverflow(ty))?;
                let align = dl.vector_align(size);
                let size = size.align_to(align.abi);

                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldPlacement::Array {
                        stride: element.size,
                        count
                    },
                    abi: Abi::Vector {
                        element: scalar,
                        count
                    },
                    largest_niche: element.largest_niche.clone(),
                    size,
                    align,
                })
            }

            // ADTs.
            ty::Adt(def, substs) => {
                // Cache the field layouts.
                let variants = def.variants.iter().map(|v| {
                    v.fields.iter().map(|field| {
                        self.layout_of(field.ty(tcx, substs))
                    }).collect::<Result<Vec<_>, _>>()
                }).collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                if def.is_union() {
                    if def.repr.pack.is_some() && def.repr.align.is_some() {
                        bug!("union cannot be packed and aligned");
                    }

                    let mut align = if def.repr.pack.is_some() {
                        dl.i8_align
                    } else {
                        dl.aggregate_align
                    };

                    if let Some(repr_align) = def.repr.align {
                        align = align.max(AbiAndPrefAlign::new(repr_align));
                    }

                    let optimize = !def.repr.inhibit_union_abi_opt();
                    let mut size = Size::ZERO;
                    let mut abi = Abi::Aggregate { sized: true };
                    let index = VariantIdx::new(0);
                    for field in &variants[index] {
                        assert!(!field.is_unsized());
                        align = align.max(field.align);

                        // If all non-ZST fields have the same ABI, forward this ABI.
                        if optimize && !field.is_zst() {
                            // Normalize the scalars to the maximal valid range
                            // via scalar_unit.
                            let field_abi = match &field.abi {
                                Abi::Scalar(x) => Abi::Scalar(scalar_unit(x.value)),
                                Abi::ScalarPair(x, y) => {
                                    Abi::ScalarPair(
                                        scalar_unit(x.value),
                                        scalar_unit(y.value),
                                    )
                                }
                                Abi::Vector { element: x, count } => {
                                    Abi::Vector {
                                        element: scalar_unit(x.value),
                                        count: *count,
                                    }
                                }
                                Abi::Uninhabited |
                                Abi::Aggregate { .. } => Abi::Aggregate { sized: true },
                            };

                            if size == Size::ZERO {
                                // First non-ZST field: initialize `abi`.
                                abi = field_abi;
                            } else if abi != field_abi {
                                // Fields have different ABIs: reset to Aggregate.
                                abi = Abi::Aggregate { sized: true };
                            }
                        }

                        size = cmp::max(size, field.size);
                    }

                    if let Some(pack) = def.repr.pack {
                        align = align.min(AbiAndPrefAlign::new(pack));
                    }

                    return Ok(tcx.intern_layout(LayoutDetails {
                        variants: Variants::Single { index },
                        fields: FieldPlacement::Union(variants[index].len()),
                        abi,
                        largest_niche: None,
                        align,
                        size: size.align_to(align.abi)
                    }));
                }

                // A variant is absent if it's uninhabited and only has ZST fields.
                // Present uninhabited variants only require space for their fields,
                // but *not* an encoding of the discriminant (e.g., a tag value).
                // See issue #49298 for more details on the need to leave space
                // for non-ZST uninhabited data (mostly partial initialization).
                let absent = |fields: &[TyLayout<'_>]| {
                    let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
                    let is_zst = fields.iter().all(|f| f.is_zst());
                    uninhabited && is_zst
                };
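
                // For example (illustrative): a variant `V(!)` is uninhabited
                // and all its fields are ZSTs, so it is absent; `V(u32, !)` is
                // also uninhabited, but the `u32` is not a ZST, so the variant
                // is present and space is reserved for it (see #49298 above).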
                let (present_first, present_second) = {
                    let mut present_variants = variants.iter_enumerated().filter_map(|(i, v)| {
                        if absent(v) {
                            None
                        } else {
                            Some(i)
                        }
                    });
                    (present_variants.next(), present_variants.next())
                };
                let present_first = match present_first {
                    present_first @ Some(_) => present_first,
                    // Uninhabited because it has no variants, or only absent ones.
                    None if def.is_enum() => return tcx.layout_raw(param_env.and(tcx.types.never)),
                    // If it's a struct, still compute a layout so that we can
                    // still compute the field offsets.
                    None => Some(VariantIdx::new(0)),
                };

                let is_struct = !def.is_enum() ||
                    // Only one variant is present.
                    (present_second.is_none() &&
                    // Representation optimizations are allowed.
                    !def.repr.inhibit_enum_layout_opt());
                if is_struct {
                    // Struct, or univariant enum equivalent to a struct.
                    // (Typechecking will reject discriminant-sizing attrs.)

                    let v = present_first.unwrap();
                    let kind = if def.is_enum() || variants[v].len() == 0 {
                        StructKind::AlwaysSized
                    } else {
                        let param_env = tcx.param_env(def.did);
                        let last_field = def.variants[v].fields.last().unwrap();
                        let always_sized = tcx.type_of(last_field.did)
                                              .is_sized(tcx.at(DUMMY_SP), param_env);
                        if !always_sized { StructKind::MaybeUnsized }
                        else { StructKind::AlwaysSized }
                    };

                    let mut st = self.univariant_uninterned(ty, &variants[v], &def.repr, kind)?;
                    st.variants = Variants::Single { index: v };
                    let (start, end) = self.tcx.layout_scalar_valid_range(def.did);
                    match st.abi {
                        Abi::Scalar(ref mut scalar) |
                        Abi::ScalarPair(ref mut scalar, _) => {
                            // These asserts ensure that the
                            // `#[rustc_layout_scalar_valid_range(n)]` attribute
                            // is never used to *widen* the range of anything,
                            // as that would probably result in UB somewhere.
                            // FIXME(eddyb) the asserts are probably not needed,
                            // as larger validity ranges would result in missed
                            // optimizations, *not* wrongly assuming the inner
                            // value is valid. e.g. unions enlarge validity ranges,
                            // because the values may be uninitialized.
                            if let Bound::Included(start) = start {
                                // FIXME(eddyb) this might be incorrect - it doesn't
                                // account for wrap-around (end < start) ranges.
                                assert!(*scalar.valid_range.start() <= start);
                                scalar.valid_range = start..=*scalar.valid_range.end();
                            }
                            if let Bound::Included(end) = end {
                                // FIXME(eddyb) this might be incorrect - it doesn't
                                // account for wrap-around (end < start) ranges.
                                assert!(*scalar.valid_range.end() >= end);
                                scalar.valid_range = *scalar.valid_range.start()..=end;
                            }

                            // Update `largest_niche` if we have introduced a larger niche.
                            let niche = Niche::from_scalar(dl, Size::ZERO, scalar.clone());
                            if let Some(niche) = niche {
                                match &st.largest_niche {
                                    Some(largest_niche) => {
                                        // Replace the existing niche even if they're equal,
                                        // because this one is at a lower offset.
                                        if largest_niche.available(dl) <= niche.available(dl) {
                                            st.largest_niche = Some(niche);
                                        }
                                    }
                                    None => st.largest_niche = Some(niche),
                                }
                            }
                        }
                        _ => assert!(
                            start == Bound::Unbounded && end == Bound::Unbounded,
                            "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
                            def,
                            st,
                        ),
                    }

                    return Ok(tcx.intern_layout(st));
                }

                // The current code for niche-filling relies on variant indices
                // instead of actual discriminants, so dataful enums with
                // explicit discriminants (RFC #2363) would misbehave.
                let no_explicit_discriminants = def.variants.iter_enumerated()
                    .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i.as_u32()));

                // Niche-filling enum optimization.
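                // For example (illustrative): `Option<&T>` has one dataful
                // variant (`Some`) and one all-ZST variant (`None`), so `None`
                // can be encoded as the pointer's forbidden value 0 and the
                // whole enum fits in a single pointer, with no separate tag.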
                if !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants {
                    let mut dataful_variant = None;
                    let mut niche_variants = VariantIdx::MAX..=VariantIdx::new(0);

                    // Find one non-ZST variant.
                    'variants: for (v, fields) in variants.iter_enumerated() {
                        if absent(fields) {
                            continue 'variants;
                        }
                        for f in fields {
                            if !f.is_zst() {
                                if dataful_variant.is_none() {
                                    dataful_variant = Some(v);
                                    continue 'variants;
                                } else {
                                    dataful_variant = None;
                                    break 'variants;
                                }
                            }
                        }
                        niche_variants = *niche_variants.start().min(&v)..=v;
                    }

                    if niche_variants.start() > niche_variants.end() {
                        dataful_variant = None;
                    }

                    if let Some(i) = dataful_variant {
                        let count = (
                            niche_variants.end().as_u32() - niche_variants.start().as_u32() + 1
                        ) as u128;
                        // FIXME(#62691) use the largest niche across all fields,
                        // not just the first one.
                        for (field_index, &field) in variants[i].iter().enumerate() {
                            let niche = match &field.largest_niche {
                                Some(niche) => niche,
                                _ => continue,
                            };
                            let (niche_start, niche_scalar) = match niche.reserve(self, count) {
                                Some(pair) => pair,
                                None => continue,
                            };

                            let mut align = dl.aggregate_align;
                            let st = variants.iter_enumerated().map(|(j, v)| {
                                let mut st = self.univariant_uninterned(ty, v,
                                    &def.repr, StructKind::AlwaysSized)?;
                                st.variants = Variants::Single { index: j };

                                align = align.max(st.align);

                                Ok(st)
                            }).collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                            let offset = st[i].fields.offset(field_index) + niche.offset;
                            let size = st[i].size;

                            let mut abi = match st[i].abi {
                                Abi::Scalar(_) => Abi::Scalar(niche_scalar.clone()),
                                Abi::ScalarPair(ref first, ref second) => {
                                    // We need to use scalar_unit to reset the
                                    // valid range to the maximal one for that
                                    // primitive, because only the niche is
                                    // guaranteed to be initialised, not the
                                    // other primitive.
                                    if offset.bytes() == 0 {
                                        Abi::ScalarPair(
                                            niche_scalar.clone(),
                                            scalar_unit(second.value),
                                        )
                                    } else {
                                        Abi::ScalarPair(
                                            scalar_unit(first.value),
                                            niche_scalar.clone(),
                                        )
                                    }
                                }
                                _ => Abi::Aggregate { sized: true },
                            };

                            if st.iter().all(|v| v.abi.is_uninhabited()) {
                                abi = Abi::Uninhabited;
                            }

                            let largest_niche =
                                Niche::from_scalar(dl, offset, niche_scalar.clone());

                            return Ok(tcx.intern_layout(LayoutDetails {
                                variants: Variants::Multiple {
                                    discr: niche_scalar,
                                    discr_kind: DiscriminantKind::Niche {
                                        dataful_variant: i,
                                        niche_variants,
                                        niche_start,
                                    },
                                    discr_index: 0,
                                    variants: st,
                                },
                                fields: FieldPlacement::Arbitrary {
                                    offsets: vec![offset],
                                    memory_index: vec![0]
                                },
                                abi,
                                largest_niche,
                                size,
                                align,
                            }));
                        }
                    }
                }

                let (mut min, mut max) = (i128::max_value(), i128::min_value());
                let discr_type = def.repr.discr_type();
                let bits = Integer::from_attr(self, discr_type).size().bits();
                for (i, discr) in def.discriminants(tcx) {
                    if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
                        continue;
                    }
                    let mut x = discr.val as i128;
                    if discr_type.is_signed() {
                        // Sign-extend the raw representation to be an i128.
                        x = (x << (128 - bits)) >> (128 - bits);
                    }
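                    // Worked example (illustrative): for a `#[repr(i8)]` enum,
                    // bits = 8, so a raw discriminant of 0xFF becomes
                    // (0xFF << 120) >> 120 = -1 when sign-extended to i128.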
                    if x < min { min = x; }
                    if x > max { max = x; }
                }
                // We might have no inhabited variants, so pretend there's at least one.
                if (min, max) == (i128::max_value(), i128::min_value()) {
                    min = 0;
                    max = 0;
                }
                assert!(min <= max, "discriminant range is {}...{}", min, max);
                let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max);

                let mut align = dl.aggregate_align;
                let mut size = Size::ZERO;

                // We're interested in the smallest alignment, so start large.
                let mut start_align = Align::from_bytes(256).unwrap();
                assert_eq!(Integer::for_align(dl, start_align), None);

                // repr(C) on an enum tells us to make a (tag, union) layout,
                // so we need to grow the prefix alignment to be at least
                // the alignment of the union. (This value is used both for
                // determining the alignment of the overall enum, and for
                // determining the alignment of the payload after the tag.)
                let mut prefix_align = min_ity.align(dl).abi;
                if def.repr.c() {
                    for fields in &variants {
                        for field in fields {
                            prefix_align = prefix_align.max(field.align.abi);
                        }
                    }
                }

                // Create the set of structs that represent each variant.
                let mut layout_variants = variants.iter_enumerated().map(|(i, field_layouts)| {
                    let mut st = self.univariant_uninterned(ty, &field_layouts,
                        &def.repr, StructKind::Prefixed(min_ity.size(), prefix_align))?;
                    st.variants = Variants::Single { index: i };
                    // Find the first field we can't move later
                    // to make room for a larger discriminant.
                    for field in st.fields.index_by_increasing_offset().map(|j| field_layouts[j]) {
                        if !field.is_zst() || field.align.abi.bytes() != 1 {
                            start_align = start_align.min(field.align.abi);
                            break;
                        }
                    }
                    size = cmp::max(size, st.size);
                    align = align.max(st.align);
                    Ok(st)
                }).collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                // Align the maximum variant size to the largest alignment.
                size = size.align_to(align.abi);

                if size.bytes() >= dl.obj_size_bound() {
                    return Err(LayoutError::SizeOverflow(ty));
                }

                let typeck_ity = Integer::from_attr(dl, def.repr.discr_type());
                if typeck_ity < min_ity {
                    // It is a bug if layout decided on a greater discriminant size than
                    // typeck did at this point (based on the values the discriminant can
                    // take on), because the discriminant will be loaded and then stored
                    // into a variable of the type computed by typeck. Consider such a
                    // case (a bug): typeck decided on a byte-sized discriminant, but
                    // layout thinks we need 16 bits to store all the discriminant values.
                    // Then, in codegen, storing this 16-bit discriminant into an 8-bit
                    // temporary would have to discard some of the space needed to
                    // represent it (or layout is wrong in thinking it needs 16 bits).
                    bug!("layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
                         min_ity, typeck_ity);
                    // However, it is fine to make the discriminant type as large as we
                    // want (as an optimisation) after this point; we'll just truncate
                    // the value we load in codegen.
                }

                // Check to see if we should use a different type for the
                // discriminant. We can safely use a type with the same size
                // as the alignment of the first field of each variant.
                // We increase the size of the discriminant to avoid LLVM copying
                // padding when it doesn't need to. This normally causes unaligned
                // load/stores and excessive memcpy/memset operations. By using a
                // bigger integer size, LLVM can be sure about its contents and
                // won't be so conservative.

                // Use the initial field alignment.
                let mut ity = if def.repr.c() || def.repr.int.is_some() {
                    min_ity
                } else {
                    Integer::for_align(dl, start_align).unwrap_or(min_ity)
                };

                // If the alignment is not larger than the chosen discriminant size,
                // don't use the alignment as the final size.
                if ity <= min_ity {
                    ity = min_ity;
                } else {
                    // Patch up the variants' first few fields.
                    let old_ity_size = min_ity.size();
                    let new_ity_size = ity.size();
                    for variant in &mut layout_variants {
                        match variant.fields {
                            FieldPlacement::Arbitrary { ref mut offsets, .. } => {
                                for i in offsets {
                                    if *i <= old_ity_size {
                                        assert_eq!(*i, old_ity_size);
                                        *i = new_ity_size;
                                    }
                                }
                                // We might be making the struct larger.
                                if variant.size <= old_ity_size {
                                    variant.size = new_ity_size;
                                }
                            }
                            _ => bug!()
                        }
                    }
                }

                let tag_mask = !0u128 >> (128 - ity.size().bits());
                let tag = Scalar {
                    value: Int(ity, signed),
                    valid_range: (min as u128 & tag_mask)..=(max as u128 & tag_mask),
                };
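                // Illustrative example: for `enum E { A = -1, B = 0, C = 1 }`
                // with an i8 tag, tag_mask is 0xff, so the valid range becomes
                // 255..=1, a wrap-around range covering exactly {255, 0, 1}.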
                let mut abi = Abi::Aggregate { sized: true };
                if tag.value.size(dl) == size {
                    abi = Abi::Scalar(tag.clone());
                } else {
                    // Try to use a ScalarPair for all tagged enums.
                    let mut common_prim = None;
                    for (field_layouts, layout_variant) in variants.iter().zip(&layout_variants) {
                        let offsets = match layout_variant.fields {
                            FieldPlacement::Arbitrary { ref offsets, .. } => offsets,
                            _ => bug!(),
                        };
                        let mut fields = field_layouts
                            .iter()
                            .zip(offsets)
                            .filter(|p| !p.0.is_zst());
                        let (field, offset) = match (fields.next(), fields.next()) {
                            (None, None) => continue,
                            (Some(pair), None) => pair,
                            _ => {
                                common_prim = None;
                                break;
                            }
                        };
                        let prim = match field.details.abi {
                            Abi::Scalar(ref scalar) => scalar.value,
                            _ => {
                                common_prim = None;
                                break;
                            }
                        };
                        if let Some(pair) = common_prim {
                            // This is pretty conservative. We could go fancier
                            // by conflating things like i32 and u32, or even
                            // realising that (u8, u8) could just cohabit with
                            // u16 or even u32.
                            if pair != (prim, offset) {
                                common_prim = None;
                                break;
                            }
                        } else {
                            common_prim = Some((prim, offset));
                        }
                    }
                    if let Some((prim, offset)) = common_prim {
                        let pair = self.scalar_pair(tag.clone(), scalar_unit(prim));
                        let pair_offsets = match pair.fields {
                            FieldPlacement::Arbitrary {
                                ref offsets,
                                ref memory_index
                            } => {
                                assert_eq!(memory_index, &[0, 1]);
                                offsets
                            }
                            _ => bug!()
                        };
                        if pair_offsets[0] == Size::ZERO &&
                            pair_offsets[1] == *offset &&
                            align == pair.align &&
                            size == pair.size {
                            // We can use `ScalarPair` only when it matches our
                            // already computed layout (including `#[repr(C)]`).
                            abi = pair.abi;
                        }
                    }
                }

                if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
                    abi = Abi::Uninhabited;
                }

                let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag.clone());

                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Multiple {
                        discr: tag,
                        discr_kind: DiscriminantKind::Tag,
                        discr_index: 0,
                        variants: layout_variants,
                    },
                    fields: FieldPlacement::Arbitrary {
                        offsets: vec![Size::ZERO],
                        memory_index: vec![0]
                    },
                    largest_niche,
                    abi,
                    align,
                    size
                })
            }

            // Types with no meaningful known layout.
            ty::Projection(_) | ty::Opaque(..) => {
                let normalized = tcx.normalize_erasing_regions(param_env, ty);
1260                 if ty == normalized {
1261                     return Err(LayoutError::Unknown(ty));
1262                 }
1263                 tcx.layout_raw(param_env.and(normalized))?
1264             }
1265
1266             ty::Bound(..) |
1267             ty::Placeholder(..) |
1268             ty::UnnormalizedProjection(..) |
1269             ty::GeneratorWitness(..) |
1270             ty::Infer(_) => {
1271                 bug!("LayoutDetails::compute: unexpected type `{}`", ty)
1272             }
1273
1274             ty::Param(_) | ty::Error => {
1275                 return Err(LayoutError::Unknown(ty));
1276             }
1277         })
1278     }
1279 }
1280
1281 /// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
1282 #[derive(Clone, Debug, PartialEq)]
1283 enum SavedLocalEligibility {
1284     Unassigned,
1285     Assigned(VariantIdx),
1286     // FIXME: Use newtype_index so we aren't wasting bytes
1287     Ineligible(Option<u32>),
1288 }
1289
1290 // When laying out generators, we divide our saved local fields into two
1291 // categories: overlap-eligible and overlap-ineligible.
1292 //
1293 // Those fields which are ineligible for overlap go in a "prefix" at the
1294 // beginning of the layout, and always have space reserved for them.
1295 //
1296 // Overlap-eligible fields are only assigned to one variant, so we lay
1297 // those fields out for each variant and put them right after the
1298 // prefix.
1299 //
1300 // Finally, in the layout details, we point to the fields from the
1301 // variants they are assigned to. It is possible for some fields to be
1302 // included in multiple variants. No field ever "moves around" in the
1303 // layout; its offset is always the same.
1304 //
1305 // Also included in the layout are the upvars and the discriminant.
1306 // These are included as fields on the "outer" layout; they are not part
1307 // of any variant.
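//
// Conceptually (an illustrative sketch, not actual compiler output): a
// generator with local `a` live across two suspension points and locals
// `b`, `c` each live across only one might be laid out as
//
//     [ upvars | discriminant | a (promoted) | b or c (overlapping) ]
//
// where `a` always occupies the same prefix slot, while `b` and `c`
// share the trailing space because they are never live at once.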
1308 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
1309     /// Compute the eligibility and assignment of each local.
1310     fn generator_saved_local_eligibility(&self, info: &GeneratorLayout<'tcx>)
1311     -> (BitSet<GeneratorSavedLocal>, IndexVec<GeneratorSavedLocal, SavedLocalEligibility>) {
1312         use SavedLocalEligibility::*;
1313
1314         let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> =
1315             IndexVec::from_elem_n(Unassigned, info.field_tys.len());
1316
1317         // The saved locals not eligible for overlap. These will get
1318         // "promoted" to the prefix of our generator.
1319         let mut ineligible_locals = BitSet::new_empty(info.field_tys.len());
1320
1321         // Figure out which of our saved locals are fields in only
1322         // one variant. The rest are deemed ineligible for overlap.
1323         for (variant_index, fields) in info.variant_fields.iter_enumerated() {
1324             for local in fields {
1325                 match assignments[*local] {
1326                     Unassigned => {
1327                         assignments[*local] = Assigned(variant_index);
1328                     }
1329                     Assigned(idx) => {
1330                         // We've already seen this local at another suspension
1331                         // point, so it is no longer a candidate.
1332                         trace!("removing local {:?} in >1 variant ({:?}, {:?})",
1333                                local, variant_index, idx);
1334                         ineligible_locals.insert(*local);
1335                         assignments[*local] = Ineligible(None);
1336                     }
1337                     Ineligible(_) => {},
1338                 }
1339             }
1340         }
1341
1342         // Next, check every pair of eligible locals to see if they
1343         // conflict.
1344         for local_a in info.storage_conflicts.rows() {
1345             let conflicts_a = info.storage_conflicts.count(local_a);
1346             if ineligible_locals.contains(local_a) {
1347                 continue;
1348             }
1349
1350             for local_b in info.storage_conflicts.iter(local_a) {
1351                 // `local_a` and `local_b` are storage-live at the same time, so they
1352                 // cannot overlap in the generator layout. The only way to guarantee
1353                 // this is if they are in the same variant, or one is ineligible
1354                 // (which means it is stored in every variant).
1355                 if ineligible_locals.contains(local_b) ||
1356                     assignments[local_a] == assignments[local_b]
1357                 {
1358                     continue;
1359                 }
1360
1361                 // If they conflict, we will choose one to make ineligible.
1362                 // This is not always optimal; it's just a greedy heuristic that
1363                 // seems to produce good results most of the time.
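                // (The intuition: removing the local with more conflicts
                // resolves the most conflicting pairs in one step.)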
1364                 let conflicts_b = info.storage_conflicts.count(local_b);
1365                 let (remove, other) = if conflicts_a > conflicts_b {
1366                     (local_a, local_b)
1367                 } else {
1368                     (local_b, local_a)
1369                 };
1370                 ineligible_locals.insert(remove);
1371                 assignments[remove] = Ineligible(None);
1372                 trace!("removing local {:?} due to conflict with {:?}", remove, other);
1373             }
1374         }
1375
1376         // Count the number of variants in use. If only one is used, it is
1377         // impossible to overlap any locals in our layout. In this case it's
1378         // always better to make the remaining locals ineligible, so we can
1379         // lay them out with the other locals in the prefix and eliminate
1380         // unnecessary padding bytes.
1381         {
1382             let mut used_variants = BitSet::new_empty(info.variant_fields.len());
1383             for assignment in &assignments {
1384                 match assignment {
1385                     Assigned(idx) => { used_variants.insert(*idx); }
1386                     _ => {}
1387                 }
1388             }
1389             if used_variants.count() < 2 {
1390                 for assignment in assignments.iter_mut() {
1391                     *assignment = Ineligible(None);
1392                 }
1393                 ineligible_locals.insert_all();
1394             }
1395         }
1396
1397         // Write down the order of our locals that will be promoted to the prefix.
1398         for (idx, local) in ineligible_locals.iter().enumerate() {
1399             assignments[local] = Ineligible(Some(idx as u32));
1400         }
1405         debug!("generator saved local assignments: {:?}", assignments);
1406
1407         (ineligible_locals, assignments)
1408     }
1409
1410     /// Compute the full generator layout.
1411     fn generator_layout(
1412         &self,
1413         ty: Ty<'tcx>,
1414         def_id: hir::def_id::DefId,
1415         substs: SubstsRef<'tcx>,
1416     ) -> Result<&'tcx LayoutDetails, LayoutError<'tcx>> {
1417         use SavedLocalEligibility::*;
1418         let tcx = self.tcx;
1419
1420         let subst_field = |ty: Ty<'tcx>| { ty.subst(tcx, substs) };
1421
1422         let info = tcx.generator_layout(def_id);
1423         let (ineligible_locals, assignments) = self.generator_saved_local_eligibility(&info);
1424
1425         // Build a prefix layout, including "promoting" all ineligible
1426         // locals as part of the prefix. We compute the layout of all of
1427         // these fields at once to get optimal packing.
1428         let discr_index = substs.as_generator().prefix_tys(def_id, tcx).count();
1429         // FIXME(eddyb) set the correct validity range for the discriminant.
1430         let discr_layout = self.layout_of(substs.as_generator().discr_ty(tcx))?;
1431         let discr = match &discr_layout.abi {
1432             Abi::Scalar(s) => s.clone(),
1433             _ => bug!(),
1434         };
1435         let promoted_layouts = ineligible_locals.iter()
1436             .map(|local| subst_field(info.field_tys[local]))
1437             .map(|ty| tcx.mk_maybe_uninit(ty))
1438             .map(|ty| self.layout_of(ty));
1439         let prefix_layouts = substs.as_generator().prefix_tys(def_id, tcx)
1440             .map(|ty| self.layout_of(ty))
1441             .chain(iter::once(Ok(discr_layout)))
1442             .chain(promoted_layouts)
1443             .collect::<Result<Vec<_>, _>>()?;
1444         let prefix = self.univariant_uninterned(
1445             ty,
1446             &prefix_layouts,
1447             &ReprOptions::default(),
1448             StructKind::AlwaysSized,
1449         )?;
1450
1451         let (prefix_size, prefix_align) = (prefix.size, prefix.align);
1452
1453         // Split the prefix layout into the "outer" fields (upvars and
1454         // discriminant) and the "promoted" fields. Promoted fields will
1455         // get included in each variant that requested them in
1456         // GeneratorLayout.
1457         debug!("prefix = {:#?}", prefix);
1458         let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
1459             FieldPlacement::Arbitrary { mut offsets, memory_index } => {
1460                 let mut inverse_memory_index = invert_mapping(&memory_index);
1461
1462                 // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
1463                 // "outer" and "promoted" fields respectively.
1464                 let b_start = (discr_index + 1) as u32;
1465                 let offsets_b = offsets.split_off(b_start as usize);
1466                 let offsets_a = offsets;
1467
1468                 // Disentangle the "a" and "b" components of `inverse_memory_index`
1469                 // by preserving the order but keeping only one disjoint "half" each.
1470                 // FIXME(eddyb) build a better abstraction for permutations, if possible.
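                // (Worked example with made-up values: for
                // `inverse_memory_index = [2, 0, 3, 1]` and `b_start = 2`,
                // the "b" half is `[0, 1]` (entries 2 and 3, shifted down)
                // and the "a" half is `[0, 1]` (entries below 2, kept as-is).)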
1471                 let inverse_memory_index_b: Vec<_> =
1472                     inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
1473                 inverse_memory_index.retain(|&i| i < b_start);
1474                 let inverse_memory_index_a = inverse_memory_index;
1475
1476                 // Since `inverse_memory_index_{a,b}` each only refer to their
1477                 // respective fields, they can be safely inverted.
1478                 let memory_index_a = invert_mapping(&inverse_memory_index_a);
1479                 let memory_index_b = invert_mapping(&inverse_memory_index_b);
1480
1481                 let outer_fields = FieldPlacement::Arbitrary {
1482                     offsets: offsets_a,
1483                     memory_index: memory_index_a,
1484                 };
1485                 (outer_fields, offsets_b, memory_index_b)
1486             }
1487             _ => bug!(),
1488         };
1489
1490         let mut size = prefix.size;
1491         let mut align = prefix.align;
1492         let variants = info.variant_fields.iter_enumerated().map(|(index, variant_fields)| {
1493             // Only include overlap-eligible fields when we compute our variant layout.
1494             let variant_only_tys = variant_fields
1495                 .iter()
1496                 .filter(|local| {
1497                     match assignments[**local] {
1498                         Unassigned => bug!(),
1499                         Assigned(v) if v == index => true,
1500                         Assigned(_) => bug!("assignment does not match variant"),
1501                         Ineligible(_) => false,
1502                     }
1503                 })
1504                 .map(|local| subst_field(info.field_tys[*local]));
1505
1506             let mut variant = self.univariant_uninterned(
1507                 ty,
1508                 &variant_only_tys
1509                     .map(|ty| self.layout_of(ty))
1510                     .collect::<Result<Vec<_>, _>>()?,
1511                 &ReprOptions::default(),
1512                 StructKind::Prefixed(prefix_size, prefix_align.abi))?;
1513             variant.variants = Variants::Single { index };
1514
1515             let (offsets, memory_index) = match variant.fields {
1516                 FieldPlacement::Arbitrary { offsets, memory_index } => {
1517                     (offsets, memory_index)
1518                 }
1519                 _ => bug!(),
1520             };
1521
1522             // Now, stitch the promoted and variant-only fields back together in
1523             // the order they are mentioned by our GeneratorLayout.
1524             // Because we only use some subset (that can differ between variants)
1525             // of the promoted fields, we can't just pick those elements of the
1526             // `promoted_memory_index` (as we'd end up with gaps).
1527             // So instead, we build an "inverse memory_index", as if all of the
1528             // promoted fields were being used, but leave the elements not in the
1529             // subset as `INVALID_FIELD_IDX`, which we can filter out later to
1530             // obtain a valid (bijective) mapping.
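            // (Worked example with made-up values: with 2 promoted slots of
            // which this variant uses only slot 1, plus one variant-only
            // field, the combined inverse index starts as [!0, !0, !0],
            // is filled in to [!0, 0, 1], and the retain + invert below
            // yield the dense mapping [0, 1].)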
1531             const INVALID_FIELD_IDX: u32 = !0;
1532             let mut combined_inverse_memory_index =
1533                 vec![INVALID_FIELD_IDX; promoted_memory_index.len() + memory_index.len()];
1534             let mut offsets_and_memory_index = offsets.into_iter().zip(memory_index);
1535             let combined_offsets = variant_fields.iter().enumerate().map(|(i, local)| {
1536                 let (offset, memory_index) = match assignments[*local] {
1537                     Unassigned => bug!(),
1538                     Assigned(_) => {
1539                         let (offset, memory_index) = offsets_and_memory_index.next().unwrap();
1540                         (offset, promoted_memory_index.len() as u32 + memory_index)
1541                     }
1542                     Ineligible(field_idx) => {
1543                         let field_idx = field_idx.unwrap() as usize;
1544                         (promoted_offsets[field_idx], promoted_memory_index[field_idx])
1545                     }
1546                 };
1547                 combined_inverse_memory_index[memory_index as usize] = i as u32;
1548                 offset
1549             }).collect();
1550
1551             // Remove the unused slots and invert the mapping to obtain the
1552             // combined `memory_index` (also see previous comment).
1553             combined_inverse_memory_index.retain(|&i| i != INVALID_FIELD_IDX);
1554             let combined_memory_index = invert_mapping(&combined_inverse_memory_index);
1555
1556             variant.fields = FieldPlacement::Arbitrary {
1557                 offsets: combined_offsets,
1558                 memory_index: combined_memory_index,
1559             };
1560
1561             size = size.max(variant.size);
1562             align = align.max(variant.align);
1563             Ok(variant)
1564         }).collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1565
1566         size = size.align_to(align.abi);
1567
1568         let abi = if prefix.abi.is_uninhabited() ||
1569                      variants.iter().all(|v| v.abi.is_uninhabited()) {
1570             Abi::Uninhabited
1571         } else {
1572             Abi::Aggregate { sized: true }
1573         };
1574
1575         let layout = tcx.intern_layout(LayoutDetails {
1576             variants: Variants::Multiple {
1577                 discr,
1578                 discr_kind: DiscriminantKind::Tag,
1579                 discr_index,
1580                 variants,
1581             },
1582             fields: outer_fields,
1583             abi,
1584             largest_niche: prefix.largest_niche,
1585             size,
1586             align,
1587         });
1588         debug!("generator layout ({:?}): {:#?}", ty, layout);
1589         Ok(layout)
1590     }
1591
1592     /// This is invoked by the `layout_raw` query to record the final
1593     /// layout of each type.
1594     #[inline(always)]
1595     fn record_layout_for_printing(&self, layout: TyLayout<'tcx>) {
1596         // If we are running with `-Zprint-type-sizes`, maybe record layouts
1597         // for dumping later.
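        // (Usage sketch: building with `rustc -Z print-type-sizes` surfaces
        // the data recorded here as `print-type-size ...` output lines.)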
1598         if self.tcx.sess.opts.debugging_opts.print_type_sizes {
1599             self.record_layout_for_printing_outlined(layout)
1600         }
1601     }
1602
1603     fn record_layout_for_printing_outlined(&self, layout: TyLayout<'tcx>) {
1604         // Ignore layouts computed with non-empty param environments, and
1605         // non-monomorphic layouts, as the user only wants to see the sizes
1606         // resulting from the final codegen session.
1607         if
1608             layout.ty.has_param_types() ||
1609             !self.param_env.caller_bounds.is_empty()
1610         {
1611             return;
1612         }
1613
1614         // (delay format until we actually need it)
1615         let record = |kind, packed, opt_discr_size, variants| {
1616             let type_desc = format!("{:?}", layout.ty);
1617             self.tcx.sess.code_stats.record_type_size(kind,
1618                                                       type_desc,
1619                                                       layout.align.abi,
1620                                                       layout.size,
1621                                                       packed,
1622                                                       opt_discr_size,
1623                                                       variants);
1624         };
1625
1626         let adt_def = match layout.ty.kind {
1627             ty::Adt(ref adt_def, _) => {
1628                 debug!("print-type-size t: `{:?}` process adt", layout.ty);
1629                 adt_def
1630             }
1631
1632             ty::Closure(..) => {
1633                 debug!("print-type-size t: `{:?}` record closure", layout.ty);
1634                 record(DataTypeKind::Closure, false, None, vec![]);
1635                 return;
1636             }
1637
1638             _ => {
1639                 debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
1640                 return;
1641             }
1642         };
1643
1644         let adt_kind = adt_def.adt_kind();
1645         let adt_packed = adt_def.repr.pack.is_some();
1646
1647         let build_variant_info = |n: Option<Ident>,
1648                                   flds: &[ast::Name],
1649                                   layout: TyLayout<'tcx>| {
1650             let mut min_size = Size::ZERO;
1651             let field_info: Vec<_> = flds.iter().enumerate().map(|(i, &name)| {
1652                 match layout.field(self, i) {
1653                     Err(err) => {
1654                         bug!("no layout found for field {}: `{:?}`", name, err);
1655                     }
1656                     Ok(field_layout) => {
1657                         let offset = layout.fields.offset(i);
1658                         let field_end = offset + field_layout.size;
1659                         if min_size < field_end {
1660                             min_size = field_end;
1661                         }
1662                         session::FieldInfo {
1663                             name: name.to_string(),
1664                             offset: offset.bytes(),
1665                             size: field_layout.size.bytes(),
1666                             align: field_layout.align.abi.bytes(),
1667                         }
1668                     }
1669                 }
1670             }).collect();
1671
1672             session::VariantInfo {
1673                 name: n.map(|n| n.to_string()),
1674                 kind: if layout.is_unsized() {
1675                     session::SizeKind::Min
1676                 } else {
1677                     session::SizeKind::Exact
1678                 },
1679                 align: layout.align.abi.bytes(),
1680                 size: if min_size.bytes() == 0 {
1681                     layout.size.bytes()
1682                 } else {
1683                     min_size.bytes()
1684                 },
1685                 fields: field_info,
1686             }
1687         };
1688
1689         match layout.variants {
1690             Variants::Single { index } => {
1691                 debug!("print-type-size `{:#?}` variant {}",
1692                        layout, adt_def.variants[index].ident);
1693                 if !adt_def.variants.is_empty() {
1694                     let variant_def = &adt_def.variants[index];
1695                     let fields: Vec<_> =
1696                         variant_def.fields.iter().map(|f| f.ident.name).collect();
1697                     record(adt_kind.into(),
1698                            adt_packed,
1699                            None,
1700                            vec![build_variant_info(Some(variant_def.ident),
1701                                                    &fields,
1702                                                    layout)]);
1703                 } else {
1704                     // (This case arises for *empty* enums, so record
1705                     // it with zero variants.)
1706                     record(adt_kind.into(), adt_packed, None, vec![]);
1707                 }
1708             }
1709
1710             Variants::Multiple { ref discr, ref discr_kind, .. } => {
1711                 debug!("print-type-size `{:#?}` adt general variants def {}",
1712                        layout.ty, adt_def.variants.len());
1713                 let variant_infos: Vec<_> =
1714                     adt_def.variants.iter_enumerated().map(|(i, variant_def)| {
1715                         let fields: Vec<_> =
1716                             variant_def.fields.iter().map(|f| f.ident.name).collect();
1717                         build_variant_info(Some(variant_def.ident),
1718                                            &fields,
1719                                            layout.for_variant(self, i))
1720                     })
1721                     .collect();
1722                 record(adt_kind.into(), adt_packed, match discr_kind {
1723                     DiscriminantKind::Tag => Some(discr.value.size(self)),
1724                     _ => None
1725                 }, variant_infos);
1726             }
1727         }
1728     }
1729 }
1730
1731 /// Type size "skeleton", i.e., the only information determining a type's size.
1732 /// While this is conservative (aside from constant sizes, only pointers,
1733 /// newtypes thereof, and null-pointer-optimized enums are allowed), it is
1734 /// enough to statically check common use cases of `transmute`.
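///
/// For example (an illustrative use): for a generic parameter `T`, both
/// `&T` and `Option<&T>` reduce to `SizeSkeleton::Pointer { tail: T, .. }`,
/// so a `transmute` between them can be accepted without knowing the
/// concrete layout of `T`.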
1735 #[derive(Copy, Clone, Debug)]
1736 pub enum SizeSkeleton<'tcx> {
1737     /// Any statically computable Layout.
1738     Known(Size),
1739
1740     /// A potentially-fat pointer.
1741     Pointer {
1742         /// If true, this pointer is never null.
1743         non_zero: bool,
1744         /// The type which determines the unsized metadata, if any,
1745         /// of this pointer. Either a type parameter or a projection
1746         /// depending on one, with regions erased.
1747         tail: Ty<'tcx>
1748     }
1749 }
1750
1751 impl<'tcx> SizeSkeleton<'tcx> {
1752     pub fn compute(
1753         ty: Ty<'tcx>,
1754         tcx: TyCtxt<'tcx>,
1755         param_env: ty::ParamEnv<'tcx>,
1756     ) -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
1757         debug_assert!(!ty.has_infer_types());
1758
1759         // First try computing a static layout.
1760         let err = match tcx.layout_of(param_env.and(ty)) {
1761             Ok(layout) => {
1762                 return Ok(SizeSkeleton::Known(layout.size));
1763             }
1764             Err(err) => err
1765         };
1766
1767         match ty.kind {
1768             ty::Ref(_, pointee, _) |
1769             ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
1770                 let non_zero = !ty.is_unsafe_ptr();
1771                 let tail = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
1772                 match tail.kind {
1773                     ty::Param(_) | ty::Projection(_) => {
1774                         debug_assert!(tail.has_param_types());
1775                         Ok(SizeSkeleton::Pointer {
1776                             non_zero,
1777                             tail: tcx.erase_regions(&tail)
1778                         })
1779                     }
1780                     _ => {
1781                         bug!("SizeSkeleton::compute({}): layout errored ({}), yet \
1782                               tail `{}` is not a type parameter or a projection",
1783                              ty, err, tail)
1784                     }
1785                 }
1786             }
1787
1788             ty::Adt(def, substs) => {
1789                 // Only newtypes and enums w/ nullable pointer optimization.
1790                 if def.is_union() || def.variants.is_empty() || def.variants.len() > 2 {
1791                     return Err(err);
1792                 }
1793
1794                 // Get a zero-sized variant or a pointer newtype.
1795                 let zero_or_ptr_variant = |i| {
1796                     let i = VariantIdx::new(i);
1797                     let fields = def.variants[i].fields.iter().map(|field| {
1798                         SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env)
1799                     });
1800                     let mut ptr = None;
1801                     for field in fields {
1802                         let field = field?;
1803                         match field {
1804                             SizeSkeleton::Known(size) => {
1805                                 if size.bytes() > 0 {
1806                                     return Err(err);
1807                                 }
1808                             }
1809                             SizeSkeleton::Pointer {..} => {
1810                                 if ptr.is_some() {
1811                                     return Err(err);
1812                                 }
1813                                 ptr = Some(field);
1814                             }
1815                         }
1816                     }
1817                     Ok(ptr)
1818                 };
1819
1820                 let v0 = zero_or_ptr_variant(0)?;
1821                 // Newtype.
1822                 if def.variants.len() == 1 {
1823                     if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
1824                         return Ok(SizeSkeleton::Pointer {
1825                             non_zero: non_zero || match tcx.layout_scalar_valid_range(def.did) {
1826                                 (Bound::Included(start), Bound::Unbounded) => start > 0,
1827                                 (Bound::Included(start), Bound::Included(end)) =>
1828                                     0 < start && start < end,
1829                                 _ => false,
1830                             },
1831                             tail,
1832                         });
1833                     } else {
1834                         return Err(err);
1835                     }
1836                 }
1837
1838                 let v1 = zero_or_ptr_variant(1)?;
1839                 // Nullable pointer enum optimization.
1840                 match (v0, v1) {
1841                     (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None) |
1842                     (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
1843                         Ok(SizeSkeleton::Pointer {
1844                             non_zero: false,
1845                             tail,
1846                         })
1847                     }
1848                     _ => Err(err)
1849                 }
1850             }
1851
1852             ty::Projection(_) | ty::Opaque(..) => {
1853                 let normalized = tcx.normalize_erasing_regions(param_env, ty);
1854                 if ty == normalized {
1855                     Err(err)
1856                 } else {
1857                     SizeSkeleton::compute(normalized, tcx, param_env)
1858                 }
1859             }
1860
1861             _ => Err(err)
1862         }
1863     }
1864
1865     pub fn same_size(self, other: SizeSkeleton<'_>) -> bool {
1866         match (self, other) {
1867             (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
1868             (SizeSkeleton::Pointer { tail: a, .. },
1869              SizeSkeleton::Pointer { tail: b, .. }) => a == b,
1870             _ => false
1871         }
1872     }
1873 }
1874
1875 pub trait HasTyCtxt<'tcx>: HasDataLayout {
1876     fn tcx(&self) -> TyCtxt<'tcx>;
1877 }
1878
1879 pub trait HasParamEnv<'tcx> {
1880     fn param_env(&self) -> ty::ParamEnv<'tcx>;
1881 }
1882
1883 impl<'tcx> HasDataLayout for TyCtxt<'tcx> {
1884     fn data_layout(&self) -> &TargetDataLayout {
1885         &self.data_layout
1886     }
1887 }
1888
1889 impl<'tcx> HasTyCtxt<'tcx> for TyCtxt<'tcx> {
1890     fn tcx(&self) -> TyCtxt<'tcx> {
1891         *self
1892     }
1893 }
1894
1895 impl<'tcx, C> HasParamEnv<'tcx> for LayoutCx<'tcx, C> {
1896     fn param_env(&self) -> ty::ParamEnv<'tcx> {
1897         self.param_env
1898     }
1899 }
1900
1901 impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
1902     fn data_layout(&self) -> &TargetDataLayout {
1903         self.tcx.data_layout()
1904     }
1905 }
1906
1907 impl<'tcx, T: HasTyCtxt<'tcx>> HasTyCtxt<'tcx> for LayoutCx<'tcx, T> {
1908     fn tcx(&self) -> TyCtxt<'tcx> {
1909         self.tcx.tcx()
1910     }
1911 }
1912
1913 pub trait MaybeResult<T> {
1914     type Error;
1915
1916     fn from(x: Result<T, Self::Error>) -> Self;
1917     fn to_result(self) -> Result<T, Self::Error>;
1918 }
1919
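// Both `TyLayout<'tcx>` itself (infallible, with `Error = !`) and
// `Result<TyLayout<'tcx>, LayoutError<'tcx>>` implement this, so generic
// code can call `.to_result()` uniformly; see, e.g., `for_variant` below.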
1920 impl<T> MaybeResult<T> for T {
1921     type Error = !;
1922
1923     fn from(x: Result<T, Self::Error>) -> Self {
1924         let Ok(x) = x;
1925         x
1926     }
1927     fn to_result(self) -> Result<T, Self::Error> {
1928         Ok(self)
1929     }
1930 }
1931
1932 impl<T, E> MaybeResult<T> for Result<T, E> {
1933     type Error = E;
1934
1935     fn from(x: Result<T, Self::Error>) -> Self {
1936         x
1937     }
1938     fn to_result(self) -> Result<T, Self::Error> {
1939         self
1940     }
1941 }
1942
1943 pub type TyLayout<'tcx> = ::rustc_target::abi::TyLayout<'tcx, Ty<'tcx>>;
1944
1945 impl<'tcx> LayoutOf for LayoutCx<'tcx, TyCtxt<'tcx>> {
1946     type Ty = Ty<'tcx>;
1947     type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;
1948
1949     /// Computes the layout of a type. Note that this implicitly
1950     /// executes in "reveal all" mode.
1951     fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout {
1952         let param_env = self.param_env.with_reveal_all();
1953         let ty = self.tcx.normalize_erasing_regions(param_env, ty);
1954         let details = self.tcx.layout_raw(param_env.and(ty))?;
1955         let layout = TyLayout {
1956             ty,
1957             details
1958         };
1959
1960         // N.B., this recording is normally disabled; when enabled, it
1961         // can however trigger recursive invocations of `layout_of`.
1962         // Therefore, we execute it *after* the main query has
1963         // completed, to avoid problems around recursive structures
1964         // and the like. (Admittedly, I wasn't able to reproduce a problem
1965         // here, but it seems like the right thing to do. -nmatsakis)
1966         self.record_layout_for_printing(layout);
1967
1968         Ok(layout)
1969     }
1970 }
1971
1972 impl LayoutOf for LayoutCx<'tcx, ty::query::TyCtxtAt<'tcx>> {
1973     type Ty = Ty<'tcx>;
1974     type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;
1975
1976     /// Computes the layout of a type. Note that this implicitly
1977     /// executes in "reveal all" mode.
1978     fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout {
1979         let param_env = self.param_env.with_reveal_all();
1980         let ty = self.tcx.normalize_erasing_regions(param_env, ty);
1981         let details = self.tcx.layout_raw(param_env.and(ty))?;
1982         let layout = TyLayout {
1983             ty,
1984             details
1985         };
1986
1987         // N.B., this recording is normally disabled; when enabled, it
1988         // can however trigger recursive invocations of `layout_of`.
1989         // Therefore, we execute it *after* the main query has
1990         // completed, to avoid problems around recursive structures
1991         // and the like. (Admittedly, I wasn't able to reproduce a problem
1992         // here, but it seems like the right thing to do. -nmatsakis)
1993         let cx = LayoutCx {
1994             tcx: *self.tcx,
1995             param_env: self.param_env
1996         };
1997         cx.record_layout_for_printing(layout);
1998
1999         Ok(layout)
2000     }
2001 }
2002
2003 // Helper (inherent) `layout_of` methods to avoid pushing `LayoutCx` to users.
2004 impl TyCtxt<'tcx> {
2005     /// Computes the layout of a type. Note that this implicitly
2006     /// executes in "reveal all" mode.
2007     #[inline]
2008     pub fn layout_of(self, param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
2009                      -> Result<TyLayout<'tcx>, LayoutError<'tcx>> {
2010         let cx = LayoutCx {
2011             tcx: self,
2012             param_env: param_env_and_ty.param_env
2013         };
2014         cx.layout_of(param_env_and_ty.value)
2015     }
2016 }
2017
2018 impl ty::query::TyCtxtAt<'tcx> {
2019     /// Computes the layout of a type. Note that this implicitly
2020     /// executes in "reveal all" mode.
2021     #[inline]
2022     pub fn layout_of(self, param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
2023                      -> Result<TyLayout<'tcx>, LayoutError<'tcx>> {
2024         let cx = LayoutCx {
2025             tcx: self.at(self.span),
2026             param_env: param_env_and_ty.param_env
2027         };
2028         cx.layout_of(param_env_and_ty.value)
2029     }
2030 }
2031
2032 impl<'tcx, C> TyLayoutMethods<'tcx, C> for Ty<'tcx>
2033 where
2034     C: LayoutOf<Ty = Ty<'tcx>, TyLayout: MaybeResult<TyLayout<'tcx>>>
2035         + HasTyCtxt<'tcx>
2036         + HasParamEnv<'tcx>,
2037 {
2038     fn for_variant(this: TyLayout<'tcx>, cx: &C, variant_index: VariantIdx) -> TyLayout<'tcx> {
2039         let details = match this.variants {
2040             Variants::Single { index } if index == variant_index => this.details,
2041
2042             Variants::Single { index } => {
2043                 // Deny calling for_variant more than once for non-Single enums.
2044                 if let Ok(layout) = cx.layout_of(this.ty).to_result() {
2045                     assert_eq!(layout.variants, Variants::Single { index });
2046                 }
2047
2048                 let fields = match this.ty.kind {
2049                     ty::Adt(def, _) => def.variants[variant_index].fields.len(),
2050                     _ => bug!()
2051                 };
2052                 let tcx = cx.tcx();
2053                 tcx.intern_layout(LayoutDetails {
2054                     variants: Variants::Single { index: variant_index },
2055                     fields: FieldPlacement::Union(fields),
2056                     abi: Abi::Uninhabited,
2057                     largest_niche: None,
2058                     align: tcx.data_layout.i8_align,
2059                     size: Size::ZERO
2060                 })
2061             }
2062
2063             Variants::Multiple { ref variants, .. } => {
2064                 &variants[variant_index]
2065             }
2066         };
2067
2068         assert_eq!(details.variants, Variants::Single { index: variant_index });
2069
2070         TyLayout {
2071             ty: this.ty,
2072             details
2073         }
2074     }
2075
2076     fn field(this: TyLayout<'tcx>, cx: &C, i: usize) -> C::TyLayout {
2077         let tcx = cx.tcx();
2078         let discr_layout = |discr: &Scalar| -> C::TyLayout {
2079             let layout = LayoutDetails::scalar(cx, discr.clone());
2080             MaybeResult::from(Ok(TyLayout {
2081                 details: tcx.intern_layout(layout),
2082                 ty: discr.value.to_ty(tcx),
2083             }))
2084         };
2085
2086         cx.layout_of(match this.ty.kind {
2087             ty::Bool |
2088             ty::Char |
2089             ty::Int(_) |
2090             ty::Uint(_) |
2091             ty::Float(_) |
2092             ty::FnPtr(_) |
2093             ty::Never |
2094             ty::FnDef(..) |
2095             ty::GeneratorWitness(..) |
2096             ty::Foreign(..) |
2097             ty::Dynamic(..) => {
2098                 bug!("TyLayout::field_type({:?}): not applicable", this)
2099             }
2100
2101             // Potentially-fat pointers.
2102             ty::Ref(_, pointee, _) |
2103             ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
2104                 assert!(i < this.fields.count());
2105
2106                 // Reuse the fat *T type as its own thin pointer data field.
2107                 // This provides information about e.g., DST struct pointees
2108                 // (which may have no non-DST form), and will work as long
2109                 // as the `Abi` or `FieldPlacement` is checked by users.
2110                 if i == 0 {
2111                     let nil = tcx.mk_unit();
2112                     let ptr_ty = if this.ty.is_unsafe_ptr() {
2113                         tcx.mk_mut_ptr(nil)
2114                     } else {
2115                         tcx.mk_mut_ref(tcx.lifetimes.re_static, nil)
2116                     };
2117                     return MaybeResult::from(cx.layout_of(ptr_ty).to_result().map(|mut ptr_layout| {
2118                         ptr_layout.ty = this.ty;
2119                         ptr_layout
2120                     }));
2121                 }
2122
2123                 match tcx.struct_tail_erasing_lifetimes(pointee, cx.param_env()).kind {
2124                     ty::Slice(_) |
2125                     ty::Str => tcx.types.usize,
2126                     ty::Dynamic(_, _) => {
2127                         tcx.mk_imm_ref(
2128                             tcx.lifetimes.re_static,
2129                             tcx.mk_array(tcx.types.usize, 3),
2130                         )
2131                         /* FIXME: use actual fn pointers
2132                         Warning: naively computing the number of entries in the
2133                         vtable by counting the methods on the trait + methods on
2134                         all parent traits does not work, because some methods can
2135                         be non-object-safe and thus excluded from the vtable.
2136                         Increase this counter if you tried to implement this but
2137                         failed to do it without duplicating a lot of code from
2138                         other places in the compiler: 2
2139                         tcx.mk_tup(&[
2140                             tcx.mk_array(tcx.types.usize, 3),
2141                             tcx.mk_array(Option<fn()>),
2142                         ])
2143                         */
2144                     }
2145                     _ => bug!("TyLayout::field_type({:?}): not applicable", this)
2146                 }
2147             }
2148
2149             // Arrays and slices.
2150             ty::Array(element, _) |
2151             ty::Slice(element) => element,
2152             ty::Str => tcx.types.u8,
2153
2154             // Tuples, generators and closures.
2155             ty::Closure(def_id, ref substs) => {
2156                 substs.as_closure().upvar_tys(def_id, tcx).nth(i).unwrap()
2157             }
2158
2159             ty::Generator(def_id, ref substs, _) => {
2160                 match this.variants {
2161                     Variants::Single { index } => {
2162                         substs.as_generator().state_tys(def_id, tcx)
2163                             .nth(index.as_usize()).unwrap()
2164                             .nth(i).unwrap()
2165                     }
2166                     Variants::Multiple { ref discr, discr_index, .. } => {
2167                         if i == discr_index {
2168                             return discr_layout(discr);
2169                         }
2170                         substs.as_generator().prefix_tys(def_id, tcx).nth(i).unwrap()
2171                     }
2172                 }
2173             }
2174
2175             ty::Tuple(tys) => tys[i].expect_ty(),
2176
2177             // SIMD vector types.
2178             ty::Adt(def, ..) if def.repr.simd() => {
2179                 this.ty.simd_type(tcx)
2180             }
2181
2182             // ADTs.
2183             ty::Adt(def, substs) => {
2184                 match this.variants {
2185                     Variants::Single { index } => {
2186                         def.variants[index].fields[i].ty(tcx, substs)
2187                     }
2188
2189                     // Discriminant field for enums (where applicable).
2190                     Variants::Multiple { ref discr, .. } => {
2191                         assert_eq!(i, 0);
2192                         return discr_layout(discr);
2193                     }
2194                 }
2195             }
2196
2197             ty::Projection(_) | ty::UnnormalizedProjection(..) | ty::Bound(..) |
2198             ty::Placeholder(..) | ty::Opaque(..) | ty::Param(_) | ty::Infer(_) |
2199             ty::Error => {
2200                 bug!("TyLayout::field_type: unexpected type `{}`", this.ty)
2201             }
2202         })
2203     }
2204
2205     fn pointee_info_at(
2206         this: TyLayout<'tcx>,
2207         cx: &C,
2208         offset: Size,
2209     ) -> Option<PointeeInfo> {
2210         match this.ty.kind {
2211             ty::RawPtr(mt) if offset.bytes() == 0 => {
2212                 cx.layout_of(mt.ty).to_result().ok()
2213                     .map(|layout| PointeeInfo {
2214                         size: layout.size,
2215                         align: layout.align.abi,
2216                         safe: None,
2217                     })
2218             }
2219
2220             ty::Ref(_, ty, mt) if offset.bytes() == 0 => {
2221                 let tcx = cx.tcx();
2222                 let is_freeze = ty.is_freeze(tcx, cx.param_env(), DUMMY_SP);
2223                 let kind = match mt {
2224                     hir::Mutability::Immutable => if is_freeze {
2225                         PointerKind::Frozen
2226                     } else {
2227                         PointerKind::Shared
2228                     },
2229                     hir::Mutability::Mutable => {
2230                         // Previously we would only emit noalias annotations for LLVM >= 6 or in
2231                         // panic=abort mode. That was deemed right, as prior versions had many bugs
2232                         // in conjunction with unwinding, but later versions didn’t seem to have
2233                         // said issues. See issue #31681.
2234                         //
2235                         // Alas, later on we encountered a case where noalias would generate wrong
2236                         // code altogether even with recent versions of LLVM in *safe* code with no
2237                         // unwinding involved. See #54462.
2238                         //
2239                         // For now, do not enable mutable_noalias by default at all, while the
2240                         // issue is being figured out.
2241                         let mutable_noalias = tcx.sess.opts.debugging_opts.mutable_noalias
2242                             .unwrap_or(false);
2243                         if mutable_noalias {
2244                             PointerKind::UniqueBorrowed
2245                         } else {
2246                             PointerKind::Shared
2247                         }
2248                     }
2249                 };
2250
2251                 cx.layout_of(ty).to_result().ok()
2252                     .map(|layout| PointeeInfo {
2253                         size: layout.size,
2254                         align: layout.align.abi,
2255                         safe: Some(kind),
2256                     })
2257             }
2258
2259             _ => {
2260                 let mut data_variant = match this.variants {
2261                     // Within the discriminant field, only the niche itself is
2262                     // always initialized, so we only check for a pointer at its
2263                     // offset.
2264                     //
2265                     // If the niche is a pointer, it's either valid (according
2266                     // to its type), or null (which the niche field's scalar
2267                     // validity range encodes).  This allows using
2268                     // `dereferenceable_or_null` for e.g., `Option<&T>`, and
2269                     // this will continue to work as long as we don't start
2270                     // using more niches than just null (e.g., the first page of
2271                     // the address space, or unaligned pointers).
2272                     Variants::Multiple {
2273                         discr_kind: DiscriminantKind::Niche {
2274                             dataful_variant,
2275                             ..
2276                         },
2277                         discr_index,
2278                         ..
2279                     } if this.fields.offset(discr_index) == offset =>
2280                         Some(this.for_variant(cx, dataful_variant)),
2281                     _ => Some(this),
2282                 };
2283
2284                 if let Some(variant) = data_variant {
2285                     // We're not interested in any unions.
2286                     if let FieldPlacement::Union(_) = variant.fields {
2287                         data_variant = None;
2288                     }
2289                 }
2290
2291                 let mut result = None;
2292
2293                 if let Some(variant) = data_variant {
2294                     let ptr_end = offset + Pointer.size(cx);
2295                     for i in 0..variant.fields.count() {
2296                         let field_start = variant.fields.offset(i);
2297                         if field_start <= offset {
2298                             let field = variant.field(cx, i);
2299                             result = field.to_result().ok()
2300                                 .and_then(|field| {
2301                                     if ptr_end <= field_start + field.size {
2302                                         // We found the right field, look inside it.
2303                                         field.pointee_info_at(cx, offset - field_start)
2304                                     } else {
2305                                         None
2306                                     }
2307                                 });
2308                             if result.is_some() {
2309                                 break;
2310                             }
2311                         }
2312                     }
2313                 }
2314
2315                 // FIXME(eddyb) This should be for `ptr::Unique<T>`, not `Box<T>`.
2316                 if let Some(ref mut pointee) = result {
2317                     if let ty::Adt(def, _) = this.ty.kind {
2318                         if def.is_box() && offset.bytes() == 0 {
2319                             pointee.safe = Some(PointerKind::UniqueOwned);
2320                         }
2321                     }
2322                 }
2323
2324                 result
2325             }
2326         }
2327     }
2328 }
2329
2330 impl<'a> HashStable<StableHashingContext<'a>> for Variants {
2331     fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
2332         use crate::ty::layout::Variants::*;
2333         mem::discriminant(self).hash_stable(hcx, hasher);
2334
2335         match *self {
2336             Single { index } => {
2337                 index.hash_stable(hcx, hasher);
2338             }
2339             Multiple {
2340                 ref discr,
2341                 ref discr_kind,
2342                 discr_index,
2343                 ref variants,
2344             } => {
2345                 discr.hash_stable(hcx, hasher);
2346                 discr_kind.hash_stable(hcx, hasher);
2347                 discr_index.hash_stable(hcx, hasher);
2348                 variants.hash_stable(hcx, hasher);
2349             }
2350         }
2351     }
2352 }
2353
2354 impl<'a> HashStable<StableHashingContext<'a>> for DiscriminantKind {
2355     fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
2356         use crate::ty::layout::DiscriminantKind::*;
2357         mem::discriminant(self).hash_stable(hcx, hasher);
2358
2359         match *self {
2360             Tag => {}
2361             Niche {
2362                 dataful_variant,
2363                 ref niche_variants,
2364                 niche_start,
2365             } => {
2366                 dataful_variant.hash_stable(hcx, hasher);
2367                 niche_variants.start().hash_stable(hcx, hasher);
2368                 niche_variants.end().hash_stable(hcx, hasher);
2369                 niche_start.hash_stable(hcx, hasher);
2370             }
2371         }
2372     }
2373 }
2374
2375 impl<'a> HashStable<StableHashingContext<'a>> for FieldPlacement {
2376     fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
2377         use crate::ty::layout::FieldPlacement::*;
2378         mem::discriminant(self).hash_stable(hcx, hasher);
2379
2380         match *self {
2381             Union(count) => {
2382                 count.hash_stable(hcx, hasher);
2383             }
2384             Array { count, stride } => {
2385                 count.hash_stable(hcx, hasher);
2386                 stride.hash_stable(hcx, hasher);
2387             }
2388             Arbitrary { ref offsets, ref memory_index } => {
2389                 offsets.hash_stable(hcx, hasher);
2390                 memory_index.hash_stable(hcx, hasher);
2391             }
2392         }
2393     }
2394 }
2395
2396 impl<'a> HashStable<StableHashingContext<'a>> for VariantIdx {
2397     fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
2398         self.as_u32().hash_stable(hcx, hasher)
2399     }
2400 }
2401
2402 impl<'a> HashStable<StableHashingContext<'a>> for Abi {
2403     fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
2404         use crate::ty::layout::Abi::*;
2405         mem::discriminant(self).hash_stable(hcx, hasher);
2406
2407         match *self {
2408             Uninhabited => {}
2409             Scalar(ref value) => {
2410                 value.hash_stable(hcx, hasher);
2411             }
2412             ScalarPair(ref a, ref b) => {
2413                 a.hash_stable(hcx, hasher);
2414                 b.hash_stable(hcx, hasher);
2415             }
2416             Vector { ref element, count } => {
2417                 element.hash_stable(hcx, hasher);
2418                 count.hash_stable(hcx, hasher);
2419             }
2420             Aggregate { sized } => {
2421                 sized.hash_stable(hcx, hasher);
2422             }
2423         }
2424     }
2425 }
2426
2427 impl<'a> HashStable<StableHashingContext<'a>> for Scalar {
2428     fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
2429         let Scalar { value, ref valid_range } = *self;
2430         value.hash_stable(hcx, hasher);
2431         valid_range.start().hash_stable(hcx, hasher);
2432         valid_range.end().hash_stable(hcx, hasher);
2433     }
2434 }

impl_stable_hash_for!(struct crate::ty::layout::Niche {
    offset,
    scalar
});

impl_stable_hash_for!(struct crate::ty::layout::LayoutDetails {
    variants,
    fields,
    abi,
    largest_niche,
    size,
    align
});

impl_stable_hash_for!(enum crate::ty::layout::Integer {
    I8,
    I16,
    I32,
    I64,
    I128
});

impl_stable_hash_for!(enum crate::ty::layout::Primitive {
    Int(integer, signed),
    F32,
    F64,
    Pointer
});

impl_stable_hash_for!(struct crate::ty::layout::AbiAndPrefAlign {
    abi,
    pref
});

impl<'tcx> HashStable<StableHashingContext<'tcx>> for Align {
    fn hash_stable(&self, hcx: &mut StableHashingContext<'tcx>, hasher: &mut StableHasher) {
        self.bytes().hash_stable(hcx, hasher);
    }
}

impl<'tcx> HashStable<StableHashingContext<'tcx>> for Size {
    fn hash_stable(&self, hcx: &mut StableHashingContext<'tcx>, hasher: &mut StableHasher) {
        self.bytes().hash_stable(hcx, hasher);
    }
}

impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for LayoutError<'tcx> {
    fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
        use crate::ty::layout::LayoutError::*;
        mem::discriminant(self).hash_stable(hcx, hasher);

        match *self {
            Unknown(t) |
            SizeOverflow(t) => t.hash_stable(hcx, hasher)
        }
    }
}

pub trait FnAbiExt<'tcx, C>
where
    C: LayoutOf<Ty = Ty<'tcx>, TyLayout = TyLayout<'tcx>>
        + HasDataLayout
        + HasTargetSpec
        + HasTyCtxt<'tcx>
        + HasParamEnv<'tcx>,
{
    fn of_instance(cx: &C, instance: ty::Instance<'tcx>) -> Self;
    fn new(cx: &C, sig: ty::FnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self;
    fn new_vtable(cx: &C, sig: ty::FnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self;
    fn new_internal(
        cx: &C,
        sig: ty::FnSig<'tcx>,
        extra_args: &[Ty<'tcx>],
        mk_arg_type: impl Fn(Ty<'tcx>, Option<usize>) -> ArgAbi<'tcx, Ty<'tcx>>,
    ) -> Self;
    fn adjust_for_abi(&mut self, cx: &C, abi: SpecAbi);
}
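
// A rough usage sketch (`cx` and `instance` are hypothetical here; the
// concrete context is anything satisfying the bounds above): codegen
// backends normally go through `of_instance` and then inspect the computed
// pass modes, e.g.
//
//     let fn_abi = FnAbi::of_instance(cx, instance);
//     match fn_abi.ret.mode {
//         PassMode::Ignore => { /* e.g. a `()` or `!` return */ }
//         PassMode::Indirect(..) => { /* returned via out-pointer (sret) */ }
//         _ => { /* returned by value */ }
//     }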

impl<'tcx, C> FnAbiExt<'tcx, C> for call::FnAbi<'tcx, Ty<'tcx>>
where
    C: LayoutOf<Ty = Ty<'tcx>, TyLayout = TyLayout<'tcx>>
        + HasDataLayout
        + HasTargetSpec
        + HasTyCtxt<'tcx>
        + HasParamEnv<'tcx>,
{
    fn of_instance(cx: &C, instance: ty::Instance<'tcx>) -> Self {
        let sig = instance.fn_sig(cx.tcx());
        let sig = cx
            .tcx()
            .normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig);
        call::FnAbi::new(cx, sig, &[])
    }

    fn new(cx: &C, sig: ty::FnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
        call::FnAbi::new_internal(cx, sig, extra_args, |ty, _| ArgAbi::new(cx.layout_of(ty)))
    }

    fn new_vtable(cx: &C, sig: ty::FnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
        FnAbiExt::new_internal(cx, sig, extra_args, |ty, arg_idx| {
            let mut layout = cx.layout_of(ty);
            // Don't pass the vtable; it's not an argument of the virtual fn.
            // Instead, pass just the data pointer, but give it the type
            // `*const/mut dyn Trait` or `&/&mut dyn Trait`, because this is
            // special-cased elsewhere in codegen.
            if arg_idx == Some(0) {
                let fat_pointer_ty = if layout.is_unsized() {
                    // Unsized `self` is passed as a pointer to `self`.
                    // FIXME (mikeyhew) change this to use &own if it is ever added to the language
                    cx.tcx().mk_mut_ptr(layout.ty)
                } else {
                    match layout.abi {
                        Abi::ScalarPair(..) => (),
                        _ => bug!("receiver type has unsupported layout: {:?}", layout),
                    }

                    // In the case of `Rc<Self>`, we need to explicitly pass a
                    // `*mut RcBox<Self>` with a `Scalar` (not `ScalarPair`) ABI.
                    // This is a hack that is understood elsewhere in the
                    // compiler as a method on a `dyn Trait`. To get the type
                    // `*mut RcBox<Self>`, we just keep unwrapping newtypes
                    // until we get a built-in pointer type.
                    let mut fat_pointer_layout = layout;
                    'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr()
                        && !fat_pointer_layout.ty.is_region_ptr()
                    {
                        'iter_fields: for i in 0..fat_pointer_layout.fields.count() {
                            let field_layout = fat_pointer_layout.field(cx, i);

                            if !field_layout.is_zst() {
                                fat_pointer_layout = field_layout;
                                continue 'descend_newtypes;
                            }
                        }

                        bug!(
                            "receiver has no non-zero-sized fields {:?}",
                            fat_pointer_layout
                        );
                    }

                    fat_pointer_layout.ty
                };

                // We now have a type like `*mut RcBox<dyn Trait>`. Change its
                // layout to that of `*mut ()`, a thin pointer, but keep the
                // same type; this is understood as a special case elsewhere
                // in the compiler.
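                // E.g. for a `self: Rc<Self>` receiver on a `dyn Trait`
                // method, the loop above descends `Rc<dyn Trait>` down to its
                // underlying raw pointer to `RcBox<dyn Trait>`; that pointer
                // then keeps its type here but takes on the thin layout of
                // `*mut ()`.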
                let unit_pointer_ty = cx.tcx().mk_mut_ptr(cx.tcx().mk_unit());
                layout = cx.layout_of(unit_pointer_ty);
                layout.ty = fat_pointer_ty;
            }
            ArgAbi::new(layout)
        })
    }

    fn new_internal(
        cx: &C,
        sig: ty::FnSig<'tcx>,
        extra_args: &[Ty<'tcx>],
        mk_arg_type: impl Fn(Ty<'tcx>, Option<usize>) -> ArgAbi<'tcx, Ty<'tcx>>,
    ) -> Self {
        debug!("FnAbi::new_internal({:?}, {:?})", sig, extra_args);

        use rustc_target::spec::abi::Abi::*;
        let conv = match cx.tcx().sess.target.target.adjust_abi(sig.abi) {
            RustIntrinsic | PlatformIntrinsic | Rust | RustCall => Conv::C,

            // It's the ABI's job to select this, not ours.
            System => bug!("system abi should be selected elsewhere"),
            EfiApi => bug!("eficall abi should be selected elsewhere"),

            Stdcall => Conv::X86Stdcall,
            Fastcall => Conv::X86Fastcall,
            Vectorcall => Conv::X86VectorCall,
            Thiscall => Conv::X86ThisCall,
            C => Conv::C,
            Unadjusted => Conv::C,
            Win64 => Conv::X86_64Win64,
            SysV64 => Conv::X86_64SysV,
            Aapcs => Conv::ArmAapcs,
            PtxKernel => Conv::PtxKernel,
            Msp430Interrupt => Conv::Msp430Intr,
            X86Interrupt => Conv::X86Intr,
            AmdGpuKernel => Conv::AmdGpuKernel,

            // These API constants ought to be more specific...
            Cdecl => Conv::C,
        };
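
        // So, e.g., `extern "stdcall"` lowers to `Conv::X86Stdcall`, while
        // all of the Rust-internal ABIs share the platform's C convention at
        // this level; their Rust-specific argument handling happens later,
        // in `adjust_for_abi`.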

        let mut inputs = sig.inputs();
        let extra_args = if sig.abi == RustCall {
            assert!(!sig.c_variadic && extra_args.is_empty());

            match sig.inputs().last().unwrap().kind {
                ty::Tuple(tupled_arguments) => {
                    inputs = &sig.inputs()[0..sig.inputs().len() - 1];
                    tupled_arguments.iter().map(|k| k.expect_ty()).collect()
                }
                _ => {
                    bug!(
                        "argument to function with \"rust-call\" ABI \
                         is not a tuple"
                    );
                }
            }
        } else {
            assert!(sig.c_variadic || extra_args.is_empty());
            extra_args.to_vec()
        };
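
        // E.g. for a closure called through `FnMut`, `sig` is roughly
        // `extern "rust-call" fn(&mut Closure, (A, B)) -> R`: the match above
        // leaves `inputs` as `[&mut Closure]` and turns the trailing tuple
        // into `extra_args = [A, B]`, flattening it into separate ABI-level
        // arguments.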

        let target = &cx.tcx().sess.target.target;
        let win_x64_gnu =
            target.target_os == "windows" && target.arch == "x86_64" && target.target_env == "gnu";
        let linux_s390x =
            target.target_os == "linux" && target.arch == "s390x" && target.target_env == "gnu";
        let linux_sparc64 =
            target.target_os == "linux" && target.arch == "sparc64" && target.target_env == "gnu";
        let rust_abi = match sig.abi {
            RustIntrinsic | PlatformIntrinsic | Rust | RustCall => true,
            _ => false,
        };

        // Handle safe Rust thin and fat pointers.
        let adjust_for_rust_scalar = |attrs: &mut ArgAttributes,
                                      scalar: &Scalar,
                                      layout: TyLayout<'tcx>,
                                      offset: Size,
                                      is_return: bool| {
            // Booleans are always an i1 that needs to be zero-extended.
            if scalar.is_bool() {
                attrs.set(ArgAttribute::ZExt);
                return;
            }

            // Only pointer types are handled below.
            if scalar.value != Pointer {
                return;
            }

            if scalar.valid_range.start() < scalar.valid_range.end() {
                if *scalar.valid_range.start() > 0 {
                    attrs.set(ArgAttribute::NonNull);
                }
            }

            if let Some(pointee) = layout.pointee_info_at(cx, offset) {
                if let Some(kind) = pointee.safe {
                    attrs.pointee_size = pointee.size;
                    attrs.pointee_align = Some(pointee.align);

                    // `Box` pointer parameters never alias because ownership
                    // is transferred, and `&mut` pointer parameters never
                    // alias other parameters or mutable global data.
                    //
                    // `&T` where `T` contains no `UnsafeCell<U>` is immutable,
                    // and can be marked as both `readonly` and `noalias`, as
                    // LLVM's definition of `noalias` is based solely on memory
                    // dependencies rather than pointer equality.
                    let no_alias = match kind {
                        PointerKind::Shared => false,
                        PointerKind::UniqueOwned => true,
                        PointerKind::Frozen | PointerKind::UniqueBorrowed => !is_return,
                    };
                    if no_alias {
                        attrs.set(ArgAttribute::NoAlias);
                    }

                    if kind == PointerKind::Frozen && !is_return {
                        attrs.set(ArgAttribute::ReadOnly);
                    }
                }
            }
        };
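
        // Roughly: a `&T` argument (with no `UnsafeCell` inside `T`) ends up
        // `nonnull` + `noalias` + `readonly`, a `&mut T` argument ends up
        // `nonnull` + `noalias`, and `Box<T>` stays `noalias` even in return
        // position; all of them also record `pointee_size`/`pointee_align`
        // for the backend's dereferenceability information.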

        let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| {
            let is_return = arg_idx.is_none();
            let mut arg = mk_arg_type(ty, arg_idx);
            if arg.layout.is_zst() {
                // For some forsaken reason, x86_64-pc-windows-gnu
                // doesn't ignore zero-sized struct arguments.
                // The same is true for s390x-unknown-linux-gnu
                // and sparc64-unknown-linux-gnu.
                if is_return || rust_abi || (!win_x64_gnu && !linux_s390x && !linux_sparc64) {
                    arg.mode = PassMode::Ignore;
                }
            }

            // FIXME(eddyb) other ABIs don't have logic for scalar pairs.
            if !is_return && rust_abi {
                if let Abi::ScalarPair(ref a, ref b) = arg.layout.abi {
                    let mut a_attrs = ArgAttributes::new();
                    let mut b_attrs = ArgAttributes::new();
                    adjust_for_rust_scalar(&mut a_attrs, a, arg.layout, Size::ZERO, false);
                    adjust_for_rust_scalar(
                        &mut b_attrs,
                        b,
                        arg.layout,
                        a.value.size(cx).align_to(b.value.align(cx).abi),
                        false,
                    );
                    arg.mode = PassMode::Pair(a_attrs, b_attrs);
                    return arg;
                }
            }

            if let Abi::Scalar(ref scalar) = arg.layout.abi {
                if let PassMode::Direct(ref mut attrs) = arg.mode {
                    adjust_for_rust_scalar(attrs, scalar, arg.layout, Size::ZERO, is_return);
                }
            }

            arg
        };
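
        // E.g. a `&[u8]` argument has a `ScalarPair` layout (data pointer,
        // then length), so under a Rust-internal ABI it is passed as two
        // scalar components via `PassMode::Pair`; the second component's
        // offset is the first scalar's size rounded up to the second's
        // alignment.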

        let mut fn_abi = FnAbi {
            ret: arg_of(sig.output(), None),
            args: inputs
                .iter()
                .cloned()
                .chain(extra_args)
                .enumerate()
                .map(|(i, ty)| arg_of(ty, Some(i)))
                .collect(),
            c_variadic: sig.c_variadic,
            conv,
        };
        fn_abi.adjust_for_abi(cx, sig.abi);
        fn_abi
    }

    fn adjust_for_abi(&mut self, cx: &C, abi: SpecAbi) {
        if abi == SpecAbi::Unadjusted {
            return;
        }

        if abi == SpecAbi::Rust
            || abi == SpecAbi::RustCall
            || abi == SpecAbi::RustIntrinsic
            || abi == SpecAbi::PlatformIntrinsic
        {
            let fixup = |arg: &mut ArgAbi<'tcx, Ty<'tcx>>| {
                if arg.is_ignore() {
                    return;
                }

                match arg.layout.abi {
                    Abi::Aggregate { .. } => {}

                    // This is a fun case! The gist of what this is doing is
                    // that we want callers and callees to always agree on the
                    // ABI of how they pass SIMD arguments. If we were to *not*
                    // make these arguments indirect, then they'd be immediates
                    // in LLVM, which means that they'd use whatever ABI is
                    // appropriate for the callee and the caller. That means,
                    // for example, that if the caller doesn't have AVX enabled
                    // but the callee does, then passing an AVX argument across
                    // this boundary would cause corrupt data to show up.
                    //
                    // This problem is fixed by unconditionally passing SIMD
                    // arguments through memory between callers and callees,
                    // which should get them all to agree on ABI regardless of
                    // target feature sets. Some more information about this
                    // issue can be found in #44367.
                    //
                    // Note that the platform intrinsic ABI is exempt here, as
                    // that's how we connect up to LLVM and it's unstable
                    // anyway; we control all calls to it in libstd.
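                    //
                    // Concretely: with AVX enabled only in the callee, the
                    // callee would expect a 256-bit vector immediate in a
                    // YMM register, while the caller, compiled without AVX,
                    // would hand it over some other way (split up or through
                    // the stack), so the callee would read garbage.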
                    Abi::Vector { .. }
                        if abi != SpecAbi::PlatformIntrinsic
                            && cx.tcx().sess.target.target.options.simd_types_indirect =>
                    {
                        arg.make_indirect();
                        return;
                    }

                    _ => return,
                }

                let size = arg.layout.size;
                if arg.layout.is_unsized() || size > Pointer.size(cx) {
                    arg.make_indirect();
                } else {
                    // We want to pass small aggregates as immediates, but
                    // using an LLVM aggregate type for this leads to bad
                    // optimizations, so we pick an appropriately sized
                    // integer type instead.
                    arg.cast_to(Reg {
                        kind: RegKind::Integer,
                        size,
                    });
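
                    // E.g. on a 64-bit target, a small aggregate such as
                    // `[u8; 4]` is passed as a single `i32`-sized integer
                    // rather than as an LLVM aggregate.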
                }
            };
            fixup(&mut self.ret);
            for arg in &mut self.args {
                fixup(arg);
            }
            if let PassMode::Indirect(ref mut attrs, _) = self.ret.mode {
                attrs.set(ArgAttribute::StructRet);
            }
            return;
        }

        if let Err(msg) = self.adjust_for_cabi(cx, abi) {
            cx.tcx().sess.fatal(&msg);
        }
    }
}