]> git.lizzy.rs Git - rust.git/blob - src/librustc/ty/layout.rs
Auto merge of #61300 - indygreg:upgrade-cross-make, r=sanxiyn
[rust.git] / src / librustc / ty / layout.rs
1 use crate::session::{self, DataTypeKind};
2 use crate::ty::{self, Ty, TyCtxt, TypeFoldable, ReprOptions};
3
4 use syntax::ast::{self, Ident, IntTy, UintTy};
5 use syntax::attr;
6 use syntax_pos::DUMMY_SP;
7
8 use std::cmp;
9 use std::fmt;
10 use std::i128;
11 use std::iter;
12 use std::mem;
13 use std::ops::Bound;
14
15 use crate::hir;
16 use crate::ich::StableHashingContext;
17 use rustc_data_structures::indexed_vec::{IndexVec, Idx};
18 use rustc_data_structures::stable_hasher::{HashStable, StableHasher,
19                                            StableHasherResult};
20
21 pub use rustc_target::abi::*;
22 use rustc_target::spec::{HasTargetSpec, abi::Abi as SpecAbi};
23 use rustc_target::abi::call::{
24     ArgAttribute, ArgAttributes, ArgType, Conv, FnType, IgnoreMode, PassMode, Reg, RegKind
25 };
26
27
28
/// Extension methods on `Integer`, connecting the target-independent
/// integer sizes to rustc's type system and `#[repr]` handling.
pub trait IntegerExt {
    /// Converts this integer size plus a signedness to the matching Rust primitive type.
    fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, signed: bool) -> Ty<'tcx>;
    /// Maps an `attr::IntType` (from a `#[repr(...)]` attribute) to an `Integer`,
    /// resolving `isize`/`usize` via the target's data layout.
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
    /// Chooses the `Integer` size and signedness able to hold the discriminant
    /// range `min..=max`, honoring any `#[repr]` hints in `repr`.
    fn repr_discr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                            ty: Ty<'tcx>,
                            repr: &ReprOptions,
                            min: i128,
                            max: i128)
                            -> (Integer, bool);
}
39
40 impl IntegerExt for Integer {
41     fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, signed: bool) -> Ty<'tcx> {
42         match (*self, signed) {
43             (I8, false) => tcx.types.u8,
44             (I16, false) => tcx.types.u16,
45             (I32, false) => tcx.types.u32,
46             (I64, false) => tcx.types.u64,
47             (I128, false) => tcx.types.u128,
48             (I8, true) => tcx.types.i8,
49             (I16, true) => tcx.types.i16,
50             (I32, true) => tcx.types.i32,
51             (I64, true) => tcx.types.i64,
52             (I128, true) => tcx.types.i128,
53         }
54     }
55
56     /// Gets the Integer type from an attr::IntType.
57     fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
58         let dl = cx.data_layout();
59
60         match ity {
61             attr::SignedInt(IntTy::I8) | attr::UnsignedInt(UintTy::U8) => I8,
62             attr::SignedInt(IntTy::I16) | attr::UnsignedInt(UintTy::U16) => I16,
63             attr::SignedInt(IntTy::I32) | attr::UnsignedInt(UintTy::U32) => I32,
64             attr::SignedInt(IntTy::I64) | attr::UnsignedInt(UintTy::U64) => I64,
65             attr::SignedInt(IntTy::I128) | attr::UnsignedInt(UintTy::U128) => I128,
66             attr::SignedInt(IntTy::Isize) | attr::UnsignedInt(UintTy::Usize) => {
67                 dl.ptr_sized_integer()
68             }
69         }
70     }
71
72     /// Finds the appropriate Integer type and signedness for the given
73     /// signed discriminant range and #[repr] attribute.
74     /// N.B.: u128 values above i128::MAX will be treated as signed, but
75     /// that shouldn't affect anything, other than maybe debuginfo.
76     fn repr_discr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
77                             ty: Ty<'tcx>,
78                             repr: &ReprOptions,
79                             min: i128,
80                             max: i128)
81                             -> (Integer, bool) {
82         // Theoretically, negative values could be larger in unsigned representation
83         // than the unsigned representation of the signed minimum. However, if there
84         // are any negative values, the only valid unsigned representation is u128
85         // which can fit all i128 values, so the result remains unaffected.
86         let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
87         let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));
88
89         let mut min_from_extern = None;
90         let min_default = I8;
91
92         if let Some(ity) = repr.int {
93             let discr = Integer::from_attr(&tcx, ity);
94             let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
95             if discr < fit {
96                 bug!("Integer::repr_discr: `#[repr]` hint too small for \
97                       discriminant range of enum `{}", ty)
98             }
99             return (discr, ity.is_signed());
100         }
101
102         if repr.c() {
103             match &tcx.sess.target.target.arch[..] {
104                 // WARNING: the ARM EABI has two variants; the one corresponding
105                 // to `at_least == I32` appears to be used on Linux and NetBSD,
106                 // but some systems may use the variant corresponding to no
107                 // lower bound. However, we don't run on those yet...?
108                 "arm" => min_from_extern = Some(I32),
109                 _ => min_from_extern = Some(I32),
110             }
111         }
112
113         let at_least = min_from_extern.unwrap_or(min_default);
114
115         // If there are no negative values, we can use the unsigned fit.
116         if min >= 0 {
117             (cmp::max(unsigned_fit, at_least), false)
118         } else {
119             (cmp::max(signed_fit, at_least), true)
120         }
121     }
122 }
123
/// Extension methods on ABI `Primitive`s for mapping them back to Rust types.
pub trait PrimitiveExt {
    /// Returns a Rust type with this primitive's representation
    /// (e.g. `Pointer` maps to `*mut ()` — see the impl below).
    fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Ty<'tcx>;
}
127
128 impl PrimitiveExt for Primitive {
129     fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Ty<'tcx> {
130         match *self {
131             Int(i, signed) => i.to_ty(tcx, signed),
132             Float(FloatTy::F32) => tcx.types.f32,
133             Float(FloatTy::F64) => tcx.types.f64,
134             Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
135         }
136     }
137 }
138
/// The first half (field index 0) of a fat pointer.
///
/// - For a trait object, this is the address of the box.
/// - For a slice, this is the base address.
pub const FAT_PTR_ADDR: usize = 0;

/// The second half (field index 1) of a fat pointer.
///
/// - For a trait object, this is the address of the vtable.
/// - For a slice, this is the length.
pub const FAT_PTR_EXTRA: usize = 1;
150
/// Ways in which computing a type's layout can fail.
#[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable)]
pub enum LayoutError<'tcx> {
    /// No layout could be determined for the type — e.g. an array length
    /// that fails to normalize away its projections, or an unsized tail
    /// that is none of the recognized kinds (slice/str/dyn/foreign).
    Unknown(Ty<'tcx>),
    /// The type's size computation overflowed (a checked size
    /// addition or multiplication failed).
    SizeOverflow(Ty<'tcx>)
}
156
157 impl<'tcx> fmt::Display for LayoutError<'tcx> {
158     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
159         match *self {
160             LayoutError::Unknown(ty) => {
161                 write!(f, "the type `{:?}` has an unknown layout", ty)
162             }
163             LayoutError::SizeOverflow(ty) => {
164                 write!(f, "the type `{:?}` is too big for the current architecture", ty)
165             }
166         }
167     }
168 }
169
170 fn layout_raw<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
171                         query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
172                         -> Result<&'tcx LayoutDetails, LayoutError<'tcx>>
173 {
174     ty::tls::with_related_context(tcx, move |icx| {
175         let rec_limit = *tcx.sess.recursion_limit.get();
176         let (param_env, ty) = query.into_parts();
177
178         if icx.layout_depth > rec_limit {
179             tcx.sess.fatal(
180                 &format!("overflow representing the type `{}`", ty));
181         }
182
183         // Update the ImplicitCtxt to increase the layout_depth
184         let icx = ty::tls::ImplicitCtxt {
185             layout_depth: icx.layout_depth + 1,
186             ..icx.clone()
187         };
188
189         ty::tls::enter_context(&icx, |_| {
190             let cx = LayoutCx { tcx, param_env };
191             let layout = cx.layout_raw_uncached(ty);
192             // Type-level uninhabitedness should always imply ABI uninhabitedness.
193             if let Ok(layout) = layout {
194                 if ty.conservative_is_privately_uninhabited(tcx) {
195                     assert!(layout.abi.is_uninhabited());
196                 }
197             }
198             layout
199         })
200     })
201 }
202
203 pub fn provide(providers: &mut ty::query::Providers<'_>) {
204     *providers = ty::query::Providers {
205         layout_raw,
206         ..*providers
207     };
208 }
209
/// Context for layout computations: a type context `C` (instantiated with
/// `TyCtxt` in this file) paired with the `ParamEnv` layouts are computed in.
pub struct LayoutCx<'tcx, C> {
    pub tcx: C,
    pub param_env: ty::ParamEnv<'tcx>,
}
214
215 impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
216     fn layout_raw_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx LayoutDetails, LayoutError<'tcx>> {
217         let tcx = self.tcx;
218         let param_env = self.param_env;
219         let dl = self.data_layout();
220         let scalar_unit = |value: Primitive| {
221             let bits = value.size(dl).bits();
222             assert!(bits <= 128);
223             Scalar {
224                 value,
225                 valid_range: 0..=(!0 >> (128 - bits))
226             }
227         };
228         let scalar = |value: Primitive| {
229             tcx.intern_layout(LayoutDetails::scalar(self, scalar_unit(value)))
230         };
231         let scalar_pair = |a: Scalar, b: Scalar| {
232             let b_align = b.value.align(dl);
233             let align = a.value.align(dl).max(b_align).max(dl.aggregate_align);
234             let b_offset = a.value.size(dl).align_to(b_align.abi);
235             let size = (b_offset + b.value.size(dl)).align_to(align.abi);
236             LayoutDetails {
237                 variants: Variants::Single { index: VariantIdx::new(0) },
238                 fields: FieldPlacement::Arbitrary {
239                     offsets: vec![Size::ZERO, b_offset],
240                     memory_index: vec![0, 1]
241                 },
242                 abi: Abi::ScalarPair(a, b),
243                 align,
244                 size
245             }
246         };
247
248         #[derive(Copy, Clone, Debug)]
249         enum StructKind {
250             /// A tuple, closure, or univariant which cannot be coerced to unsized.
251             AlwaysSized,
252             /// A univariant, the last field of which may be coerced to unsized.
253             MaybeUnsized,
254             /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
255             Prefixed(Size, Align),
256         }
257
258         let univariant_uninterned = |fields: &[TyLayout<'_>], repr: &ReprOptions, kind| {
259             let packed = repr.packed();
260             if packed && repr.align > 0 {
261                 bug!("struct cannot be packed and aligned");
262             }
263
264             let pack = Align::from_bytes(repr.pack as u64).unwrap();
265
266             let mut align = if packed {
267                 dl.i8_align
268             } else {
269                 dl.aggregate_align
270             };
271
272             let mut sized = true;
273             let mut offsets = vec![Size::ZERO; fields.len()];
274             let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();
275
276             let mut optimize = !repr.inhibit_struct_field_reordering_opt();
277             if let StructKind::Prefixed(_, align) = kind {
278                 optimize &= align.bytes() == 1;
279             }
280
281             if optimize {
282                 let end = if let StructKind::MaybeUnsized = kind {
283                     fields.len() - 1
284                 } else {
285                     fields.len()
286                 };
287                 let optimizing = &mut inverse_memory_index[..end];
288                 let field_align = |f: &TyLayout<'_>| {
289                     if packed { f.align.abi.min(pack) } else { f.align.abi }
290                 };
291                 match kind {
292                     StructKind::AlwaysSized |
293                     StructKind::MaybeUnsized => {
294                         optimizing.sort_by_key(|&x| {
295                             // Place ZSTs first to avoid "interesting offsets",
296                             // especially with only one or two non-ZST fields.
297                             let f = &fields[x as usize];
298                             (!f.is_zst(), cmp::Reverse(field_align(f)))
299                         });
300                     }
301                     StructKind::Prefixed(..) => {
302                         optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
303                     }
304                 }
305             }
306
307             // inverse_memory_index holds field indices by increasing memory offset.
308             // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
309             // We now write field offsets to the corresponding offset slot;
310             // field 5 with offset 0 puts 0 in offsets[5].
311             // At the bottom of this function, we use inverse_memory_index to produce memory_index.
312
313             let mut offset = Size::ZERO;
314
315             if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
316                 let prefix_align = if packed {
317                     prefix_align.min(pack)
318                 } else {
319                     prefix_align
320                 };
321                 align = align.max(AbiAndPrefAlign::new(prefix_align));
322                 offset = prefix_size.align_to(prefix_align);
323             }
324
325             for &i in &inverse_memory_index {
326                 let field = fields[i as usize];
327                 if !sized {
328                     bug!("univariant: field #{} of `{}` comes after unsized field",
329                          offsets.len(), ty);
330                 }
331
332                 if field.is_unsized() {
333                     sized = false;
334                 }
335
336                 // Invariant: offset < dl.obj_size_bound() <= 1<<61
337                 let field_align = if packed {
338                     field.align.min(AbiAndPrefAlign::new(pack))
339                 } else {
340                     field.align
341                 };
342                 offset = offset.align_to(field_align.abi);
343                 align = align.max(field_align);
344
345                 debug!("univariant offset: {:?} field: {:#?}", offset, field);
346                 offsets[i as usize] = offset;
347
348                 offset = offset.checked_add(field.size, dl)
349                     .ok_or(LayoutError::SizeOverflow(ty))?;
350             }
351
352             if repr.align > 0 {
353                 let repr_align = repr.align as u64;
354                 align = align.max(AbiAndPrefAlign::new(Align::from_bytes(repr_align).unwrap()));
355                 debug!("univariant repr_align: {:?}", repr_align);
356             }
357
358             debug!("univariant min_size: {:?}", offset);
359             let min_size = offset;
360
361             // As stated above, inverse_memory_index holds field indices by increasing offset.
362             // This makes it an already-sorted view of the offsets vec.
363             // To invert it, consider:
364             // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
365             // Field 5 would be the first element, so memory_index is i:
366             // Note: if we didn't optimize, it's already right.
367
368             let mut memory_index;
369             if optimize {
370                 memory_index = vec![0; inverse_memory_index.len()];
371
372                 for i in 0..inverse_memory_index.len() {
373                     memory_index[inverse_memory_index[i] as usize]  = i as u32;
374                 }
375             } else {
376                 memory_index = inverse_memory_index;
377             }
378
379             let size = min_size.align_to(align.abi);
380             let mut abi = Abi::Aggregate { sized };
381
382             // Unpack newtype ABIs and find scalar pairs.
383             if sized && size.bytes() > 0 {
384                 // All other fields must be ZSTs, and we need them to all start at 0.
385                 let mut zst_offsets =
386                     offsets.iter().enumerate().filter(|&(i, _)| fields[i].is_zst());
387                 if zst_offsets.all(|(_, o)| o.bytes() == 0) {
388                     let mut non_zst_fields =
389                         fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());
390
391                     match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
392                         // We have exactly one non-ZST field.
393                         (Some((i, field)), None, None) => {
394                             // Field fills the struct and it has a scalar or scalar pair ABI.
395                             if offsets[i].bytes() == 0 &&
396                                align.abi == field.align.abi &&
397                                size == field.size {
398                                 match field.abi {
399                                     // For plain scalars, or vectors of them, we can't unpack
400                                     // newtypes for `#[repr(C)]`, as that affects C ABIs.
401                                     Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
402                                         abi = field.abi.clone();
403                                     }
404                                     // But scalar pairs are Rust-specific and get
405                                     // treated as aggregates by C ABIs anyway.
406                                     Abi::ScalarPair(..) => {
407                                         abi = field.abi.clone();
408                                     }
409                                     _ => {}
410                                 }
411                             }
412                         }
413
414                         // Two non-ZST fields, and they're both scalars.
415                         (Some((i, &TyLayout {
416                             details: &LayoutDetails { abi: Abi::Scalar(ref a), .. }, ..
417                         })), Some((j, &TyLayout {
418                             details: &LayoutDetails { abi: Abi::Scalar(ref b), .. }, ..
419                         })), None) => {
420                             // Order by the memory placement, not source order.
421                             let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
422                                 ((i, a), (j, b))
423                             } else {
424                                 ((j, b), (i, a))
425                             };
426                             let pair = scalar_pair(a.clone(), b.clone());
427                             let pair_offsets = match pair.fields {
428                                 FieldPlacement::Arbitrary {
429                                     ref offsets,
430                                     ref memory_index
431                                 } => {
432                                     assert_eq!(memory_index, &[0, 1]);
433                                     offsets
434                                 }
435                                 _ => bug!()
436                             };
437                             if offsets[i] == pair_offsets[0] &&
438                                offsets[j] == pair_offsets[1] &&
439                                align == pair.align &&
440                                size == pair.size {
441                                 // We can use `ScalarPair` only when it matches our
442                                 // already computed layout (including `#[repr(C)]`).
443                                 abi = pair.abi;
444                             }
445                         }
446
447                         _ => {}
448                     }
449                 }
450             }
451
452             if sized && fields.iter().any(|f| f.abi.is_uninhabited()) {
453                 abi = Abi::Uninhabited;
454             }
455
456             Ok(LayoutDetails {
457                 variants: Variants::Single { index: VariantIdx::new(0) },
458                 fields: FieldPlacement::Arbitrary {
459                     offsets,
460                     memory_index
461                 },
462                 abi,
463                 align,
464                 size
465             })
466         };
467         let univariant = |fields: &[TyLayout<'_>], repr: &ReprOptions, kind| {
468             Ok(tcx.intern_layout(univariant_uninterned(fields, repr, kind)?))
469         };
470         debug_assert!(!ty.has_infer_types());
471
472         Ok(match ty.sty {
473             // Basic scalars.
474             ty::Bool => {
475                 tcx.intern_layout(LayoutDetails::scalar(self, Scalar {
476                     value: Int(I8, false),
477                     valid_range: 0..=1
478                 }))
479             }
480             ty::Char => {
481                 tcx.intern_layout(LayoutDetails::scalar(self, Scalar {
482                     value: Int(I32, false),
483                     valid_range: 0..=0x10FFFF
484                 }))
485             }
486             ty::Int(ity) => {
487                 scalar(Int(Integer::from_attr(dl, attr::SignedInt(ity)), true))
488             }
489             ty::Uint(ity) => {
490                 scalar(Int(Integer::from_attr(dl, attr::UnsignedInt(ity)), false))
491             }
492             ty::Float(fty) => scalar(Float(fty)),
493             ty::FnPtr(_) => {
494                 let mut ptr = scalar_unit(Pointer);
495                 ptr.valid_range = 1..=*ptr.valid_range.end();
496                 tcx.intern_layout(LayoutDetails::scalar(self, ptr))
497             }
498
499             // The never type.
500             ty::Never => {
501                 tcx.intern_layout(LayoutDetails {
502                     variants: Variants::Single { index: VariantIdx::new(0) },
503                     fields: FieldPlacement::Union(0),
504                     abi: Abi::Uninhabited,
505                     align: dl.i8_align,
506                     size: Size::ZERO
507                 })
508             }
509
510             // Potentially-fat pointers.
511             ty::Ref(_, pointee, _) |
512             ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
513                 let mut data_ptr = scalar_unit(Pointer);
514                 if !ty.is_unsafe_ptr() {
515                     data_ptr.valid_range = 1..=*data_ptr.valid_range.end();
516                 }
517
518                 let pointee = tcx.normalize_erasing_regions(param_env, pointee);
519                 if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
520                     return Ok(tcx.intern_layout(LayoutDetails::scalar(self, data_ptr)));
521                 }
522
523                 let unsized_part = tcx.struct_tail(pointee);
524                 let metadata = match unsized_part.sty {
525                     ty::Foreign(..) => {
526                         return Ok(tcx.intern_layout(LayoutDetails::scalar(self, data_ptr)));
527                     }
528                     ty::Slice(_) | ty::Str => {
529                         scalar_unit(Int(dl.ptr_sized_integer(), false))
530                     }
531                     ty::Dynamic(..) => {
532                         let mut vtable = scalar_unit(Pointer);
533                         vtable.valid_range = 1..=*vtable.valid_range.end();
534                         vtable
535                     }
536                     _ => return Err(LayoutError::Unknown(unsized_part))
537                 };
538
539                 // Effectively a (ptr, meta) tuple.
540                 tcx.intern_layout(scalar_pair(data_ptr, metadata))
541             }
542
543             // Arrays and slices.
544             ty::Array(element, mut count) => {
545                 if count.has_projections() {
546                     count = tcx.normalize_erasing_regions(param_env, count);
547                     if count.has_projections() {
548                         return Err(LayoutError::Unknown(ty));
549                     }
550                 }
551
552                 let element = self.layout_of(element)?;
553                 let count = count.unwrap_usize(tcx);
554                 let size = element.size.checked_mul(count, dl)
555                     .ok_or(LayoutError::SizeOverflow(ty))?;
556
557                 let abi = if count != 0 && ty.conservative_is_privately_uninhabited(tcx) {
558                     Abi::Uninhabited
559                 } else {
560                     Abi::Aggregate { sized: true }
561                 };
562
563                 tcx.intern_layout(LayoutDetails {
564                     variants: Variants::Single { index: VariantIdx::new(0) },
565                     fields: FieldPlacement::Array {
566                         stride: element.size,
567                         count
568                     },
569                     abi,
570                     align: element.align,
571                     size
572                 })
573             }
574             ty::Slice(element) => {
575                 let element = self.layout_of(element)?;
576                 tcx.intern_layout(LayoutDetails {
577                     variants: Variants::Single { index: VariantIdx::new(0) },
578                     fields: FieldPlacement::Array {
579                         stride: element.size,
580                         count: 0
581                     },
582                     abi: Abi::Aggregate { sized: false },
583                     align: element.align,
584                     size: Size::ZERO
585                 })
586             }
587             ty::Str => {
588                 tcx.intern_layout(LayoutDetails {
589                     variants: Variants::Single { index: VariantIdx::new(0) },
590                     fields: FieldPlacement::Array {
591                         stride: Size::from_bytes(1),
592                         count: 0
593                     },
594                     abi: Abi::Aggregate { sized: false },
595                     align: dl.i8_align,
596                     size: Size::ZERO
597                 })
598             }
599
600             // Odd unit types.
601             ty::FnDef(..) => {
602                 univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?
603             }
604             ty::Dynamic(..) | ty::Foreign(..) => {
605                 let mut unit = univariant_uninterned(&[], &ReprOptions::default(),
606                   StructKind::AlwaysSized)?;
607                 match unit.abi {
608                     Abi::Aggregate { ref mut sized } => *sized = false,
609                     _ => bug!()
610                 }
611                 tcx.intern_layout(unit)
612             }
613
614             ty::Generator(def_id, ref substs, _) => {
615                 // FIXME(tmandry): For fields that are repeated in multiple
616                 // variants in the GeneratorLayout, we need code to ensure that
617                 // the offset of these fields never change. Right now this is
618                 // not an issue since every variant has every field, but once we
619                 // optimize this we have to be more careful.
620
621                 let discr_index = substs.prefix_tys(def_id, tcx).count();
622                 let prefix_tys = substs.prefix_tys(def_id, tcx)
623                     .chain(iter::once(substs.discr_ty(tcx)));
624                 let prefix = univariant_uninterned(
625                     &prefix_tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
626                     &ReprOptions::default(),
627                     StructKind::AlwaysSized)?;
628
629                 let mut size = prefix.size;
630                 let mut align = prefix.align;
631                 let variants_tys = substs.state_tys(def_id, tcx);
632                 let variants = variants_tys.enumerate().map(|(i, variant_tys)| {
633                     let mut variant = univariant_uninterned(
634                         &variant_tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
635                         &ReprOptions::default(),
636                         StructKind::Prefixed(prefix.size, prefix.align.abi))?;
637
638                     variant.variants = Variants::Single { index: VariantIdx::new(i) };
639
640                     size = size.max(variant.size);
641                     align = align.max(variant.align);
642
643                     Ok(variant)
644                 }).collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
645
646                 let abi = if prefix.abi.is_uninhabited() ||
647                              variants.iter().all(|v| v.abi.is_uninhabited()) {
648                     Abi::Uninhabited
649                 } else {
650                     Abi::Aggregate { sized: true }
651                 };
652                 let discr = match &self.layout_of(substs.discr_ty(tcx))?.abi {
653                     Abi::Scalar(s) => s.clone(),
654                     _ => bug!(),
655                 };
656
657                 let layout = tcx.intern_layout(LayoutDetails {
658                     variants: Variants::Multiple {
659                         discr,
660                         discr_kind: DiscriminantKind::Tag,
661                         discr_index,
662                         variants,
663                     },
664                     fields: prefix.fields,
665                     abi,
666                     size,
667                     align,
668                 });
669                 debug!("generator layout: {:#?}", layout);
670                 layout
671             }
672
673             ty::Closure(def_id, ref substs) => {
674                 let tys = substs.upvar_tys(def_id, tcx);
675                 univariant(&tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
676                     &ReprOptions::default(),
677                     StructKind::AlwaysSized)?
678             }
679
680             ty::Tuple(tys) => {
681                 let kind = if tys.len() == 0 {
682                     StructKind::AlwaysSized
683                 } else {
684                     StructKind::MaybeUnsized
685                 };
686
687                 univariant(&tys.iter().map(|k| {
688                     self.layout_of(k.expect_ty())
689                 }).collect::<Result<Vec<_>, _>>()?, &ReprOptions::default(), kind)?
690             }
691
692             // SIMD vector types.
693             ty::Adt(def, ..) if def.repr.simd() => {
694                 let element = self.layout_of(ty.simd_type(tcx))?;
695                 let count = ty.simd_size(tcx) as u64;
696                 assert!(count > 0);
697                 let scalar = match element.abi {
698                     Abi::Scalar(ref scalar) => scalar.clone(),
699                     _ => {
700                         tcx.sess.fatal(&format!("monomorphising SIMD type `{}` with \
701                                                  a non-machine element type `{}`",
702                                                 ty, element.ty));
703                     }
704                 };
705                 let size = element.size.checked_mul(count, dl)
706                     .ok_or(LayoutError::SizeOverflow(ty))?;
707                 let align = dl.vector_align(size);
708                 let size = size.align_to(align.abi);
709
710                 tcx.intern_layout(LayoutDetails {
711                     variants: Variants::Single { index: VariantIdx::new(0) },
712                     fields: FieldPlacement::Array {
713                         stride: element.size,
714                         count
715                     },
716                     abi: Abi::Vector {
717                         element: scalar,
718                         count
719                     },
720                     size,
721                     align,
722                 })
723             }
724
725             // ADTs.
726             ty::Adt(def, substs) => {
727                 // Cache the field layouts.
728                 let variants = def.variants.iter().map(|v| {
729                     v.fields.iter().map(|field| {
730                         self.layout_of(field.ty(tcx, substs))
731                     }).collect::<Result<Vec<_>, _>>()
732                 }).collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
733
734                 if def.is_union() {
735                     let packed = def.repr.packed();
736                     if packed && def.repr.align > 0 {
737                         bug!("Union cannot be packed and aligned");
738                     }
739
740                     let pack = Align::from_bytes(def.repr.pack as u64).unwrap();
741
742                     let mut align = if packed {
743                         dl.i8_align
744                     } else {
745                         dl.aggregate_align
746                     };
747
748                     if def.repr.align > 0 {
749                         let repr_align = def.repr.align as u64;
750                         align = align.max(
751                             AbiAndPrefAlign::new(Align::from_bytes(repr_align).unwrap()));
752                     }
753
754                     let optimize = !def.repr.inhibit_union_abi_opt();
755                     let mut size = Size::ZERO;
756                     let mut abi = Abi::Aggregate { sized: true };
757                     let index = VariantIdx::new(0);
758                     for field in &variants[index] {
759                         assert!(!field.is_unsized());
760
761                         let field_align = if packed {
762                             field.align.min(AbiAndPrefAlign::new(pack))
763                         } else {
764                             field.align
765                         };
766                         align = align.max(field_align);
767
768                         // If all non-ZST fields have the same ABI, forward this ABI
769                         if optimize && !field.is_zst() {
770                             // Normalize scalar_unit to the maximal valid range
771                             let field_abi = match &field.abi {
772                                 Abi::Scalar(x) => Abi::Scalar(scalar_unit(x.value)),
773                                 Abi::ScalarPair(x, y) => {
774                                     Abi::ScalarPair(
775                                         scalar_unit(x.value),
776                                         scalar_unit(y.value),
777                                     )
778                                 }
779                                 Abi::Vector { element: x, count } => {
780                                     Abi::Vector {
781                                         element: scalar_unit(x.value),
782                                         count: *count,
783                                     }
784                                 }
785                                 Abi::Uninhabited |
786                                 Abi::Aggregate { .. }  => Abi::Aggregate { sized: true },
787                             };
788
789                             if size == Size::ZERO {
790                                 // first non ZST: initialize 'abi'
791                                 abi = field_abi;
792                             } else if abi != field_abi  {
793                                 // different fields have different ABI: reset to Aggregate
794                                 abi = Abi::Aggregate { sized: true };
795                             }
796                         }
797
798                         size = cmp::max(size, field.size);
799                     }
800
801                     return Ok(tcx.intern_layout(LayoutDetails {
802                         variants: Variants::Single { index },
803                         fields: FieldPlacement::Union(variants[index].len()),
804                         abi,
805                         align,
806                         size: size.align_to(align.abi)
807                     }));
808                 }
809
810                 // A variant is absent if it's uninhabited and only has ZST fields.
811                 // Present uninhabited variants only require space for their fields,
812                 // but *not* an encoding of the discriminant (e.g., a tag value).
813                 // See issue #49298 for more details on the need to leave space
814                 // for non-ZST uninhabited data (mostly partial initialization).
815                 let absent = |fields: &[TyLayout<'_>]| {
816                     let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
817                     let is_zst = fields.iter().all(|f| f.is_zst());
818                     uninhabited && is_zst
819                 };
820                 let (present_first, present_second) = {
821                     let mut present_variants = variants.iter_enumerated().filter_map(|(i, v)| {
822                         if absent(v) {
823                             None
824                         } else {
825                             Some(i)
826                         }
827                     });
828                     (present_variants.next(), present_variants.next())
829                 };
830                 if present_first.is_none() {
831                     // Uninhabited because it has no variants, or only absent ones.
832                     return tcx.layout_raw(param_env.and(tcx.types.never));
833                 }
834
835                 let is_struct = !def.is_enum() ||
836                     // Only one variant is present.
837                     (present_second.is_none() &&
838                     // Representation optimizations are allowed.
839                     !def.repr.inhibit_enum_layout_opt());
840                 if is_struct {
841                     // Struct, or univariant enum equivalent to a struct.
842                     // (Typechecking will reject discriminant-sizing attrs.)
843
844                     let v = present_first.unwrap();
845                     let kind = if def.is_enum() || variants[v].len() == 0 {
846                         StructKind::AlwaysSized
847                     } else {
848                         let param_env = tcx.param_env(def.did);
849                         let last_field = def.variants[v].fields.last().unwrap();
850                         let always_sized = tcx.type_of(last_field.did)
851                                               .is_sized(tcx.at(DUMMY_SP), param_env);
852                         if !always_sized { StructKind::MaybeUnsized }
853                         else { StructKind::AlwaysSized }
854                     };
855
856                     let mut st = univariant_uninterned(&variants[v], &def.repr, kind)?;
857                     st.variants = Variants::Single { index: v };
858                     let (start, end) = self.tcx.layout_scalar_valid_range(def.did);
859                     match st.abi {
860                         Abi::Scalar(ref mut scalar) |
861                         Abi::ScalarPair(ref mut scalar, _) => {
862                             // the asserts ensure that we are not using the
863                             // `#[rustc_layout_scalar_valid_range(n)]`
864                             // attribute to widen the range of anything as that would probably
865                             // result in UB somewhere
866                             if let Bound::Included(start) = start {
867                                 assert!(*scalar.valid_range.start() <= start);
868                                 scalar.valid_range = start..=*scalar.valid_range.end();
869                             }
870                             if let Bound::Included(end) = end {
871                                 assert!(*scalar.valid_range.end() >= end);
872                                 scalar.valid_range = *scalar.valid_range.start()..=end;
873                             }
874                         }
875                         _ => assert!(
876                             start == Bound::Unbounded && end == Bound::Unbounded,
877                             "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
878                             def,
879                             st,
880                         ),
881                     }
882                     return Ok(tcx.intern_layout(st));
883                 }
884
885                 // The current code for niche-filling relies on variant indices
886                 // instead of actual discriminants, so dataful enums with
887                 // explicit discriminants (RFC #2363) would misbehave.
888                 let no_explicit_discriminants = def.variants.iter_enumerated()
889                     .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i.as_u32()));
890
891                 // Niche-filling enum optimization.
892                 if !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants {
893                     let mut dataful_variant = None;
894                     let mut niche_variants = VariantIdx::MAX..=VariantIdx::new(0);
895
896                     // Find one non-ZST variant.
897                     'variants: for (v, fields) in variants.iter_enumerated() {
898                         if absent(fields) {
899                             continue 'variants;
900                         }
901                         for f in fields {
902                             if !f.is_zst() {
903                                 if dataful_variant.is_none() {
904                                     dataful_variant = Some(v);
905                                     continue 'variants;
906                                 } else {
907                                     dataful_variant = None;
908                                     break 'variants;
909                                 }
910                             }
911                         }
912                         niche_variants = *niche_variants.start().min(&v)..=v;
913                     }
914
915                     if niche_variants.start() > niche_variants.end() {
916                         dataful_variant = None;
917                     }
918
919                     if let Some(i) = dataful_variant {
920                         let count = (
921                             niche_variants.end().as_u32() - niche_variants.start().as_u32() + 1
922                         ) as u128;
923                         for (field_index, &field) in variants[i].iter().enumerate() {
924                             let niche = match self.find_niche(field)? {
925                                 Some(niche) => niche,
926                                 _ => continue,
927                             };
928                             let (niche_start, niche_scalar) = match niche.reserve(self, count) {
929                                 Some(pair) => pair,
930                                 None => continue,
931                             };
932
933                             let mut align = dl.aggregate_align;
934                             let st = variants.iter_enumerated().map(|(j, v)| {
935                                 let mut st = univariant_uninterned(v,
936                                     &def.repr, StructKind::AlwaysSized)?;
937                                 st.variants = Variants::Single { index: j };
938
939                                 align = align.max(st.align);
940
941                                 Ok(st)
942                             }).collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
943
944                             let offset = st[i].fields.offset(field_index) + niche.offset;
945                             let size = st[i].size;
946
947                             let mut abi = match st[i].abi {
948                                 Abi::Scalar(_) => Abi::Scalar(niche_scalar.clone()),
949                                 Abi::ScalarPair(ref first, ref second) => {
950                                     // We need to use scalar_unit to reset the
951                                     // valid range to the maximal one for that
952                                     // primitive, because only the niche is
953                                     // guaranteed to be initialised, not the
954                                     // other primitive.
955                                     if offset.bytes() == 0 {
956                                         Abi::ScalarPair(
957                                             niche_scalar.clone(),
958                                             scalar_unit(second.value),
959                                         )
960                                     } else {
961                                         Abi::ScalarPair(
962                                             scalar_unit(first.value),
963                                             niche_scalar.clone(),
964                                         )
965                                     }
966                                 }
967                                 _ => Abi::Aggregate { sized: true },
968                             };
969
970                             if st.iter().all(|v| v.abi.is_uninhabited()) {
971                                 abi = Abi::Uninhabited;
972                             }
973
974                             return Ok(tcx.intern_layout(LayoutDetails {
975                                 variants: Variants::Multiple {
976                                     discr: niche_scalar,
977                                     discr_kind: DiscriminantKind::Niche {
978                                         dataful_variant: i,
979                                         niche_variants,
980                                         niche_start,
981                                     },
982                                     discr_index: 0,
983                                     variants: st,
984                                 },
985                                 fields: FieldPlacement::Arbitrary {
986                                     offsets: vec![offset],
987                                     memory_index: vec![0]
988                                 },
989                                 abi,
990                                 size,
991                                 align,
992                             }));
993                         }
994                     }
995                 }
996
997                 let (mut min, mut max) = (i128::max_value(), i128::min_value());
998                 let discr_type = def.repr.discr_type();
999                 let bits = Integer::from_attr(self, discr_type).size().bits();
1000                 for (i, discr) in def.discriminants(tcx) {
1001                     if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
1002                         continue;
1003                     }
1004                     let mut x = discr.val as i128;
1005                     if discr_type.is_signed() {
1006                         // sign extend the raw representation to be an i128
1007                         x = (x << (128 - bits)) >> (128 - bits);
1008                     }
1009                     if x < min { min = x; }
1010                     if x > max { max = x; }
1011                 }
1012                 // We might have no inhabited variants, so pretend there's at least one.
1013                 if (min, max) == (i128::max_value(), i128::min_value()) {
1014                     min = 0;
1015                     max = 0;
1016                 }
1017                 assert!(min <= max, "discriminant range is {}...{}", min, max);
1018                 let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max);
1019
1020                 let mut align = dl.aggregate_align;
1021                 let mut size = Size::ZERO;
1022
1023                 // We're interested in the smallest alignment, so start large.
1024                 let mut start_align = Align::from_bytes(256).unwrap();
1025                 assert_eq!(Integer::for_align(dl, start_align), None);
1026
1027                 // repr(C) on an enum tells us to make a (tag, union) layout,
1028                 // so we need to grow the prefix alignment to be at least
1029                 // the alignment of the union. (This value is used both for
1030                 // determining the alignment of the overall enum, and the
1031                 // determining the alignment of the payload after the tag.)
1032                 let mut prefix_align = min_ity.align(dl).abi;
1033                 if def.repr.c() {
1034                     for fields in &variants {
1035                         for field in fields {
1036                             prefix_align = prefix_align.max(field.align.abi);
1037                         }
1038                     }
1039                 }
1040
1041                 // Create the set of structs that represent each variant.
1042                 let mut layout_variants = variants.iter_enumerated().map(|(i, field_layouts)| {
1043                     let mut st = univariant_uninterned(&field_layouts,
1044                         &def.repr, StructKind::Prefixed(min_ity.size(), prefix_align))?;
1045                     st.variants = Variants::Single { index: i };
1046                     // Find the first field we can't move later
1047                     // to make room for a larger discriminant.
1048                     for field in st.fields.index_by_increasing_offset().map(|j| field_layouts[j]) {
1049                         if !field.is_zst() || field.align.abi.bytes() != 1 {
1050                             start_align = start_align.min(field.align.abi);
1051                             break;
1052                         }
1053                     }
1054                     size = cmp::max(size, st.size);
1055                     align = align.max(st.align);
1056                     Ok(st)
1057                 }).collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1058
1059                 // Align the maximum variant size to the largest alignment.
1060                 size = size.align_to(align.abi);
1061
1062                 if size.bytes() >= dl.obj_size_bound() {
1063                     return Err(LayoutError::SizeOverflow(ty));
1064                 }
1065
1066                 let typeck_ity = Integer::from_attr(dl, def.repr.discr_type());
1067                 if typeck_ity < min_ity {
1068                     // It is a bug if Layout decided on a greater discriminant size than typeck for
1069                     // some reason at this point (based on values discriminant can take on). Mostly
1070                     // because this discriminant will be loaded, and then stored into variable of
1071                     // type calculated by typeck. Consider such case (a bug): typeck decided on
1072                     // byte-sized discriminant, but layout thinks we need a 16-bit to store all
1073                     // discriminant values. That would be a bug, because then, in codegen, in order
1074                     // to store this 16-bit discriminant into 8-bit sized temporary some of the
1075                     // space necessary to represent would have to be discarded (or layout is wrong
1076                     // on thinking it needs 16 bits)
1077                     bug!("layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
1078                          min_ity, typeck_ity);
1079                     // However, it is fine to make discr type however large (as an optimisation)
1080                     // after this point – we’ll just truncate the value we load in codegen.
1081                 }
1082
1083                 // Check to see if we should use a different type for the
1084                 // discriminant. We can safely use a type with the same size
1085                 // as the alignment of the first field of each variant.
1086                 // We increase the size of the discriminant to avoid LLVM copying
1087                 // padding when it doesn't need to. This normally causes unaligned
1088                 // load/stores and excessive memcpy/memset operations. By using a
1089                 // bigger integer size, LLVM can be sure about its contents and
1090                 // won't be so conservative.
1091
1092                 // Use the initial field alignment
1093                 let mut ity = if def.repr.c() || def.repr.int.is_some() {
1094                     min_ity
1095                 } else {
1096                     Integer::for_align(dl, start_align).unwrap_or(min_ity)
1097                 };
1098
1099                 // If the alignment is not larger than the chosen discriminant size,
1100                 // don't use the alignment as the final size.
1101                 if ity <= min_ity {
1102                     ity = min_ity;
1103                 } else {
1104                     // Patch up the variants' first few fields.
1105                     let old_ity_size = min_ity.size();
1106                     let new_ity_size = ity.size();
1107                     for variant in &mut layout_variants {
1108                         match variant.fields {
1109                             FieldPlacement::Arbitrary { ref mut offsets, .. } => {
1110                                 for i in offsets {
1111                                     if *i <= old_ity_size {
1112                                         assert_eq!(*i, old_ity_size);
1113                                         *i = new_ity_size;
1114                                     }
1115                                 }
1116                                 // We might be making the struct larger.
1117                                 if variant.size <= old_ity_size {
1118                                     variant.size = new_ity_size;
1119                                 }
1120                             }
1121                             _ => bug!()
1122                         }
1123                     }
1124                 }
1125
1126                 let tag_mask = !0u128 >> (128 - ity.size().bits());
1127                 let tag = Scalar {
1128                     value: Int(ity, signed),
1129                     valid_range: (min as u128 & tag_mask)..=(max as u128 & tag_mask),
1130                 };
1131                 let mut abi = Abi::Aggregate { sized: true };
1132                 if tag.value.size(dl) == size {
1133                     abi = Abi::Scalar(tag.clone());
1134                 } else {
1135                     // Try to use a ScalarPair for all tagged enums.
1136                     let mut common_prim = None;
1137                     for (field_layouts, layout_variant) in variants.iter().zip(&layout_variants) {
1138                         let offsets = match layout_variant.fields {
1139                             FieldPlacement::Arbitrary { ref offsets, .. } => offsets,
1140                             _ => bug!(),
1141                         };
1142                         let mut fields = field_layouts
1143                             .iter()
1144                             .zip(offsets)
1145                             .filter(|p| !p.0.is_zst());
1146                         let (field, offset) = match (fields.next(), fields.next()) {
1147                             (None, None) => continue,
1148                             (Some(pair), None) => pair,
1149                             _ => {
1150                                 common_prim = None;
1151                                 break;
1152                             }
1153                         };
1154                         let prim = match field.details.abi {
1155                             Abi::Scalar(ref scalar) => scalar.value,
1156                             _ => {
1157                                 common_prim = None;
1158                                 break;
1159                             }
1160                         };
1161                         if let Some(pair) = common_prim {
1162                             // This is pretty conservative. We could go fancier
1163                             // by conflating things like i32 and u32, or even
1164                             // realising that (u8, u8) could just cohabit with
1165                             // u16 or even u32.
1166                             if pair != (prim, offset) {
1167                                 common_prim = None;
1168                                 break;
1169                             }
1170                         } else {
1171                             common_prim = Some((prim, offset));
1172                         }
1173                     }
1174                     if let Some((prim, offset)) = common_prim {
1175                         let pair = scalar_pair(tag.clone(), scalar_unit(prim));
1176                         let pair_offsets = match pair.fields {
1177                             FieldPlacement::Arbitrary {
1178                                 ref offsets,
1179                                 ref memory_index
1180                             } => {
1181                                 assert_eq!(memory_index, &[0, 1]);
1182                                 offsets
1183                             }
1184                             _ => bug!()
1185                         };
1186                         if pair_offsets[0] == Size::ZERO &&
1187                             pair_offsets[1] == *offset &&
1188                             align == pair.align &&
1189                             size == pair.size {
1190                             // We can use `ScalarPair` only when it matches our
1191                             // already computed layout (including `#[repr(C)]`).
1192                             abi = pair.abi;
1193                         }
1194                     }
1195                 }
1196
1197                 if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
1198                     abi = Abi::Uninhabited;
1199                 }
1200
1201                 tcx.intern_layout(LayoutDetails {
1202                     variants: Variants::Multiple {
1203                         discr: tag,
1204                         discr_kind: DiscriminantKind::Tag,
1205                         discr_index: 0,
1206                         variants: layout_variants,
1207                     },
1208                     fields: FieldPlacement::Arbitrary {
1209                         offsets: vec![Size::ZERO],
1210                         memory_index: vec![0]
1211                     },
1212                     abi,
1213                     align,
1214                     size
1215                 })
1216             }
1217
1218             // Types with no meaningful known layout.
1219             ty::Projection(_) | ty::Opaque(..) => {
1220                 let normalized = tcx.normalize_erasing_regions(param_env, ty);
1221                 if ty == normalized {
1222                     return Err(LayoutError::Unknown(ty));
1223                 }
1224                 tcx.layout_raw(param_env.and(normalized))?
1225             }
1226
1227             ty::Bound(..) |
1228             ty::Placeholder(..) |
1229             ty::UnnormalizedProjection(..) |
1230             ty::GeneratorWitness(..) |
1231             ty::Infer(_) => {
1232                 bug!("LayoutDetails::compute: unexpected type `{}`", ty)
1233             }
1234
1235             ty::Param(_) | ty::Error => {
1236                 return Err(LayoutError::Unknown(ty));
1237             }
1238         })
1239     }
1240
1241     /// This is invoked by the `layout_raw` query to record the final
1242     /// layout of each type.
1243     #[inline(always)]
1244     fn record_layout_for_printing(&self, layout: TyLayout<'tcx>) {
1245         // If we are running with `-Zprint-type-sizes`, maybe record layouts
1246         // for dumping later.
1247         if self.tcx.sess.opts.debugging_opts.print_type_sizes {
1248             self.record_layout_for_printing_outlined(layout)
1249         }
1250     }
1251
1252     fn record_layout_for_printing_outlined(&self, layout: TyLayout<'tcx>) {
1253         // Ignore layouts that are done with non-empty environments or
1254         // non-monomorphic layouts, as the user only wants to see the stuff
1255         // resulting from the final codegen session.
1256         if
1257             layout.ty.has_param_types() ||
1258             layout.ty.has_self_ty() ||
1259             !self.param_env.caller_bounds.is_empty()
1260         {
1261             return;
1262         }
1263
1264         // (delay format until we actually need it)
1265         let record = |kind, packed, opt_discr_size, variants| {
1266             let type_desc = format!("{:?}", layout.ty);
1267             self.tcx.sess.code_stats.borrow_mut().record_type_size(kind,
1268                                                                    type_desc,
1269                                                                    layout.align.abi,
1270                                                                    layout.size,
1271                                                                    packed,
1272                                                                    opt_discr_size,
1273                                                                    variants);
1274         };
1275
1276         let adt_def = match layout.ty.sty {
1277             ty::Adt(ref adt_def, _) => {
1278                 debug!("print-type-size t: `{:?}` process adt", layout.ty);
1279                 adt_def
1280             }
1281
1282             ty::Closure(..) => {
1283                 debug!("print-type-size t: `{:?}` record closure", layout.ty);
1284                 record(DataTypeKind::Closure, false, None, vec![]);
1285                 return;
1286             }
1287
1288             _ => {
1289                 debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
1290                 return;
1291             }
1292         };
1293
1294         let adt_kind = adt_def.adt_kind();
1295         let adt_packed = adt_def.repr.packed();
1296
1297         let build_variant_info = |n: Option<Ident>,
1298                                   flds: &[ast::Name],
1299                                   layout: TyLayout<'tcx>| {
1300             let mut min_size = Size::ZERO;
1301             let field_info: Vec<_> = flds.iter().enumerate().map(|(i, &name)| {
1302                 match layout.field(self, i) {
1303                     Err(err) => {
1304                         bug!("no layout found for field {}: `{:?}`", name, err);
1305                     }
1306                     Ok(field_layout) => {
1307                         let offset = layout.fields.offset(i);
1308                         let field_end = offset + field_layout.size;
1309                         if min_size < field_end {
1310                             min_size = field_end;
1311                         }
1312                         session::FieldInfo {
1313                             name: name.to_string(),
1314                             offset: offset.bytes(),
1315                             size: field_layout.size.bytes(),
1316                             align: field_layout.align.abi.bytes(),
1317                         }
1318                     }
1319                 }
1320             }).collect();
1321
1322             session::VariantInfo {
1323                 name: n.map(|n| n.to_string()),
1324                 kind: if layout.is_unsized() {
1325                     session::SizeKind::Min
1326                 } else {
1327                     session::SizeKind::Exact
1328                 },
1329                 align: layout.align.abi.bytes(),
1330                 size: if min_size.bytes() == 0 {
1331                     layout.size.bytes()
1332                 } else {
1333                     min_size.bytes()
1334                 },
1335                 fields: field_info,
1336             }
1337         };
1338
1339         match layout.variants {
1340             Variants::Single { index } => {
1341                 debug!("print-type-size `{:#?}` variant {}",
1342                        layout, adt_def.variants[index].ident);
1343                 if !adt_def.variants.is_empty() {
1344                     let variant_def = &adt_def.variants[index];
1345                     let fields: Vec<_> =
1346                         variant_def.fields.iter().map(|f| f.ident.name).collect();
1347                     record(adt_kind.into(),
1348                            adt_packed,
1349                            None,
1350                            vec![build_variant_info(Some(variant_def.ident),
1351                                                    &fields,
1352                                                    layout)]);
1353                 } else {
1354                     // (This case arises for *empty* enums; so give it
1355                     // zero variants.)
1356                     record(adt_kind.into(), adt_packed, None, vec![]);
1357                 }
1358             }
1359
1360             Variants::Multiple { ref discr, ref discr_kind, .. } => {
1361                 debug!("print-type-size `{:#?}` adt general variants def {}",
1362                        layout.ty, adt_def.variants.len());
1363                 let variant_infos: Vec<_> =
1364                     adt_def.variants.iter_enumerated().map(|(i, variant_def)| {
1365                         let fields: Vec<_> =
1366                             variant_def.fields.iter().map(|f| f.ident.name).collect();
1367                         build_variant_info(Some(variant_def.ident),
1368                                            &fields,
1369                                            layout.for_variant(self, i))
1370                     })
1371                     .collect();
1372                 record(adt_kind.into(), adt_packed, match discr_kind {
1373                     DiscriminantKind::Tag => Some(discr.value.size(self)),
1374                     _ => None
1375                 }, variant_infos);
1376             }
1377         }
1378     }
1379 }
1380
/// Type size "skeleton", i.e., the only information determining a type's size.
/// While this is conservative (aside from constant sizes, only pointers,
/// newtypes thereof, and null-pointer-optimized enums are allowed), it is
/// enough to statically check common use cases of transmute.
#[derive(Copy, Clone, Debug)]
pub enum SizeSkeleton<'tcx> {
    /// Any statically computable Layout.
    Known(Size),

    /// A potentially-fat pointer whose exact size depends on a type
    /// parameter or projection (see `tail`).
    Pointer {
        /// If true, this pointer is never null.
        non_zero: bool,
        /// The type which determines the unsized metadata, if any,
        /// of this pointer. Either a type parameter or a projection
        /// depending on one, with regions erased.
        tail: Ty<'tcx>
    }
}
1400
impl<'a, 'tcx> SizeSkeleton<'tcx> {
    /// Attempts to compute the size skeleton of `ty`.
    ///
    /// First tries the full `layout_of` query; if that succeeds the size is
    /// statically known (`SizeSkeleton::Known`). Otherwise, for a restricted
    /// set of types — references/raw pointers to a type-parameter or
    /// projection tail, newtypes of such pointers, and two-variant enums
    /// eligible for the nullable-pointer optimization — a symbolic `Pointer`
    /// skeleton is produced. Any other layout failure is propagated as the
    /// original `LayoutError`.
    pub fn compute(ty: Ty<'tcx>,
                   tcx: TyCtxt<'a, 'tcx, 'tcx>,
                   param_env: ty::ParamEnv<'tcx>)
                   -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
        debug_assert!(!ty.has_infer_types());

        // First try computing a static layout.
        let err = match tcx.layout_of(param_env.and(ty)) {
            Ok(layout) => {
                return Ok(SizeSkeleton::Known(layout.size));
            }
            Err(err) => err
        };

        match ty.sty {
            ty::Ref(_, pointee, _) |
            ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                // References are assumed non-null; raw pointers are not.
                let non_zero = !ty.is_unsafe_ptr();
                let tail = tcx.struct_tail(pointee);
                match tail.sty {
                    ty::Param(_) | ty::Projection(_) => {
                        debug_assert!(tail.has_param_types() || tail.has_self_ty());
                        Ok(SizeSkeleton::Pointer {
                            non_zero,
                            tail: tcx.erase_regions(&tail)
                        })
                    }
                    _ => {
                        // Layout only fails for pointers when the pointee's
                        // metadata is generic; anything else is a compiler bug.
                        bug!("SizeSkeleton::compute({}): layout errored ({}), yet \
                              tail `{}` is not a type parameter or a projection",
                             ty, err, tail)
                    }
                }
            }

            ty::Adt(def, substs) => {
                // Only newtypes and enums w/ nullable pointer optimization.
                if def.is_union() || def.variants.is_empty() || def.variants.len() > 2 {
                    return Err(err);
                }

                // Get a zero-sized variant or a pointer newtype.
                // Returns `Ok(Some(skeleton))` if the variant is exactly one
                // pointer plus zero-sized fields, `Ok(None)` if it is all
                // zero-sized, and `Err` otherwise.
                let zero_or_ptr_variant = |i| {
                    let i = VariantIdx::new(i);
                    let fields = def.variants[i].fields.iter().map(|field| {
                        SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env)
                    });
                    let mut ptr = None;
                    for field in fields {
                        let field = field?;
                        match field {
                            SizeSkeleton::Known(size) => {
                                if size.bytes() > 0 {
                                    return Err(err);
                                }
                            }
                            SizeSkeleton::Pointer {..} => {
                                // More than one pointer field disqualifies
                                // the variant.
                                if ptr.is_some() {
                                    return Err(err);
                                }
                                ptr = Some(field);
                            }
                        }
                    }
                    Ok(ptr)
                };

                let v0 = zero_or_ptr_variant(0)?;
                // Newtype.
                if def.variants.len() == 1 {
                    if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
                        return Ok(SizeSkeleton::Pointer {
                            // A `#[rustc_layout_scalar_valid_range_start]`-style
                            // restriction that excludes 0 also makes the
                            // newtype non-null.
                            non_zero: non_zero || match tcx.layout_scalar_valid_range(def.did) {
                                (Bound::Included(start), Bound::Unbounded) => start > 0,
                                (Bound::Included(start), Bound::Included(end)) =>
                                    0 < start && start < end,
                                _ => false,
                            },
                            tail,
                        });
                    } else {
                        return Err(err);
                    }
                }

                let v1 = zero_or_ptr_variant(1)?;
                // Nullable pointer enum optimization: one variant is a
                // non-null pointer, the other is entirely zero-sized.
                match (v0, v1) {
                    (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None) |
                    (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
                        // The enum as a whole can be null, so `non_zero` is
                        // reset to false.
                        Ok(SizeSkeleton::Pointer {
                            non_zero: false,
                            tail,
                        })
                    }
                    _ => Err(err)
                }
            }

            ty::Projection(_) | ty::Opaque(..) => {
                // Try to normalize the type away; if that makes progress,
                // retry on the normalized form.
                let normalized = tcx.normalize_erasing_regions(param_env, ty);
                if ty == normalized {
                    Err(err)
                } else {
                    SizeSkeleton::compute(normalized, tcx, param_env)
                }
            }

            _ => Err(err)
        }
    }

    /// Returns `true` if the two skeletons are guaranteed to describe
    /// equally-sized types: either both sizes are statically known and
    /// equal, or both are pointers with the same unsized tail type
    /// (`non_zero` does not affect size).
    pub fn same_size(self, other: SizeSkeleton<'_>) -> bool {
        match (self, other) {
            (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
            (SizeSkeleton::Pointer { tail: a, .. },
             SizeSkeleton::Pointer { tail: b, .. }) => a == b,
            _ => false
        }
    }
}
1523
/// Access to a `TyCtxt`, for contexts used by the layout machinery.
/// Requires `HasDataLayout` since layout computation always needs
/// the target data layout as well.
pub trait HasTyCtxt<'tcx>: HasDataLayout {
    /// Returns the type context (in its global form — see the impls below).
    fn tcx<'a>(&'a self) -> TyCtxt<'a, 'tcx, 'tcx>;
}
1527
/// Access to the `ParamEnv` under which layout queries are evaluated.
pub trait HasParamEnv<'tcx> {
    fn param_env(&self) -> ty::ParamEnv<'tcx>;
}
1531
impl<'a, 'gcx, 'tcx> HasDataLayout for TyCtxt<'a, 'gcx, 'tcx> {
    fn data_layout(&self) -> &TargetDataLayout {
        // The target data layout is stored directly on the `TyCtxt`.
        &self.data_layout
    }
}
1537
impl<'a, 'gcx, 'tcx> HasTyCtxt<'gcx> for TyCtxt<'a, 'gcx, 'tcx> {
    fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'gcx> {
        // Return the global (`'gcx`-only) version of this context.
        self.global_tcx()
    }
}
1543
impl<'tcx, C> HasParamEnv<'tcx> for LayoutCx<'tcx, C> {
    fn param_env(&self) -> ty::ParamEnv<'tcx> {
        // `LayoutCx` carries its param-env directly.
        self.param_env
    }
}
1549
impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
    fn data_layout(&self) -> &TargetDataLayout {
        // Delegate to the wrapped context (a `TyCtxt` or `TyCtxtAt`).
        self.tcx.data_layout()
    }
}
1555
impl<'gcx, 'tcx, T: HasTyCtxt<'gcx>> HasTyCtxt<'gcx> for LayoutCx<'tcx, T> {
    fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'gcx> {
        // Delegate to the wrapped context.
        self.tcx.tcx()
    }
}
1561
/// Abstracts over a plain `T` and `Result<T, E>`, so that layout code can be
/// generic over whether a particular `LayoutOf` implementation is infallible
/// (`Error = !`) or may fail with a layout error.
pub trait MaybeResult<T> {
    type Error;

    /// Converts from the canonical `Result` form into `Self`.
    fn from(x: Result<T, Self::Error>) -> Self;
    /// Converts `self` into the canonical `Result` form.
    fn to_result(self) -> Result<T, Self::Error>;
}
1568
1569 impl<T> MaybeResult<T> for T {
1570     type Error = !;
1571
1572     fn from(x: Result<T, Self::Error>) -> Self {
1573         let Ok(x) = x;
1574         x
1575     }
1576     fn to_result(self) -> Result<T, Self::Error> {
1577         Ok(self)
1578     }
1579 }
1580
/// The fallible case: `Result<T, E>` is already in canonical form, so both
/// conversions are the identity.
impl<T, E> MaybeResult<T> for Result<T, E> {
    type Error = E;

    fn from(x: Result<T, Self::Error>) -> Self {
        x
    }
    fn to_result(self) -> Result<T, Self::Error> {
        self
    }
}
1591
1592 pub type TyLayout<'tcx> = ::rustc_target::abi::TyLayout<'tcx, Ty<'tcx>>;
1593
1594 impl<'a, 'tcx> LayoutOf for LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
1595     type Ty = Ty<'tcx>;
1596     type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;
1597
1598     /// Computes the layout of a type. Note that this implicitly
1599     /// executes in "reveal all" mode.
1600     fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout {
1601         let param_env = self.param_env.with_reveal_all();
1602         let ty = self.tcx.normalize_erasing_regions(param_env, ty);
1603         let details = self.tcx.layout_raw(param_env.and(ty))?;
1604         let layout = TyLayout {
1605             ty,
1606             details
1607         };
1608
1609         // N.B., this recording is normally disabled; when enabled, it
1610         // can however trigger recursive invocations of `layout_of`.
1611         // Therefore, we execute it *after* the main query has
1612         // completed, to avoid problems around recursive structures
1613         // and the like. (Admittedly, I wasn't able to reproduce a problem
1614         // here, but it seems like the right thing to do. -nmatsakis)
1615         self.record_layout_for_printing(layout);
1616
1617         Ok(layout)
1618     }
1619 }
1620
1621 impl<'a, 'tcx> LayoutOf for LayoutCx<'tcx, ty::query::TyCtxtAt<'a, 'tcx, 'tcx>> {
1622     type Ty = Ty<'tcx>;
1623     type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;
1624
1625     /// Computes the layout of a type. Note that this implicitly
1626     /// executes in "reveal all" mode.
1627     fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout {
1628         let param_env = self.param_env.with_reveal_all();
1629         let ty = self.tcx.normalize_erasing_regions(param_env, ty);
1630         let details = self.tcx.layout_raw(param_env.and(ty))?;
1631         let layout = TyLayout {
1632             ty,
1633             details
1634         };
1635
1636         // N.B., this recording is normally disabled; when enabled, it
1637         // can however trigger recursive invocations of `layout_of`.
1638         // Therefore, we execute it *after* the main query has
1639         // completed, to avoid problems around recursive structures
1640         // and the like. (Admittedly, I wasn't able to reproduce a problem
1641         // here, but it seems like the right thing to do. -nmatsakis)
1642         let cx = LayoutCx {
1643             tcx: *self.tcx,
1644             param_env: self.param_env
1645         };
1646         cx.record_layout_for_printing(layout);
1647
1648         Ok(layout)
1649     }
1650 }
1651
1652 // Helper (inherent) `layout_of` methods to avoid pushing `LayoutCx` to users.
1653 impl TyCtxt<'a, 'tcx, '_> {
1654     /// Computes the layout of a type. Note that this implicitly
1655     /// executes in "reveal all" mode.
1656     #[inline]
1657     pub fn layout_of(self, param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
1658                      -> Result<TyLayout<'tcx>, LayoutError<'tcx>> {
1659         let cx = LayoutCx {
1660             tcx: self.global_tcx(),
1661             param_env: param_env_and_ty.param_env
1662         };
1663         cx.layout_of(param_env_and_ty.value)
1664     }
1665 }
1666
1667 impl ty::query::TyCtxtAt<'a, 'tcx, '_> {
1668     /// Computes the layout of a type. Note that this implicitly
1669     /// executes in "reveal all" mode.
1670     #[inline]
1671     pub fn layout_of(self, param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
1672                      -> Result<TyLayout<'tcx>, LayoutError<'tcx>> {
1673         let cx = LayoutCx {
1674             tcx: self.global_tcx().at(self.span),
1675             param_env: param_env_and_ty.param_env
1676         };
1677         cx.layout_of(param_env_and_ty.value)
1678     }
1679 }
1680
impl<'a, 'tcx, C> TyLayoutMethods<'tcx, C> for Ty<'tcx>
    where C: LayoutOf<Ty = Ty<'tcx>> + HasTyCtxt<'tcx>,
          C::TyLayout: MaybeResult<TyLayout<'tcx>>,
          C: HasParamEnv<'tcx>
{
    /// Projects `this` down to the layout of one of its variants.
    ///
    /// If the layout is already `Variants::Single` with the requested index,
    /// this is the identity. Requesting a *different* variant of a `Single`
    /// layout yields a synthetic zero-sized, uninhabited layout (the assert
    /// against a fresh `layout_of` below enforces that this only happens for
    /// types whose overall layout really is single-variant).
    fn for_variant(this: TyLayout<'tcx>, cx: &C, variant_index: VariantIdx) -> TyLayout<'tcx> {
        let details = match this.variants {
            Variants::Single { index } if index == variant_index => this.details,

            Variants::Single { index } => {
                // Deny calling for_variant more than once for non-Single enums.
                if let Ok(layout) = cx.layout_of(this.ty).to_result() {
                    assert_eq!(layout.variants, Variants::Single { index });
                }

                let fields = match this.ty.sty {
                    ty::Adt(def, _) => def.variants[variant_index].fields.len(),
                    _ => bug!()
                };
                let tcx = cx.tcx();
                // Synthesize an uninhabited, zero-sized layout with the right
                // number of (union-placed) fields for the requested variant.
                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: variant_index },
                    fields: FieldPlacement::Union(fields),
                    abi: Abi::Uninhabited,
                    align: tcx.data_layout.i8_align,
                    size: Size::ZERO
                })
            }

            Variants::Multiple { ref variants, .. } => {
                &variants[variant_index]
            }
        };

        assert_eq!(details.variants, Variants::Single { index: variant_index });

        TyLayout {
            ty: this.ty,
            details
        }
    }

    /// Computes the layout of field `i` of `this`, dispatching on the type.
    /// Calls `bug!` for types that have no fields in the layout sense.
    fn field(this: TyLayout<'tcx>, cx: &C, i: usize) -> C::TyLayout {
        let tcx = cx.tcx();
        // Helper producing a layout for a discriminant scalar (used for the
        // discriminant "field" of enums and generators).
        let discr_layout = |discr: &Scalar| -> C::TyLayout {
            let layout = LayoutDetails::scalar(cx, discr.clone());
            MaybeResult::from(Ok(TyLayout {
                details: tcx.intern_layout(layout),
                ty: discr.value.to_ty(tcx),
            }))
        };

        cx.layout_of(match this.ty.sty {
            // Primitive-like types have no fields.
            ty::Bool |
            ty::Char |
            ty::Int(_) |
            ty::Uint(_) |
            ty::Float(_) |
            ty::FnPtr(_) |
            ty::Never |
            ty::FnDef(..) |
            ty::GeneratorWitness(..) |
            ty::Foreign(..) |
            ty::Dynamic(..) => {
                bug!("TyLayout::field_type({:?}): not applicable", this)
            }

            // Potentially-fat pointers.
            ty::Ref(_, pointee, _) |
            ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                assert!(i < this.fields.count());

                // Reuse the fat *T type as its own thin pointer data field.
                // This provides information about e.g., DST struct pointees
                // (which may have no non-DST form), and will work as long
                // as the `Abi` or `FieldPlacement` is checked by users.
                if i == 0 {
                    let nil = tcx.mk_unit();
                    let ptr_ty = if this.ty.is_unsafe_ptr() {
                        tcx.mk_mut_ptr(nil)
                    } else {
                        tcx.mk_mut_ref(tcx.lifetimes.re_static, nil)
                    };
                    return MaybeResult::from(cx.layout_of(ptr_ty).to_result().map(|mut ptr_layout| {
                        ptr_layout.ty = this.ty;
                        ptr_layout
                    }));
                }

                // Field 1 is the pointer metadata; its type depends on
                // the unsized tail of the pointee.
                match tcx.struct_tail(pointee).sty {
                    ty::Slice(_) |
                    ty::Str => tcx.types.usize,
                    ty::Dynamic(_, _) => {
                        // Model the vtable pointer as a reference to a
                        // three-usize array (stand-in for the real vtable).
                        tcx.mk_imm_ref(
                            tcx.lifetimes.re_static,
                            tcx.mk_array(tcx.types.usize, 3),
                        )
                        /* FIXME: use actual fn pointers
                        Warning: naively computing the number of entries in the
                        vtable by counting the methods on the trait + methods on
                        all parent traits does not work, because some methods can
                        be not object safe and thus excluded from the vtable.
                        Increase this counter if you tried to implement this but
                        failed to do it without duplicating a lot of code from
                        other places in the compiler: 2
                        tcx.mk_tup(&[
                            tcx.mk_array(tcx.types.usize, 3),
                            tcx.mk_array(Option<fn()>),
                        ])
                        */
                    }
                    _ => bug!("TyLayout::field_type({:?}): not applicable", this)
                }
            }

            // Arrays and slices.
            ty::Array(element, _) |
            ty::Slice(element) => element,
            ty::Str => tcx.types.u8,

            // Tuples, generators and closures.
            ty::Closure(def_id, ref substs) => {
                substs.upvar_tys(def_id, tcx).nth(i).unwrap()
            }

            ty::Generator(def_id, ref substs, _) => {
                match this.variants {
                    // A single-variant generator layout indexes into the
                    // state types of that particular state.
                    Variants::Single { index } => {
                        substs.state_tys(def_id, tcx)
                            .nth(index.as_usize()).unwrap()
                            .nth(i).unwrap()
                    }
                    // A multi-variant generator layout exposes the prefix
                    // (upvars etc.) plus the discriminant field.
                    Variants::Multiple { ref discr, discr_index, .. } => {
                        if i == discr_index {
                            return discr_layout(discr);
                        }
                        substs.prefix_tys(def_id, tcx).nth(i).unwrap()
                    }
                }
            }

            ty::Tuple(tys) => tys[i].expect_ty(),

            // SIMD vector types.
            ty::Adt(def, ..) if def.repr.simd() => {
                this.ty.simd_type(tcx)
            }

            // ADTs.
            ty::Adt(def, substs) => {
                match this.variants {
                    Variants::Single { index } => {
                        def.variants[index].fields[i].ty(tcx, substs)
                    }

                    // Discriminant field for enums (where applicable).
                    Variants::Multiple { ref discr, .. } => {
                        assert_eq!(i, 0);
                        return discr_layout(discr);
                    }
                }
            }

            // Types that should have been normalized or replaced away
            // before layout is computed.
            ty::Projection(_) | ty::UnnormalizedProjection(..) | ty::Bound(..) |
            ty::Placeholder(..) | ty::Opaque(..) | ty::Param(_) | ty::Infer(_) |
            ty::Error => {
                bug!("TyLayout::field_type: unexpected type `{}`", this.ty)
            }
        })
    }

    /// If a pointer is located at `offset` within `this`, returns its
    /// size/alignment and (for references/`Box`) its safety guarantees;
    /// returns `None` when no pointer can be identified at that offset.
    fn pointee_info_at(
        this: TyLayout<'tcx>,
        cx: &C,
        offset: Size,
    ) -> Option<PointeeInfo> {
        match this.ty.sty {
            // A raw pointer at offset 0: size/align known, no safety claims.
            ty::RawPtr(mt) if offset.bytes() == 0 => {
                cx.layout_of(mt.ty).to_result().ok()
                    .map(|layout| PointeeInfo {
                        size: layout.size,
                        align: layout.align.abi,
                        safe: None,
                    })
            }

            // A reference at offset 0: classify its aliasing guarantees.
            ty::Ref(_, ty, mt) if offset.bytes() == 0 => {
                let tcx = cx.tcx();
                let is_freeze = ty.is_freeze(tcx, cx.param_env(), DUMMY_SP);
                let kind = match mt {
                    // `&T` is `noalias`-frozen only when `T` has no
                    // interior mutability.
                    hir::MutImmutable => if is_freeze {
                        PointerKind::Frozen
                    } else {
                        PointerKind::Shared
                    },
                    hir::MutMutable => {
                        // Previously we would only emit noalias annotations for LLVM >= 6 or in
                        // panic=abort mode. That was deemed right, as prior versions had many bugs
                        // in conjunction with unwinding, but later versions didn’t seem to have
                        // said issues. See issue #31681.
                        //
                        // Alas, later on we encountered a case where noalias would generate wrong
                        // code altogether even with recent versions of LLVM in *safe* code with no
                        // unwinding involved. See #54462.
                        //
                        // For now, do not enable mutable_noalias by default at all, while the
                        // issue is being figured out.
                        let mutable_noalias = tcx.sess.opts.debugging_opts.mutable_noalias
                            .unwrap_or(false);
                        if mutable_noalias {
                            PointerKind::UniqueBorrowed
                        } else {
                            PointerKind::Shared
                        }
                    }
                };

                cx.layout_of(ty).to_result().ok()
                    .map(|layout| PointeeInfo {
                        size: layout.size,
                        align: layout.align.abi,
                        safe: Some(kind),
                    })
            }

            // Aggregates: search the fields for a pointer at `offset`.
            _ => {
                let mut data_variant = match this.variants {
                    // Within the discriminant field, only the niche itself is
                    // always initialized, so we only check for a pointer at its
                    // offset.
                    //
                    // If the niche is a pointer, it's either valid (according
                    // to its type), or null (which the niche field's scalar
                    // validity range encodes).  This allows using
                    // `dereferenceable_or_null` for e.g., `Option<&T>`, and
                    // this will continue to work as long as we don't start
                    // using more niches than just null (e.g., the first page of
                    // the address space, or unaligned pointers).
                    Variants::Multiple {
                        discr_kind: DiscriminantKind::Niche {
                            dataful_variant,
                            ..
                        },
                        discr_index,
                        ..
                    } if this.fields.offset(discr_index) == offset =>
                        Some(this.for_variant(cx, dataful_variant)),
                    _ => Some(this),
                };

                if let Some(variant) = data_variant {
                    // We're not interested in any unions.
                    if let FieldPlacement::Union(_) = variant.fields {
                        data_variant = None;
                    }
                }

                let mut result = None;

                if let Some(variant) = data_variant {
                    // Find the field that fully contains a pointer-sized
                    // region starting at `offset`, and recurse into it.
                    let ptr_end = offset + Pointer.size(cx);
                    for i in 0..variant.fields.count() {
                        let field_start = variant.fields.offset(i);
                        if field_start <= offset {
                            let field = variant.field(cx, i);
                            result = field.to_result().ok()
                                .and_then(|field| {
                                    if ptr_end <= field_start + field.size {
                                        // We found the right field, look inside it.
                                        field.pointee_info_at(cx, offset - field_start)
                                    } else {
                                        None
                                    }
                                });
                            if result.is_some() {
                                break;
                            }
                        }
                    }
                }

                // FIXME(eddyb) This should be for `ptr::Unique<T>`, not `Box<T>`.
                if let Some(ref mut pointee) = result {
                    if let ty::Adt(def, _) = this.ty.sty {
                        if def.is_box() && offset.bytes() == 0 {
                            pointee.safe = Some(PointerKind::UniqueOwned);
                        }
                    }
                }

                result
            }
        }
    }
}
1976
/// A niche found inside a layout: a scalar whose valid range does not cover
/// every bit pattern of its size, leaving spare values that an enclosing enum
/// can use to encode discriminants without adding space.
struct Niche {
    // Byte offset of the scalar within the enclosing layout.
    offset: Size,
    // The scalar (primitive plus valid range) providing the niche.
    scalar: Scalar,
    // Number of values outside the valid range, i.e., available for reuse.
    available: u128,
}
1982
impl Niche {
    /// Tries to reserve `count` of this niche's spare values for
    /// discriminant encoding.
    ///
    /// Returns `None` if fewer than `count` values are available. Otherwise
    /// returns the first reserved value together with a new `Scalar` whose
    /// valid range is extended (with wrapping arithmetic at the scalar's bit
    /// width) past the old end to also cover the reserved values.
    fn reserve<'a, 'tcx>(
        &self,
        cx: &LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>>,
        count: u128,
    ) -> Option<(u128, Scalar)> {
        if count > self.available {
            return None;
        }
        let Scalar { value, valid_range: ref v } = self.scalar;
        let bits = value.size(cx).bits();
        assert!(bits <= 128);
        // Mask selecting only the scalar's `bits` low bits, so the additions
        // below wrap at the scalar's width rather than at 128 bits.
        let max_value = !0u128 >> (128 - bits);
        // Reserved values begin immediately after the current valid range.
        let start = v.end().wrapping_add(1) & max_value;
        let end = v.end().wrapping_add(count) & max_value;
        Some((start, Scalar { value, valid_range: *v.start()..=end }))
    }
}
2001
impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
    /// Find the offset of a niche leaf field, starting from
    /// the given type and recursing through aggregates.
    ///
    /// Returns `Ok(None)` when no usable niche exists; errors only propagate
    /// from nested layout computations (plus the size-overflow guard below).
    // FIXME(eddyb) traverse already optimized enums.
    fn find_niche(&self, layout: TyLayout<'tcx>) -> Result<Option<Niche>, LayoutError<'tcx>> {
        // Turns a scalar at `offset` into a `Niche` if its valid range
        // leaves any values unused.
        let scalar_niche = |scalar: &Scalar, offset| {
            let Scalar { value, valid_range: ref v } = *scalar;

            let bits = value.size(self).bits();
            assert!(bits <= 128);
            let max_value = !0u128 >> (128 - bits);

            // Find out how many values are outside the valid range.
            // The range may wrap around (start > end), in which case the
            // invalid values are those strictly between end and start.
            let available = if v.start() <= v.end() {
                v.start() + (max_value - v.end())
            } else {
                v.start() - v.end() - 1
            };

            // Give up if there is no niche value available.
            if available == 0 {
                return None;
            }

            Some(Niche { offset, scalar: scalar.clone(), available })
        };

        // Locals variables which live across yields are stored
        // in the generator type as fields. These may be uninitialized
        // so we don't look for niches there.
        if let ty::Generator(..) = layout.ty.sty {
            return Ok(None);
        }

        // Scalar-like ABIs can be inspected directly without recursing.
        match layout.abi {
            Abi::Scalar(ref scalar) => {
                return Ok(scalar_niche(scalar, Size::ZERO));
            }
            Abi::ScalarPair(ref a, ref b) => {
                // HACK(nox): We iter on `b` and then `a` because `max_by_key`
                // returns the last maximum.
                let niche = iter::once(
                    (b, a.value.size(self).align_to(b.value.align(self).abi))
                )
                    .chain(iter::once((a, Size::ZERO)))
                    .filter_map(|(scalar, offset)| scalar_niche(scalar, offset))
                    .max_by_key(|niche| niche.available);
                return Ok(niche);
            }
            Abi::Vector { ref element, .. } => {
                return Ok(scalar_niche(element, Size::ZERO));
            }
            _ => {}
        }

        // Perhaps one of the fields is non-zero, let's recurse and find out.
        if let FieldPlacement::Union(_) = layout.fields {
            // Only Rust enums have safe-to-inspect fields
            // (a discriminant), other unions are unsafe.
            if let Variants::Single { .. } = layout.variants {
                return Ok(None);
            }
        }
        if let FieldPlacement::Array { count: original_64_bit_count, .. } = layout.fields {
            // rust-lang/rust#57038: avoid ICE within FieldPlacement::count when count too big
            if original_64_bit_count > usize::max_value() as u64 {
                return Err(LayoutError::SizeOverflow(layout.ty));
            }
            // All array elements are identical, so inspecting the first
            // is sufficient.
            if layout.fields.count() > 0 {
                return self.find_niche(layout.field(self, 0)?);
            } else {
                return Ok(None);
            }
        }
        // General aggregate: recurse into every field and keep the niche
        // with the most available values, rebasing its offset.
        let mut niche = None;
        let mut available = 0;
        for i in 0..layout.fields.count() {
            if let Some(mut c) = self.find_niche(layout.field(self, i)?)? {
                if c.available > available {
                    available = c.available;
                    c.offset += layout.fields.offset(i);
                    niche = Some(c);
                }
            }
        }
        Ok(niche)
    }
}
2090
2091 impl<'a> HashStable<StableHashingContext<'a>> for Variants {
2092     fn hash_stable<W: StableHasherResult>(&self,
2093                                           hcx: &mut StableHashingContext<'a>,
2094                                           hasher: &mut StableHasher<W>) {
2095         use crate::ty::layout::Variants::*;
2096         mem::discriminant(self).hash_stable(hcx, hasher);
2097
2098         match *self {
2099             Single { index } => {
2100                 index.hash_stable(hcx, hasher);
2101             }
2102             Multiple {
2103                 ref discr,
2104                 ref discr_kind,
2105                 discr_index,
2106                 ref variants,
2107             } => {
2108                 discr.hash_stable(hcx, hasher);
2109                 discr_kind.hash_stable(hcx, hasher);
2110                 discr_index.hash_stable(hcx, hasher);
2111                 variants.hash_stable(hcx, hasher);
2112             }
2113         }
2114     }
2115 }
2116
2117 impl<'a> HashStable<StableHashingContext<'a>> for DiscriminantKind {
2118     fn hash_stable<W: StableHasherResult>(&self,
2119                                           hcx: &mut StableHashingContext<'a>,
2120                                           hasher: &mut StableHasher<W>) {
2121         use crate::ty::layout::DiscriminantKind::*;
2122         mem::discriminant(self).hash_stable(hcx, hasher);
2123
2124         match *self {
2125             Tag => {}
2126             Niche {
2127                 dataful_variant,
2128                 ref niche_variants,
2129                 niche_start,
2130             } => {
2131                 dataful_variant.hash_stable(hcx, hasher);
2132                 niche_variants.start().hash_stable(hcx, hasher);
2133                 niche_variants.end().hash_stable(hcx, hasher);
2134                 niche_start.hash_stable(hcx, hasher);
2135             }
2136         }
2137     }
2138 }
2139
2140 impl<'a> HashStable<StableHashingContext<'a>> for FieldPlacement {
2141     fn hash_stable<W: StableHasherResult>(&self,
2142                                           hcx: &mut StableHashingContext<'a>,
2143                                           hasher: &mut StableHasher<W>) {
2144         use crate::ty::layout::FieldPlacement::*;
2145         mem::discriminant(self).hash_stable(hcx, hasher);
2146
2147         match *self {
2148             Union(count) => {
2149                 count.hash_stable(hcx, hasher);
2150             }
2151             Array { count, stride } => {
2152                 count.hash_stable(hcx, hasher);
2153                 stride.hash_stable(hcx, hasher);
2154             }
2155             Arbitrary { ref offsets, ref memory_index } => {
2156                 offsets.hash_stable(hcx, hasher);
2157                 memory_index.hash_stable(hcx, hasher);
2158             }
2159         }
2160     }
2161 }
2162
2163 impl<'a> HashStable<StableHashingContext<'a>> for VariantIdx {
2164     fn hash_stable<W: StableHasherResult>(
2165         &self,
2166         hcx: &mut StableHashingContext<'a>,
2167         hasher: &mut StableHasher<W>,
2168     ) {
2169         self.as_u32().hash_stable(hcx, hasher)
2170     }
2171 }
2172
2173 impl<'a> HashStable<StableHashingContext<'a>> for Abi {
2174     fn hash_stable<W: StableHasherResult>(&self,
2175                                           hcx: &mut StableHashingContext<'a>,
2176                                           hasher: &mut StableHasher<W>) {
2177         use crate::ty::layout::Abi::*;
2178         mem::discriminant(self).hash_stable(hcx, hasher);
2179
2180         match *self {
2181             Uninhabited => {}
2182             Scalar(ref value) => {
2183                 value.hash_stable(hcx, hasher);
2184             }
2185             ScalarPair(ref a, ref b) => {
2186                 a.hash_stable(hcx, hasher);
2187                 b.hash_stable(hcx, hasher);
2188             }
2189             Vector { ref element, count } => {
2190                 element.hash_stable(hcx, hasher);
2191                 count.hash_stable(hcx, hasher);
2192             }
2193             Aggregate { sized } => {
2194                 sized.hash_stable(hcx, hasher);
2195             }
2196         }
2197     }
2198 }
2199
2200 impl<'a> HashStable<StableHashingContext<'a>> for Scalar {
2201     fn hash_stable<W: StableHasherResult>(&self,
2202                                           hcx: &mut StableHashingContext<'a>,
2203                                           hasher: &mut StableHasher<W>) {
2204         let Scalar { value, ref valid_range } = *self;
2205         value.hash_stable(hcx, hasher);
2206         valid_range.start().hash_stable(hcx, hasher);
2207         valid_range.end().hash_stable(hcx, hasher);
2208     }
2209 }
2210
// Macro-generated `HashStable` impls for the remaining layout types; the macro
// hashes each listed field (or variant payload) in the order written here.
impl_stable_hash_for!(struct crate::ty::layout::LayoutDetails {
    variants,
    fields,
    abi,
    size,
    align
});

impl_stable_hash_for!(enum crate::ty::layout::Integer {
    I8,
    I16,
    I32,
    I64,
    I128
});

impl_stable_hash_for!(enum crate::ty::layout::Primitive {
    Int(integer, signed),
    Float(fty),
    Pointer
});

impl_stable_hash_for!(struct crate::ty::layout::AbiAndPrefAlign {
    abi,
    pref
});
2237
2238 impl<'gcx> HashStable<StableHashingContext<'gcx>> for Align {
2239     fn hash_stable<W: StableHasherResult>(&self,
2240                                           hcx: &mut StableHashingContext<'gcx>,
2241                                           hasher: &mut StableHasher<W>) {
2242         self.bytes().hash_stable(hcx, hasher);
2243     }
2244 }
2245
2246 impl<'gcx> HashStable<StableHashingContext<'gcx>> for Size {
2247     fn hash_stable<W: StableHasherResult>(&self,
2248                                           hcx: &mut StableHashingContext<'gcx>,
2249                                           hasher: &mut StableHasher<W>) {
2250         self.bytes().hash_stable(hcx, hasher);
2251     }
2252 }
2253
2254 impl<'a, 'gcx> HashStable<StableHashingContext<'a>> for LayoutError<'gcx>
2255 {
2256     fn hash_stable<W: StableHasherResult>(&self,
2257                                           hcx: &mut StableHashingContext<'a>,
2258                                           hasher: &mut StableHasher<W>) {
2259         use crate::ty::layout::LayoutError::*;
2260         mem::discriminant(self).hash_stable(hcx, hasher);
2261
2262         match *self {
2263             Unknown(t) |
2264             SizeOverflow(t) => t.hash_stable(hcx, hasher)
2265         }
2266     }
2267 }
2268
/// Extension trait, implemented for `call::FnType`, that builds the ABI-level
/// description of a function (argument/return passing modes and calling
/// convention) from a Rust signature and the target's layout rules.
pub trait FnTypeExt<'tcx, C>
where
    C: LayoutOf<Ty = Ty<'tcx>, TyLayout = TyLayout<'tcx>>
        + HasDataLayout
        + HasTargetSpec
        + HasTyCtxt<'tcx>
        + HasParamEnv<'tcx>,
{
    /// Computes the `FnType` of a monomorphic instance's signature.
    fn of_instance(cx: &C, instance: &ty::Instance<'tcx>) -> Self;
    /// Builds a `FnType` for a direct (non-virtual) call.
    fn new(cx: &C, sig: ty::FnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self;
    /// Builds a `FnType` for a virtual call through a vtable; the receiver is
    /// rewritten to a thin pointer (see the impl for details).
    fn new_vtable(cx: &C, sig: ty::FnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self;
    /// Shared implementation behind `new`/`new_vtable`; `mk_arg_type` maps each
    /// type (and its argument index, `None` for the return) to an `ArgType`.
    fn new_internal(
        cx: &C,
        sig: ty::FnSig<'tcx>,
        extra_args: &[Ty<'tcx>],
        mk_arg_type: impl Fn(Ty<'tcx>, Option<usize>) -> ArgType<'tcx, Ty<'tcx>>,
    ) -> Self;
    /// Adjusts argument/return passing modes in place for the given ABI.
    fn adjust_for_abi(&mut self, cx: &C, abi: SpecAbi);
}
2288
impl<'tcx, C> FnTypeExt<'tcx, C> for call::FnType<'tcx, Ty<'tcx>>
where
    C: LayoutOf<Ty = Ty<'tcx>, TyLayout = TyLayout<'tcx>>
        + HasDataLayout
        + HasTargetSpec
        + HasTyCtxt<'tcx>
        + HasParamEnv<'tcx>,
{
    // Computes the `FnType` of an instance: fetch its signature, erase
    // late-bound regions and normalize under `reveal_all` (codegen only ever
    // sees fully monomorphic code), then build it as a direct call.
    fn of_instance(cx: &C, instance: &ty::Instance<'tcx>) -> Self {
        let sig = instance.fn_sig(cx.tcx());
        let sig = cx
            .tcx()
            .normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig);
        call::FnType::new(cx, sig, &[])
    }

    // Direct call: each argument simply gets the `ArgType` of its own layout.
    fn new(cx: &C, sig: ty::FnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
        call::FnType::new_internal(cx, sig, extra_args, |ty, _| ArgType::new(cx.layout_of(ty)))
    }

    // Virtual call: like `new`, except the receiver (argument 0) is rewritten
    // so that only the data pointer — not the vtable pointer — is passed.
    fn new_vtable(cx: &C, sig: ty::FnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
        FnTypeExt::new_internal(cx, sig, extra_args, |ty, arg_idx| {
            let mut layout = cx.layout_of(ty);
            // Don't pass the vtable, it's not an argument of the virtual fn.
            // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
            // or `&/&mut dyn Trait` because this is special-cased elsewhere in codegen
            if arg_idx == Some(0) {
                let fat_pointer_ty = if layout.is_unsized() {
                    // unsized `self` is passed as a pointer to `self`
                    // FIXME (mikeyhew) change this to use &own if it is ever added to the language
                    cx.tcx().mk_mut_ptr(layout.ty)
                } else {
                    // A sized receiver must be some pointer-like wrapper, i.e.
                    // have a two-word (data + vtable) ScalarPair ABI.
                    match layout.abi {
                        Abi::ScalarPair(..) => (),
                        _ => bug!("receiver type has unsupported layout: {:?}", layout),
                    }

                    // In the case of Rc<Self>, we need to explicitly pass a *mut RcBox<Self>
                    // with a Scalar (not ScalarPair) ABI. This is a hack that is understood
                    // elsewhere in the compiler as a method on a `dyn Trait`.
                    // To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes until we
                    // get a built-in pointer type
                    let mut fat_pointer_layout = layout;
                    'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr()
                        && !fat_pointer_layout.ty.is_region_ptr()
                    {
                        // Descend into the single non-ZST field at each level.
                        'iter_fields: for i in 0..fat_pointer_layout.fields.count() {
                            let field_layout = fat_pointer_layout.field(cx, i);

                            if !field_layout.is_zst() {
                                fat_pointer_layout = field_layout;
                                continue 'descend_newtypes;
                            }
                        }

                        bug!(
                            "receiver has no non-zero-sized fields {:?}",
                            fat_pointer_layout
                        );
                    }

                    fat_pointer_layout.ty
                };

                // we now have a type like `*mut RcBox<dyn Trait>`
                // change its layout to that of `*mut ()`, a thin pointer, but keep the same type
                // this is understood as a special case elsewhere in the compiler
                let unit_pointer_ty = cx.tcx().mk_mut_ptr(cx.tcx().mk_unit());
                layout = cx.layout_of(unit_pointer_ty);
                layout.ty = fat_pointer_ty;
            }
            ArgType::new(layout)
        })
    }

    // Shared construction logic: picks the calling convention, untuples
    // "rust-call" arguments, classifies every argument and the return value,
    // and finally runs the ABI-specific adjustment pass.
    fn new_internal(
        cx: &C,
        sig: ty::FnSig<'tcx>,
        extra_args: &[Ty<'tcx>],
        mk_arg_type: impl Fn(Ty<'tcx>, Option<usize>) -> ArgType<'tcx, Ty<'tcx>>,
    ) -> Self {
        debug!("FnType::new_internal({:?}, {:?})", sig, extra_args);

        // Map the (target-adjusted) source-level ABI to a calling convention.
        use rustc_target::spec::abi::Abi::*;
        let conv = match cx.tcx().sess.target.target.adjust_abi(sig.abi) {
            RustIntrinsic | PlatformIntrinsic | Rust | RustCall => Conv::C,

            // It's the ABI's job to select this, not ours.
            System => bug!("system abi should be selected elsewhere"),

            Stdcall => Conv::X86Stdcall,
            Fastcall => Conv::X86Fastcall,
            Vectorcall => Conv::X86VectorCall,
            Thiscall => Conv::X86ThisCall,
            C => Conv::C,
            Unadjusted => Conv::C,
            Win64 => Conv::X86_64Win64,
            SysV64 => Conv::X86_64SysV,
            Aapcs => Conv::ArmAapcs,
            PtxKernel => Conv::PtxKernel,
            Msp430Interrupt => Conv::Msp430Intr,
            X86Interrupt => Conv::X86Intr,
            AmdGpuKernel => Conv::AmdGpuKernel,

            // These API constants ought to be more specific...
            Cdecl => Conv::C,
        };

        let mut inputs = sig.inputs();
        // "rust-call" functions take their trailing arguments as one tuple;
        // untuple it here so each element becomes a separate ABI argument.
        let extra_args = if sig.abi == RustCall {
            assert!(!sig.c_variadic && extra_args.is_empty());

            match sig.inputs().last().unwrap().sty {
                ty::Tuple(tupled_arguments) => {
                    inputs = &sig.inputs()[0..sig.inputs().len() - 1];
                    tupled_arguments.iter().map(|k| k.expect_ty()).collect()
                }
                _ => {
                    bug!(
                        "argument to function with \"rust-call\" ABI \
                         is not a tuple"
                    );
                }
            }
        } else {
            assert!(sig.c_variadic || extra_args.is_empty());
            extra_args.to_vec()
        };

        // Targets whose non-Rust ABI does NOT ignore zero-sized arguments;
        // consulted in `arg_of` below.
        let target = &cx.tcx().sess.target.target;
        let win_x64_gnu =
            target.target_os == "windows" && target.arch == "x86_64" && target.target_env == "gnu";
        let linux_s390x =
            target.target_os == "linux" && target.arch == "s390x" && target.target_env == "gnu";
        let linux_sparc64 =
            target.target_os == "linux" && target.arch == "sparc64" && target.target_env == "gnu";
        let rust_abi = match sig.abi {
            RustIntrinsic | PlatformIntrinsic | Rust | RustCall => true,
            _ => false,
        };

        // Handle safe Rust thin and fat pointers.
        // Attaches LLVM-level attributes (zext, nonnull, noalias, readonly,
        // dereferenceable size/align) derived from the scalar's valid range
        // and from pointee information at `offset` within `layout`.
        let adjust_for_rust_scalar = |attrs: &mut ArgAttributes,
                                      scalar: &Scalar,
                                      layout: TyLayout<'tcx>,
                                      offset: Size,
                                      is_return: bool| {
            // Booleans are always an i1 that needs to be zero-extended.
            if scalar.is_bool() {
                attrs.set(ArgAttribute::ZExt);
                return;
            }

            // Only pointer types handled below.
            if scalar.value != Pointer {
                return;
            }

            // A non-wrapping valid range that excludes 0 means non-null.
            if scalar.valid_range.start() < scalar.valid_range.end() {
                if *scalar.valid_range.start() > 0 {
                    attrs.set(ArgAttribute::NonNull);
                }
            }

            if let Some(pointee) = layout.pointee_info_at(cx, offset) {
                if let Some(kind) = pointee.safe {
                    attrs.pointee_size = pointee.size;
                    attrs.pointee_align = Some(pointee.align);

                    // `Box` pointer parameters never alias because ownership is transferred
                    // `&mut` pointer parameters never alias other parameters,
                    // or mutable global data
                    //
                    // `&T` where `T` contains no `UnsafeCell<U>` is immutable,
                    // and can be marked as both `readonly` and `noalias`, as
                    // LLVM's definition of `noalias` is based solely on memory
                    // dependencies rather than pointer equality
                    let no_alias = match kind {
                        PointerKind::Shared => false,
                        PointerKind::UniqueOwned => true,
                        PointerKind::Frozen | PointerKind::UniqueBorrowed => !is_return,
                    };
                    if no_alias {
                        attrs.set(ArgAttribute::NoAlias);
                    }

                    if kind == PointerKind::Frozen && !is_return {
                        attrs.set(ArgAttribute::ReadOnly);
                    }
                }
            }
        };

        // Store the index of the last argument. This is useful for working with
        // C-compatible variadic arguments.
        let last_arg_idx = if sig.inputs().is_empty() {
            None
        } else {
            Some(sig.inputs().len() - 1)
        };

        // Classifies a single value: `arg_idx` is `None` for the return value,
        // `Some(i)` for the i-th argument.
        let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| {
            let is_return = arg_idx.is_none();
            let mut arg = mk_arg_type(ty, arg_idx);
            if arg.layout.is_zst() {
                // For some forsaken reason, x86_64-pc-windows-gnu
                // doesn't ignore zero-sized struct arguments.
                // The same is true for s390x-unknown-linux-gnu
                // and sparc64-unknown-linux-gnu.
                if is_return || rust_abi || (!win_x64_gnu && !linux_s390x && !linux_sparc64) {
                    arg.mode = PassMode::Ignore(IgnoreMode::Zst);
                }
            }

            // If this is a C-variadic function, this is not the return value,
            // and there is one or more fixed arguments; ensure that the `VaList`
            // is ignored as an argument.
            if sig.c_variadic {
                match (last_arg_idx, arg_idx) {
                    (Some(last_idx), Some(cur_idx)) if last_idx == cur_idx => {
                        let va_list_did = match cx.tcx().lang_items().va_list() {
                            Some(did) => did,
                            None => bug!("`va_list` lang item required for C-variadic functions"),
                        };
                        match ty.sty {
                            ty::Adt(def, _) if def.did == va_list_did => {
                                // This is the "spoofed" `VaList`. Set the arguments mode
                                // so that it will be ignored.
                                arg.mode = PassMode::Ignore(IgnoreMode::CVarArgs);
                            }
                            _ => (),
                        }
                    }
                    _ => {}
                }
            }

            // FIXME(eddyb) other ABIs don't have logic for scalar pairs.
            if !is_return && rust_abi {
                if let Abi::ScalarPair(ref a, ref b) = arg.layout.abi {
                    let mut a_attrs = ArgAttributes::new();
                    let mut b_attrs = ArgAttributes::new();
                    adjust_for_rust_scalar(&mut a_attrs, a, arg.layout, Size::ZERO, false);
                    // The second scalar starts after `a`, aligned for `b`.
                    adjust_for_rust_scalar(
                        &mut b_attrs,
                        b,
                        arg.layout,
                        a.value.size(cx).align_to(b.value.align(cx).abi),
                        false,
                    );
                    arg.mode = PassMode::Pair(a_attrs, b_attrs);
                    return arg;
                }
            }

            if let Abi::Scalar(ref scalar) = arg.layout.abi {
                if let PassMode::Direct(ref mut attrs) = arg.mode {
                    adjust_for_rust_scalar(attrs, scalar, arg.layout, Size::ZERO, is_return);
                }
            }

            arg
        };

        // Classify the return value and every argument (declared inputs plus
        // any untupled/variadic extras), then apply the per-ABI adjustments.
        let mut fn_ty = FnType {
            ret: arg_of(sig.output(), None),
            args: inputs
                .iter()
                .cloned()
                .chain(extra_args)
                .enumerate()
                .map(|(i, ty)| arg_of(ty, Some(i)))
                .collect(),
            c_variadic: sig.c_variadic,
            conv,
        };
        fn_ty.adjust_for_abi(cx, sig.abi);
        fn_ty
    }

    // Adjusts `self` in place for `abi`: Rust-family ABIs get the fixup
    // below; all other ABIs defer to the per-target C ABI code.
    fn adjust_for_abi(&mut self, cx: &C, abi: SpecAbi) {
        if abi == SpecAbi::Unadjusted {
            return;
        }

        if abi == SpecAbi::Rust
            || abi == SpecAbi::RustCall
            || abi == SpecAbi::RustIntrinsic
            || abi == SpecAbi::PlatformIntrinsic
        {
            let fixup = |arg: &mut ArgType<'tcx, Ty<'tcx>>| {
                if arg.is_ignore() {
                    return;
                }

                match arg.layout.abi {
                    Abi::Aggregate { .. } => {}

                    // This is a fun case! The gist of what this is doing is
                    // that we want callers and callees to always agree on the
                    // ABI of how they pass SIMD arguments. If we were to *not*
                    // make these arguments indirect then they'd be immediates
                    // in LLVM, which means that they'd used whatever the
                    // appropriate ABI is for the callee and the caller. That
                    // means, for example, if the caller doesn't have AVX
                    // enabled but the callee does, then passing an AVX argument
                    // across this boundary would cause corrupt data to show up.
                    //
                    // This problem is fixed by unconditionally passing SIMD
                    // arguments through memory between callers and callees
                    // which should get them all to agree on ABI regardless of
                    // target feature sets. Some more information about this
                    // issue can be found in #44367.
                    //
                    // Note that the platform intrinsic ABI is exempt here as
                    // that's how we connect up to LLVM and it's unstable
                    // anyway, we control all calls to it in libstd.
                    Abi::Vector { .. }
                        if abi != SpecAbi::PlatformIntrinsic
                            && cx.tcx().sess.target.target.options.simd_types_indirect =>
                    {
                        arg.make_indirect();
                        return;
                    }

                    _ => return,
                }

                // Aggregates: pass indirectly when unsized or wider than a
                // pointer; otherwise cast to an integer of the same size.
                let size = arg.layout.size;
                if arg.layout.is_unsized() || size > Pointer.size(cx) {
                    arg.make_indirect();
                } else {
                    // We want to pass small aggregates as immediates, but using
                    // a LLVM aggregate type for this leads to bad optimizations,
                    // so we pick an appropriately sized integer type instead.
                    arg.cast_to(Reg {
                        kind: RegKind::Integer,
                        size,
                    });
                }
            };
            fixup(&mut self.ret);
            for arg in &mut self.args {
                fixup(arg);
            }
            // Indirect returns get the `sret` (struct-return) attribute.
            if let PassMode::Indirect(ref mut attrs, _) = self.ret.mode {
                attrs.set(ArgAttribute::StructRet);
            }
            return;
        }

        // Non-Rust ABIs: hand off to the target-specific C ABI classifier.
        if let Err(msg) = self.adjust_for_cabi(cx, abi) {
            cx.tcx().sess.fatal(&msg);
        }
    }
}