]> git.lizzy.rs Git - rust.git/blob - src/librustc/ty/layout.rs
move raw span to tt reader
[rust.git] / src / librustc / ty / layout.rs
1 use crate::session::{self, DataTypeKind};
2 use crate::ty::{self, Ty, TyCtxt, TypeFoldable, ReprOptions};
3
4 use syntax::ast::{self, Ident, IntTy, UintTy};
5 use syntax::attr;
6 use syntax_pos::DUMMY_SP;
7
8 use std::cmp;
9 use std::fmt;
10 use std::i128;
11 use std::iter;
12 use std::mem;
13 use std::ops::Bound;
14
15 use crate::hir;
16 use crate::ich::StableHashingContext;
17 use rustc_data_structures::indexed_vec::{IndexVec, Idx};
18 use rustc_data_structures::stable_hasher::{HashStable, StableHasher,
19                                            StableHasherResult};
20
21 pub use rustc_target::abi::*;
22
/// Extension methods for `rustc_target`'s `Integer`, connecting the
/// target-independent integer widths to this crate's type system.
pub trait IntegerExt {
    /// Returns the Rust integer type (`i8`..`i128` / `u8`..`u128`) of this
    /// width with the given signedness.
    fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, signed: bool) -> Ty<'tcx>;
    /// Converts an `attr::IntType` (as parsed from a `#[repr(...)]`
    /// attribute) into an `Integer`, consulting the data layout for
    /// `isize`/`usize`.
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
    /// Picks the integer width and signedness used to represent an enum
    /// discriminant range `min..=max`, honoring any `#[repr]` hint on `ty`.
    fn repr_discr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                            ty: Ty<'tcx>,
                            repr: &ReprOptions,
                            min: i128,
                            max: i128)
                            -> (Integer, bool);
}
33
34 impl IntegerExt for Integer {
35     fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, signed: bool) -> Ty<'tcx> {
36         match (*self, signed) {
37             (I8, false) => tcx.types.u8,
38             (I16, false) => tcx.types.u16,
39             (I32, false) => tcx.types.u32,
40             (I64, false) => tcx.types.u64,
41             (I128, false) => tcx.types.u128,
42             (I8, true) => tcx.types.i8,
43             (I16, true) => tcx.types.i16,
44             (I32, true) => tcx.types.i32,
45             (I64, true) => tcx.types.i64,
46             (I128, true) => tcx.types.i128,
47         }
48     }
49
50     /// Gets the Integer type from an attr::IntType.
51     fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
52         let dl = cx.data_layout();
53
54         match ity {
55             attr::SignedInt(IntTy::I8) | attr::UnsignedInt(UintTy::U8) => I8,
56             attr::SignedInt(IntTy::I16) | attr::UnsignedInt(UintTy::U16) => I16,
57             attr::SignedInt(IntTy::I32) | attr::UnsignedInt(UintTy::U32) => I32,
58             attr::SignedInt(IntTy::I64) | attr::UnsignedInt(UintTy::U64) => I64,
59             attr::SignedInt(IntTy::I128) | attr::UnsignedInt(UintTy::U128) => I128,
60             attr::SignedInt(IntTy::Isize) | attr::UnsignedInt(UintTy::Usize) => {
61                 dl.ptr_sized_integer()
62             }
63         }
64     }
65
66     /// Finds the appropriate Integer type and signedness for the given
67     /// signed discriminant range and #[repr] attribute.
68     /// N.B.: u128 values above i128::MAX will be treated as signed, but
69     /// that shouldn't affect anything, other than maybe debuginfo.
70     fn repr_discr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
71                             ty: Ty<'tcx>,
72                             repr: &ReprOptions,
73                             min: i128,
74                             max: i128)
75                             -> (Integer, bool) {
76         // Theoretically, negative values could be larger in unsigned representation
77         // than the unsigned representation of the signed minimum. However, if there
78         // are any negative values, the only valid unsigned representation is u128
79         // which can fit all i128 values, so the result remains unaffected.
80         let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
81         let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));
82
83         let mut min_from_extern = None;
84         let min_default = I8;
85
86         if let Some(ity) = repr.int {
87             let discr = Integer::from_attr(&tcx, ity);
88             let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
89             if discr < fit {
90                 bug!("Integer::repr_discr: `#[repr]` hint too small for \
91                       discriminant range of enum `{}", ty)
92             }
93             return (discr, ity.is_signed());
94         }
95
96         if repr.c() {
97             match &tcx.sess.target.target.arch[..] {
98                 // WARNING: the ARM EABI has two variants; the one corresponding
99                 // to `at_least == I32` appears to be used on Linux and NetBSD,
100                 // but some systems may use the variant corresponding to no
101                 // lower bound. However, we don't run on those yet...?
102                 "arm" => min_from_extern = Some(I32),
103                 _ => min_from_extern = Some(I32),
104             }
105         }
106
107         let at_least = min_from_extern.unwrap_or(min_default);
108
109         // If there are no negative values, we can use the unsigned fit.
110         if min >= 0 {
111             (cmp::max(unsigned_fit, at_least), false)
112         } else {
113             (cmp::max(signed_fit, at_least), true)
114         }
115     }
116 }
117
/// Extension methods for `rustc_target`'s `Primitive`, mapping layout
/// primitives back to Rust types.
pub trait PrimitiveExt {
    /// Returns the Rust type corresponding to this layout primitive.
    fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Ty<'tcx>;
}
121
122 impl PrimitiveExt for Primitive {
123     fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Ty<'tcx> {
124         match *self {
125             Int(i, signed) => i.to_ty(tcx, signed),
126             Float(FloatTy::F32) => tcx.types.f32,
127             Float(FloatTy::F64) => tcx.types.f64,
128             Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
129         }
130     }
131 }
132
/// The first half of a fat pointer.
///
/// - For a trait object, this is the address of the box.
/// - For a slice, this is the base address.
pub const FAT_PTR_ADDR: usize = 0;

/// The second half of a fat pointer.
///
/// - For a trait object, this is the address of the vtable.
/// - For a slice, this is the length.
pub const FAT_PTR_EXTRA: usize = 1;
144
/// Reasons a layout computation can fail.
#[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable)]
pub enum LayoutError<'tcx> {
    /// The layout of the type could not be determined, e.g. an unsized tail
    /// that is neither a slice, `str`, nor trait object, or an array length
    /// that fails to normalize to a constant.
    Unknown(Ty<'tcx>),
    /// A size computation overflowed (the type is too big for the target).
    SizeOverflow(Ty<'tcx>)
}
150
151 impl<'tcx> fmt::Display for LayoutError<'tcx> {
152     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
153         match *self {
154             LayoutError::Unknown(ty) => {
155                 write!(f, "the type `{:?}` has an unknown layout", ty)
156             }
157             LayoutError::SizeOverflow(ty) => {
158                 write!(f, "the type `{:?}` is too big for the current architecture", ty)
159             }
160         }
161     }
162 }
163
/// Query provider computing the layout of a type.
///
/// Tracks recursion depth through the thread-local `ImplicitCtxt` so that
/// infinitely-sized types abort with a fatal error instead of overflowing
/// the stack, then delegates the real work to
/// `LayoutCx::layout_raw_uncached`.
fn layout_raw<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                        query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
                        -> Result<&'tcx LayoutDetails, LayoutError<'tcx>>
{
    ty::tls::with_related_context(tcx, move |icx| {
        let rec_limit = *tcx.sess.recursion_limit.get();
        let (param_env, ty) = query.into_parts();

        // Depth is compared against the session's `#![recursion_limit]`;
        // exceeding it is a fatal (non-recoverable) error.
        if icx.layout_depth > rec_limit {
            tcx.sess.fatal(
                &format!("overflow representing the type `{}`", ty));
        }

        // Update the ImplicitCtxt to increase the layout_depth
        let icx = ty::tls::ImplicitCtxt {
            layout_depth: icx.layout_depth + 1,
            ..icx.clone()
        };

        // Re-enter the TLS context with the incremented depth so that any
        // nested layout queries see it.
        ty::tls::enter_context(&icx, |_| {
            let cx = LayoutCx { tcx, param_env };
            let layout = cx.layout_raw_uncached(ty);
            // Type-level uninhabitedness should always imply ABI uninhabitedness.
            if let Ok(layout) = layout {
                if ty.conservative_is_privately_uninhabited(tcx) {
                    assert!(layout.abi.is_uninhabited());
                }
            }
            layout
        })
    })
}
196
197 pub fn provide(providers: &mut ty::query::Providers<'_>) {
198     *providers = ty::query::Providers {
199         layout_raw,
200         ..*providers
201     };
202 }
203
/// Context for layout computations: a type-context handle (`tcx`, generic so
/// it can be a `TyCtxt` or another layout-capable context) paired with the
/// parameter environment the layout is computed in.
pub struct LayoutCx<'tcx, C> {
    pub tcx: C,
    pub param_env: ty::ParamEnv<'tcx>,
}
208
209 impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
210     fn layout_raw_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx LayoutDetails, LayoutError<'tcx>> {
211         let tcx = self.tcx;
212         let param_env = self.param_env;
213         let dl = self.data_layout();
214         let scalar_unit = |value: Primitive| {
215             let bits = value.size(dl).bits();
216             assert!(bits <= 128);
217             Scalar {
218                 value,
219                 valid_range: 0..=(!0 >> (128 - bits))
220             }
221         };
222         let scalar = |value: Primitive| {
223             tcx.intern_layout(LayoutDetails::scalar(self, scalar_unit(value)))
224         };
225         let scalar_pair = |a: Scalar, b: Scalar| {
226             let b_align = b.value.align(dl);
227             let align = a.value.align(dl).max(b_align).max(dl.aggregate_align);
228             let b_offset = a.value.size(dl).align_to(b_align.abi);
229             let size = (b_offset + b.value.size(dl)).align_to(align.abi);
230             LayoutDetails {
231                 variants: Variants::Single { index: VariantIdx::new(0) },
232                 fields: FieldPlacement::Arbitrary {
233                     offsets: vec![Size::ZERO, b_offset],
234                     memory_index: vec![0, 1]
235                 },
236                 abi: Abi::ScalarPair(a, b),
237                 align,
238                 size
239             }
240         };
241
242         #[derive(Copy, Clone, Debug)]
243         enum StructKind {
244             /// A tuple, closure, or univariant which cannot be coerced to unsized.
245             AlwaysSized,
246             /// A univariant, the last field of which may be coerced to unsized.
247             MaybeUnsized,
248             /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
249             Prefixed(Size, Align),
250         }
251
252         let univariant_uninterned = |fields: &[TyLayout<'_>], repr: &ReprOptions, kind| {
253             let packed = repr.packed();
254             if packed && repr.align > 0 {
255                 bug!("struct cannot be packed and aligned");
256             }
257
258             let pack = Align::from_bytes(repr.pack as u64).unwrap();
259
260             let mut align = if packed {
261                 dl.i8_align
262             } else {
263                 dl.aggregate_align
264             };
265
266             let mut sized = true;
267             let mut offsets = vec![Size::ZERO; fields.len()];
268             let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();
269
270             let mut optimize = !repr.inhibit_struct_field_reordering_opt();
271             if let StructKind::Prefixed(_, align) = kind {
272                 optimize &= align.bytes() == 1;
273             }
274
275             if optimize {
276                 let end = if let StructKind::MaybeUnsized = kind {
277                     fields.len() - 1
278                 } else {
279                     fields.len()
280                 };
281                 let optimizing = &mut inverse_memory_index[..end];
282                 let field_align = |f: &TyLayout<'_>| {
283                     if packed { f.align.abi.min(pack) } else { f.align.abi }
284                 };
285                 match kind {
286                     StructKind::AlwaysSized |
287                     StructKind::MaybeUnsized => {
288                         optimizing.sort_by_key(|&x| {
289                             // Place ZSTs first to avoid "interesting offsets",
290                             // especially with only one or two non-ZST fields.
291                             let f = &fields[x as usize];
292                             (!f.is_zst(), cmp::Reverse(field_align(f)))
293                         });
294                     }
295                     StructKind::Prefixed(..) => {
296                         optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
297                     }
298                 }
299             }
300
301             // inverse_memory_index holds field indices by increasing memory offset.
302             // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
303             // We now write field offsets to the corresponding offset slot;
304             // field 5 with offset 0 puts 0 in offsets[5].
305             // At the bottom of this function, we use inverse_memory_index to produce memory_index.
306
307             let mut offset = Size::ZERO;
308
309             if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
310                 let prefix_align = if packed {
311                     prefix_align.min(pack)
312                 } else {
313                     prefix_align
314                 };
315                 align = align.max(AbiAndPrefAlign::new(prefix_align));
316                 offset = prefix_size.align_to(prefix_align);
317             }
318
319             for &i in &inverse_memory_index {
320                 let field = fields[i as usize];
321                 if !sized {
322                     bug!("univariant: field #{} of `{}` comes after unsized field",
323                          offsets.len(), ty);
324                 }
325
326                 if field.is_unsized() {
327                     sized = false;
328                 }
329
330                 // Invariant: offset < dl.obj_size_bound() <= 1<<61
331                 let field_align = if packed {
332                     field.align.min(AbiAndPrefAlign::new(pack))
333                 } else {
334                     field.align
335                 };
336                 offset = offset.align_to(field_align.abi);
337                 align = align.max(field_align);
338
339                 debug!("univariant offset: {:?} field: {:#?}", offset, field);
340                 offsets[i as usize] = offset;
341
342                 offset = offset.checked_add(field.size, dl)
343                     .ok_or(LayoutError::SizeOverflow(ty))?;
344             }
345
346             if repr.align > 0 {
347                 let repr_align = repr.align as u64;
348                 align = align.max(AbiAndPrefAlign::new(Align::from_bytes(repr_align).unwrap()));
349                 debug!("univariant repr_align: {:?}", repr_align);
350             }
351
352             debug!("univariant min_size: {:?}", offset);
353             let min_size = offset;
354
355             // As stated above, inverse_memory_index holds field indices by increasing offset.
356             // This makes it an already-sorted view of the offsets vec.
357             // To invert it, consider:
358             // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
359             // Field 5 would be the first element, so memory_index is i:
360             // Note: if we didn't optimize, it's already right.
361
362             let mut memory_index;
363             if optimize {
364                 memory_index = vec![0; inverse_memory_index.len()];
365
366                 for i in 0..inverse_memory_index.len() {
367                     memory_index[inverse_memory_index[i] as usize]  = i as u32;
368                 }
369             } else {
370                 memory_index = inverse_memory_index;
371             }
372
373             let size = min_size.align_to(align.abi);
374             let mut abi = Abi::Aggregate { sized };
375
376             // Unpack newtype ABIs and find scalar pairs.
377             if sized && size.bytes() > 0 {
378                 // All other fields must be ZSTs, and we need them to all start at 0.
379                 let mut zst_offsets =
380                     offsets.iter().enumerate().filter(|&(i, _)| fields[i].is_zst());
381                 if zst_offsets.all(|(_, o)| o.bytes() == 0) {
382                     let mut non_zst_fields =
383                         fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());
384
385                     match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
386                         // We have exactly one non-ZST field.
387                         (Some((i, field)), None, None) => {
388                             // Field fills the struct and it has a scalar or scalar pair ABI.
389                             if offsets[i].bytes() == 0 &&
390                                align.abi == field.align.abi &&
391                                size == field.size {
392                                 match field.abi {
393                                     // For plain scalars, or vectors of them, we can't unpack
394                                     // newtypes for `#[repr(C)]`, as that affects C ABIs.
395                                     Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
396                                         abi = field.abi.clone();
397                                     }
398                                     // But scalar pairs are Rust-specific and get
399                                     // treated as aggregates by C ABIs anyway.
400                                     Abi::ScalarPair(..) => {
401                                         abi = field.abi.clone();
402                                     }
403                                     _ => {}
404                                 }
405                             }
406                         }
407
408                         // Two non-ZST fields, and they're both scalars.
409                         (Some((i, &TyLayout {
410                             details: &LayoutDetails { abi: Abi::Scalar(ref a), .. }, ..
411                         })), Some((j, &TyLayout {
412                             details: &LayoutDetails { abi: Abi::Scalar(ref b), .. }, ..
413                         })), None) => {
414                             // Order by the memory placement, not source order.
415                             let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
416                                 ((i, a), (j, b))
417                             } else {
418                                 ((j, b), (i, a))
419                             };
420                             let pair = scalar_pair(a.clone(), b.clone());
421                             let pair_offsets = match pair.fields {
422                                 FieldPlacement::Arbitrary {
423                                     ref offsets,
424                                     ref memory_index
425                                 } => {
426                                     assert_eq!(memory_index, &[0, 1]);
427                                     offsets
428                                 }
429                                 _ => bug!()
430                             };
431                             if offsets[i] == pair_offsets[0] &&
432                                offsets[j] == pair_offsets[1] &&
433                                align == pair.align &&
434                                size == pair.size {
435                                 // We can use `ScalarPair` only when it matches our
436                                 // already computed layout (including `#[repr(C)]`).
437                                 abi = pair.abi;
438                             }
439                         }
440
441                         _ => {}
442                     }
443                 }
444             }
445
446             if sized && fields.iter().any(|f| f.abi.is_uninhabited()) {
447                 abi = Abi::Uninhabited;
448             }
449
450             Ok(LayoutDetails {
451                 variants: Variants::Single { index: VariantIdx::new(0) },
452                 fields: FieldPlacement::Arbitrary {
453                     offsets,
454                     memory_index
455                 },
456                 abi,
457                 align,
458                 size
459             })
460         };
461         let univariant = |fields: &[TyLayout<'_>], repr: &ReprOptions, kind| {
462             Ok(tcx.intern_layout(univariant_uninterned(fields, repr, kind)?))
463         };
464         debug_assert!(!ty.has_infer_types());
465
466         Ok(match ty.sty {
467             // Basic scalars.
468             ty::Bool => {
469                 tcx.intern_layout(LayoutDetails::scalar(self, Scalar {
470                     value: Int(I8, false),
471                     valid_range: 0..=1
472                 }))
473             }
474             ty::Char => {
475                 tcx.intern_layout(LayoutDetails::scalar(self, Scalar {
476                     value: Int(I32, false),
477                     valid_range: 0..=0x10FFFF
478                 }))
479             }
480             ty::Int(ity) => {
481                 scalar(Int(Integer::from_attr(dl, attr::SignedInt(ity)), true))
482             }
483             ty::Uint(ity) => {
484                 scalar(Int(Integer::from_attr(dl, attr::UnsignedInt(ity)), false))
485             }
486             ty::Float(fty) => scalar(Float(fty)),
487             ty::FnPtr(_) => {
488                 let mut ptr = scalar_unit(Pointer);
489                 ptr.valid_range = 1..=*ptr.valid_range.end();
490                 tcx.intern_layout(LayoutDetails::scalar(self, ptr))
491             }
492
493             // The never type.
494             ty::Never => {
495                 tcx.intern_layout(LayoutDetails {
496                     variants: Variants::Single { index: VariantIdx::new(0) },
497                     fields: FieldPlacement::Union(0),
498                     abi: Abi::Uninhabited,
499                     align: dl.i8_align,
500                     size: Size::ZERO
501                 })
502             }
503
504             // Potentially-fat pointers.
505             ty::Ref(_, pointee, _) |
506             ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
507                 let mut data_ptr = scalar_unit(Pointer);
508                 if !ty.is_unsafe_ptr() {
509                     data_ptr.valid_range = 1..=*data_ptr.valid_range.end();
510                 }
511
512                 let pointee = tcx.normalize_erasing_regions(param_env, pointee);
513                 if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
514                     return Ok(tcx.intern_layout(LayoutDetails::scalar(self, data_ptr)));
515                 }
516
517                 let unsized_part = tcx.struct_tail(pointee);
518                 let metadata = match unsized_part.sty {
519                     ty::Foreign(..) => {
520                         return Ok(tcx.intern_layout(LayoutDetails::scalar(self, data_ptr)));
521                     }
522                     ty::Slice(_) | ty::Str => {
523                         scalar_unit(Int(dl.ptr_sized_integer(), false))
524                     }
525                     ty::Dynamic(..) => {
526                         let mut vtable = scalar_unit(Pointer);
527                         vtable.valid_range = 1..=*vtable.valid_range.end();
528                         vtable
529                     }
530                     _ => return Err(LayoutError::Unknown(unsized_part))
531                 };
532
533                 // Effectively a (ptr, meta) tuple.
534                 tcx.intern_layout(scalar_pair(data_ptr, metadata))
535             }
536
537             // Arrays and slices.
538             ty::Array(element, mut count) => {
539                 if count.has_projections() {
540                     count = tcx.normalize_erasing_regions(param_env, count);
541                     if count.has_projections() {
542                         return Err(LayoutError::Unknown(ty));
543                     }
544                 }
545
546                 let element = self.layout_of(element)?;
547                 let count = count.unwrap_usize(tcx);
548                 let size = element.size.checked_mul(count, dl)
549                     .ok_or(LayoutError::SizeOverflow(ty))?;
550
551                 let abi = if count != 0 && ty.conservative_is_privately_uninhabited(tcx) {
552                     Abi::Uninhabited
553                 } else {
554                     Abi::Aggregate { sized: true }
555                 };
556
557                 tcx.intern_layout(LayoutDetails {
558                     variants: Variants::Single { index: VariantIdx::new(0) },
559                     fields: FieldPlacement::Array {
560                         stride: element.size,
561                         count
562                     },
563                     abi,
564                     align: element.align,
565                     size
566                 })
567             }
568             ty::Slice(element) => {
569                 let element = self.layout_of(element)?;
570                 tcx.intern_layout(LayoutDetails {
571                     variants: Variants::Single { index: VariantIdx::new(0) },
572                     fields: FieldPlacement::Array {
573                         stride: element.size,
574                         count: 0
575                     },
576                     abi: Abi::Aggregate { sized: false },
577                     align: element.align,
578                     size: Size::ZERO
579                 })
580             }
581             ty::Str => {
582                 tcx.intern_layout(LayoutDetails {
583                     variants: Variants::Single { index: VariantIdx::new(0) },
584                     fields: FieldPlacement::Array {
585                         stride: Size::from_bytes(1),
586                         count: 0
587                     },
588                     abi: Abi::Aggregate { sized: false },
589                     align: dl.i8_align,
590                     size: Size::ZERO
591                 })
592             }
593
594             // Odd unit types.
595             ty::FnDef(..) => {
596                 univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?
597             }
598             ty::Dynamic(..) | ty::Foreign(..) => {
599                 let mut unit = univariant_uninterned(&[], &ReprOptions::default(),
600                   StructKind::AlwaysSized)?;
601                 match unit.abi {
602                     Abi::Aggregate { ref mut sized } => *sized = false,
603                     _ => bug!()
604                 }
605                 tcx.intern_layout(unit)
606             }
607
608             ty::Generator(def_id, ref substs, _) => {
609                 // FIXME(tmandry): For fields that are repeated in multiple
610                 // variants in the GeneratorLayout, we need code to ensure that
611                 // the offset of these fields never change. Right now this is
612                 // not an issue since every variant has every field, but once we
613                 // optimize this we have to be more careful.
614
615                 let discr_index = substs.prefix_tys(def_id, tcx).count();
616                 let prefix_tys = substs.prefix_tys(def_id, tcx)
617                     .chain(iter::once(substs.discr_ty(tcx)));
618                 let prefix = univariant_uninterned(
619                     &prefix_tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
620                     &ReprOptions::default(),
621                     StructKind::AlwaysSized)?;
622
623                 let mut size = prefix.size;
624                 let mut align = prefix.align;
625                 let variants_tys = substs.state_tys(def_id, tcx);
626                 let variants = variants_tys.enumerate().map(|(i, variant_tys)| {
627                     let mut variant = univariant_uninterned(
628                         &variant_tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
629                         &ReprOptions::default(),
630                         StructKind::Prefixed(prefix.size, prefix.align.abi))?;
631
632                     variant.variants = Variants::Single { index: VariantIdx::new(i) };
633
634                     size = size.max(variant.size);
635                     align = align.max(variant.align);
636
637                     Ok(variant)
638                 }).collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
639
640                 let abi = if prefix.abi.is_uninhabited() ||
641                              variants.iter().all(|v| v.abi.is_uninhabited()) {
642                     Abi::Uninhabited
643                 } else {
644                     Abi::Aggregate { sized: true }
645                 };
646                 let discr = match &self.layout_of(substs.discr_ty(tcx))?.abi {
647                     Abi::Scalar(s) => s.clone(),
648                     _ => bug!(),
649                 };
650
651                 let layout = tcx.intern_layout(LayoutDetails {
652                     variants: Variants::Multiple {
653                         discr,
654                         discr_kind: DiscriminantKind::Tag,
655                         discr_index,
656                         variants,
657                     },
658                     fields: prefix.fields,
659                     abi,
660                     size,
661                     align,
662                 });
663                 debug!("generator layout: {:#?}", layout);
664                 layout
665             }
666
667             ty::Closure(def_id, ref substs) => {
668                 let tys = substs.upvar_tys(def_id, tcx);
669                 univariant(&tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
670                     &ReprOptions::default(),
671                     StructKind::AlwaysSized)?
672             }
673
674             ty::Tuple(tys) => {
675                 let kind = if tys.len() == 0 {
676                     StructKind::AlwaysSized
677                 } else {
678                     StructKind::MaybeUnsized
679                 };
680
681                 univariant(&tys.iter().map(|k| {
682                     self.layout_of(k.expect_ty())
683                 }).collect::<Result<Vec<_>, _>>()?, &ReprOptions::default(), kind)?
684             }
685
686             // SIMD vector types.
687             ty::Adt(def, ..) if def.repr.simd() => {
688                 let element = self.layout_of(ty.simd_type(tcx))?;
689                 let count = ty.simd_size(tcx) as u64;
690                 assert!(count > 0);
691                 let scalar = match element.abi {
692                     Abi::Scalar(ref scalar) => scalar.clone(),
693                     _ => {
694                         tcx.sess.fatal(&format!("monomorphising SIMD type `{}` with \
695                                                  a non-machine element type `{}`",
696                                                 ty, element.ty));
697                     }
698                 };
699                 let size = element.size.checked_mul(count, dl)
700                     .ok_or(LayoutError::SizeOverflow(ty))?;
701                 let align = dl.vector_align(size);
702                 let size = size.align_to(align.abi);
703
704                 tcx.intern_layout(LayoutDetails {
705                     variants: Variants::Single { index: VariantIdx::new(0) },
706                     fields: FieldPlacement::Array {
707                         stride: element.size,
708                         count
709                     },
710                     abi: Abi::Vector {
711                         element: scalar,
712                         count
713                     },
714                     size,
715                     align,
716                 })
717             }
718
719             // ADTs.
720             ty::Adt(def, substs) => {
721                 // Cache the field layouts.
722                 let variants = def.variants.iter().map(|v| {
723                     v.fields.iter().map(|field| {
724                         self.layout_of(field.ty(tcx, substs))
725                     }).collect::<Result<Vec<_>, _>>()
726                 }).collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
727
728                 if def.is_union() {
729                     let packed = def.repr.packed();
730                     if packed && def.repr.align > 0 {
731                         bug!("Union cannot be packed and aligned");
732                     }
733
734                     let pack = Align::from_bytes(def.repr.pack as u64).unwrap();
735
736                     let mut align = if packed {
737                         dl.i8_align
738                     } else {
739                         dl.aggregate_align
740                     };
741
742                     if def.repr.align > 0 {
743                         let repr_align = def.repr.align as u64;
744                         align = align.max(
745                             AbiAndPrefAlign::new(Align::from_bytes(repr_align).unwrap()));
746                     }
747
748                     let optimize = !def.repr.inhibit_union_abi_opt();
749                     let mut size = Size::ZERO;
750                     let mut abi = Abi::Aggregate { sized: true };
751                     let index = VariantIdx::new(0);
752                     for field in &variants[index] {
753                         assert!(!field.is_unsized());
754
755                         let field_align = if packed {
756                             field.align.min(AbiAndPrefAlign::new(pack))
757                         } else {
758                             field.align
759                         };
760                         align = align.max(field_align);
761
762                         // If all non-ZST fields have the same ABI, forward this ABI
763                         if optimize && !field.is_zst() {
764                             // Normalize scalar_unit to the maximal valid range
765                             let field_abi = match &field.abi {
766                                 Abi::Scalar(x) => Abi::Scalar(scalar_unit(x.value)),
767                                 Abi::ScalarPair(x, y) => {
768                                     Abi::ScalarPair(
769                                         scalar_unit(x.value),
770                                         scalar_unit(y.value),
771                                     )
772                                 }
773                                 Abi::Vector { element: x, count } => {
774                                     Abi::Vector {
775                                         element: scalar_unit(x.value),
776                                         count: *count,
777                                     }
778                                 }
779                                 Abi::Uninhabited |
780                                 Abi::Aggregate { .. }  => Abi::Aggregate { sized: true },
781                             };
782
783                             if size == Size::ZERO {
784                                 // first non ZST: initialize 'abi'
785                                 abi = field_abi;
786                             } else if abi != field_abi  {
787                                 // different fields have different ABI: reset to Aggregate
788                                 abi = Abi::Aggregate { sized: true };
789                             }
790                         }
791
792                         size = cmp::max(size, field.size);
793                     }
794
795                     return Ok(tcx.intern_layout(LayoutDetails {
796                         variants: Variants::Single { index },
797                         fields: FieldPlacement::Union(variants[index].len()),
798                         abi,
799                         align,
800                         size: size.align_to(align.abi)
801                     }));
802                 }
803
804                 // A variant is absent if it's uninhabited and only has ZST fields.
805                 // Present uninhabited variants only require space for their fields,
806                 // but *not* an encoding of the discriminant (e.g., a tag value).
807                 // See issue #49298 for more details on the need to leave space
808                 // for non-ZST uninhabited data (mostly partial initialization).
809                 let absent = |fields: &[TyLayout<'_>]| {
810                     let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
811                     let is_zst = fields.iter().all(|f| f.is_zst());
812                     uninhabited && is_zst
813                 };
814                 let (present_first, present_second) = {
815                     let mut present_variants = variants.iter_enumerated().filter_map(|(i, v)| {
816                         if absent(v) {
817                             None
818                         } else {
819                             Some(i)
820                         }
821                     });
822                     (present_variants.next(), present_variants.next())
823                 };
824                 if present_first.is_none() {
825                     // Uninhabited because it has no variants, or only absent ones.
826                     return tcx.layout_raw(param_env.and(tcx.types.never));
827                 }
828
829                 let is_struct = !def.is_enum() ||
830                     // Only one variant is present.
831                     (present_second.is_none() &&
832                     // Representation optimizations are allowed.
833                     !def.repr.inhibit_enum_layout_opt());
834                 if is_struct {
835                     // Struct, or univariant enum equivalent to a struct.
836                     // (Typechecking will reject discriminant-sizing attrs.)
837
838                     let v = present_first.unwrap();
839                     let kind = if def.is_enum() || variants[v].len() == 0 {
840                         StructKind::AlwaysSized
841                     } else {
842                         let param_env = tcx.param_env(def.did);
843                         let last_field = def.variants[v].fields.last().unwrap();
844                         let always_sized = tcx.type_of(last_field.did)
845                                               .is_sized(tcx.at(DUMMY_SP), param_env);
846                         if !always_sized { StructKind::MaybeUnsized }
847                         else { StructKind::AlwaysSized }
848                     };
849
850                     let mut st = univariant_uninterned(&variants[v], &def.repr, kind)?;
851                     st.variants = Variants::Single { index: v };
852                     let (start, end) = self.tcx.layout_scalar_valid_range(def.did);
853                     match st.abi {
854                         Abi::Scalar(ref mut scalar) |
855                         Abi::ScalarPair(ref mut scalar, _) => {
856                             // the asserts ensure that we are not using the
857                             // `#[rustc_layout_scalar_valid_range(n)]`
858                             // attribute to widen the range of anything as that would probably
859                             // result in UB somewhere
860                             if let Bound::Included(start) = start {
861                                 assert!(*scalar.valid_range.start() <= start);
862                                 scalar.valid_range = start..=*scalar.valid_range.end();
863                             }
864                             if let Bound::Included(end) = end {
865                                 assert!(*scalar.valid_range.end() >= end);
866                                 scalar.valid_range = *scalar.valid_range.start()..=end;
867                             }
868                         }
869                         _ => assert!(
870                             start == Bound::Unbounded && end == Bound::Unbounded,
871                             "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
872                             def,
873                             st,
874                         ),
875                     }
876                     return Ok(tcx.intern_layout(st));
877                 }
878
879                 // The current code for niche-filling relies on variant indices
880                 // instead of actual discriminants, so dataful enums with
881                 // explicit discriminants (RFC #2363) would misbehave.
882                 let no_explicit_discriminants = def.variants.iter_enumerated()
883                     .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i.as_u32()));
884
885                 // Niche-filling enum optimization.
886                 if !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants {
887                     let mut dataful_variant = None;
888                     let mut niche_variants = VariantIdx::MAX..=VariantIdx::new(0);
889
890                     // Find one non-ZST variant.
891                     'variants: for (v, fields) in variants.iter_enumerated() {
892                         if absent(fields) {
893                             continue 'variants;
894                         }
895                         for f in fields {
896                             if !f.is_zst() {
897                                 if dataful_variant.is_none() {
898                                     dataful_variant = Some(v);
899                                     continue 'variants;
900                                 } else {
901                                     dataful_variant = None;
902                                     break 'variants;
903                                 }
904                             }
905                         }
906                         niche_variants = *niche_variants.start().min(&v)..=v;
907                     }
908
909                     if niche_variants.start() > niche_variants.end() {
910                         dataful_variant = None;
911                     }
912
913                     if let Some(i) = dataful_variant {
914                         let count = (
915                             niche_variants.end().as_u32() - niche_variants.start().as_u32() + 1
916                         ) as u128;
917                         for (field_index, &field) in variants[i].iter().enumerate() {
918                             let niche = match self.find_niche(field)? {
919                                 Some(niche) => niche,
920                                 _ => continue,
921                             };
922                             let (niche_start, niche_scalar) = match niche.reserve(self, count) {
923                                 Some(pair) => pair,
924                                 None => continue,
925                             };
926
927                             let mut align = dl.aggregate_align;
928                             let st = variants.iter_enumerated().map(|(j, v)| {
929                                 let mut st = univariant_uninterned(v,
930                                     &def.repr, StructKind::AlwaysSized)?;
931                                 st.variants = Variants::Single { index: j };
932
933                                 align = align.max(st.align);
934
935                                 Ok(st)
936                             }).collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
937
938                             let offset = st[i].fields.offset(field_index) + niche.offset;
939                             let size = st[i].size;
940
941                             let mut abi = match st[i].abi {
942                                 Abi::Scalar(_) => Abi::Scalar(niche_scalar.clone()),
943                                 Abi::ScalarPair(ref first, ref second) => {
944                                     // We need to use scalar_unit to reset the
945                                     // valid range to the maximal one for that
946                                     // primitive, because only the niche is
947                                     // guaranteed to be initialised, not the
948                                     // other primitive.
949                                     if offset.bytes() == 0 {
950                                         Abi::ScalarPair(
951                                             niche_scalar.clone(),
952                                             scalar_unit(second.value),
953                                         )
954                                     } else {
955                                         Abi::ScalarPair(
956                                             scalar_unit(first.value),
957                                             niche_scalar.clone(),
958                                         )
959                                     }
960                                 }
961                                 _ => Abi::Aggregate { sized: true },
962                             };
963
964                             if st.iter().all(|v| v.abi.is_uninhabited()) {
965                                 abi = Abi::Uninhabited;
966                             }
967
968                             return Ok(tcx.intern_layout(LayoutDetails {
969                                 variants: Variants::Multiple {
970                                     discr: niche_scalar,
971                                     discr_kind: DiscriminantKind::Niche {
972                                         dataful_variant: i,
973                                         niche_variants,
974                                         niche_start,
975                                     },
976                                     discr_index: 0,
977                                     variants: st,
978                                 },
979                                 fields: FieldPlacement::Arbitrary {
980                                     offsets: vec![offset],
981                                     memory_index: vec![0]
982                                 },
983                                 abi,
984                                 size,
985                                 align,
986                             }));
987                         }
988                     }
989                 }
990
991                 let (mut min, mut max) = (i128::max_value(), i128::min_value());
992                 let discr_type = def.repr.discr_type();
993                 let bits = Integer::from_attr(self, discr_type).size().bits();
994                 for (i, discr) in def.discriminants(tcx) {
995                     if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
996                         continue;
997                     }
998                     let mut x = discr.val as i128;
999                     if discr_type.is_signed() {
1000                         // sign extend the raw representation to be an i128
1001                         x = (x << (128 - bits)) >> (128 - bits);
1002                     }
1003                     if x < min { min = x; }
1004                     if x > max { max = x; }
1005                 }
1006                 // We might have no inhabited variants, so pretend there's at least one.
1007                 if (min, max) == (i128::max_value(), i128::min_value()) {
1008                     min = 0;
1009                     max = 0;
1010                 }
1011                 assert!(min <= max, "discriminant range is {}...{}", min, max);
1012                 let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max);
1013
1014                 let mut align = dl.aggregate_align;
1015                 let mut size = Size::ZERO;
1016
1017                 // We're interested in the smallest alignment, so start large.
1018                 let mut start_align = Align::from_bytes(256).unwrap();
1019                 assert_eq!(Integer::for_align(dl, start_align), None);
1020
1021                 // repr(C) on an enum tells us to make a (tag, union) layout,
1022                 // so we need to grow the prefix alignment to be at least
1023                 // the alignment of the union. (This value is used both for
1024                 // determining the alignment of the overall enum, and the
1025                 // determining the alignment of the payload after the tag.)
1026                 let mut prefix_align = min_ity.align(dl).abi;
1027                 if def.repr.c() {
1028                     for fields in &variants {
1029                         for field in fields {
1030                             prefix_align = prefix_align.max(field.align.abi);
1031                         }
1032                     }
1033                 }
1034
1035                 // Create the set of structs that represent each variant.
1036                 let mut layout_variants = variants.iter_enumerated().map(|(i, field_layouts)| {
1037                     let mut st = univariant_uninterned(&field_layouts,
1038                         &def.repr, StructKind::Prefixed(min_ity.size(), prefix_align))?;
1039                     st.variants = Variants::Single { index: i };
1040                     // Find the first field we can't move later
1041                     // to make room for a larger discriminant.
1042                     for field in st.fields.index_by_increasing_offset().map(|j| field_layouts[j]) {
1043                         if !field.is_zst() || field.align.abi.bytes() != 1 {
1044                             start_align = start_align.min(field.align.abi);
1045                             break;
1046                         }
1047                     }
1048                     size = cmp::max(size, st.size);
1049                     align = align.max(st.align);
1050                     Ok(st)
1051                 }).collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1052
1053                 // Align the maximum variant size to the largest alignment.
1054                 size = size.align_to(align.abi);
1055
1056                 if size.bytes() >= dl.obj_size_bound() {
1057                     return Err(LayoutError::SizeOverflow(ty));
1058                 }
1059
1060                 let typeck_ity = Integer::from_attr(dl, def.repr.discr_type());
1061                 if typeck_ity < min_ity {
1062                     // It is a bug if Layout decided on a greater discriminant size than typeck for
1063                     // some reason at this point (based on values discriminant can take on). Mostly
1064                     // because this discriminant will be loaded, and then stored into variable of
1065                     // type calculated by typeck. Consider such case (a bug): typeck decided on
1066                     // byte-sized discriminant, but layout thinks we need a 16-bit to store all
1067                     // discriminant values. That would be a bug, because then, in codegen, in order
1068                     // to store this 16-bit discriminant into 8-bit sized temporary some of the
1069                     // space necessary to represent would have to be discarded (or layout is wrong
1070                     // on thinking it needs 16 bits)
1071                     bug!("layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
1072                          min_ity, typeck_ity);
1073                     // However, it is fine to make discr type however large (as an optimisation)
1074                     // after this point – we’ll just truncate the value we load in codegen.
1075                 }
1076
1077                 // Check to see if we should use a different type for the
1078                 // discriminant. We can safely use a type with the same size
1079                 // as the alignment of the first field of each variant.
1080                 // We increase the size of the discriminant to avoid LLVM copying
1081                 // padding when it doesn't need to. This normally causes unaligned
1082                 // load/stores and excessive memcpy/memset operations. By using a
1083                 // bigger integer size, LLVM can be sure about its contents and
1084                 // won't be so conservative.
1085
1086                 // Use the initial field alignment
1087                 let mut ity = if def.repr.c() || def.repr.int.is_some() {
1088                     min_ity
1089                 } else {
1090                     Integer::for_align(dl, start_align).unwrap_or(min_ity)
1091                 };
1092
1093                 // If the alignment is not larger than the chosen discriminant size,
1094                 // don't use the alignment as the final size.
1095                 if ity <= min_ity {
1096                     ity = min_ity;
1097                 } else {
1098                     // Patch up the variants' first few fields.
1099                     let old_ity_size = min_ity.size();
1100                     let new_ity_size = ity.size();
1101                     for variant in &mut layout_variants {
1102                         match variant.fields {
1103                             FieldPlacement::Arbitrary { ref mut offsets, .. } => {
1104                                 for i in offsets {
1105                                     if *i <= old_ity_size {
1106                                         assert_eq!(*i, old_ity_size);
1107                                         *i = new_ity_size;
1108                                     }
1109                                 }
1110                                 // We might be making the struct larger.
1111                                 if variant.size <= old_ity_size {
1112                                     variant.size = new_ity_size;
1113                                 }
1114                             }
1115                             _ => bug!()
1116                         }
1117                     }
1118                 }
1119
1120                 let tag_mask = !0u128 >> (128 - ity.size().bits());
1121                 let tag = Scalar {
1122                     value: Int(ity, signed),
1123                     valid_range: (min as u128 & tag_mask)..=(max as u128 & tag_mask),
1124                 };
1125                 let mut abi = Abi::Aggregate { sized: true };
1126                 if tag.value.size(dl) == size {
1127                     abi = Abi::Scalar(tag.clone());
1128                 } else {
1129                     // Try to use a ScalarPair for all tagged enums.
1130                     let mut common_prim = None;
1131                     for (field_layouts, layout_variant) in variants.iter().zip(&layout_variants) {
1132                         let offsets = match layout_variant.fields {
1133                             FieldPlacement::Arbitrary { ref offsets, .. } => offsets,
1134                             _ => bug!(),
1135                         };
1136                         let mut fields = field_layouts
1137                             .iter()
1138                             .zip(offsets)
1139                             .filter(|p| !p.0.is_zst());
1140                         let (field, offset) = match (fields.next(), fields.next()) {
1141                             (None, None) => continue,
1142                             (Some(pair), None) => pair,
1143                             _ => {
1144                                 common_prim = None;
1145                                 break;
1146                             }
1147                         };
1148                         let prim = match field.details.abi {
1149                             Abi::Scalar(ref scalar) => scalar.value,
1150                             _ => {
1151                                 common_prim = None;
1152                                 break;
1153                             }
1154                         };
1155                         if let Some(pair) = common_prim {
1156                             // This is pretty conservative. We could go fancier
1157                             // by conflating things like i32 and u32, or even
1158                             // realising that (u8, u8) could just cohabit with
1159                             // u16 or even u32.
1160                             if pair != (prim, offset) {
1161                                 common_prim = None;
1162                                 break;
1163                             }
1164                         } else {
1165                             common_prim = Some((prim, offset));
1166                         }
1167                     }
1168                     if let Some((prim, offset)) = common_prim {
1169                         let pair = scalar_pair(tag.clone(), scalar_unit(prim));
1170                         let pair_offsets = match pair.fields {
1171                             FieldPlacement::Arbitrary {
1172                                 ref offsets,
1173                                 ref memory_index
1174                             } => {
1175                                 assert_eq!(memory_index, &[0, 1]);
1176                                 offsets
1177                             }
1178                             _ => bug!()
1179                         };
1180                         if pair_offsets[0] == Size::ZERO &&
1181                             pair_offsets[1] == *offset &&
1182                             align == pair.align &&
1183                             size == pair.size {
1184                             // We can use `ScalarPair` only when it matches our
1185                             // already computed layout (including `#[repr(C)]`).
1186                             abi = pair.abi;
1187                         }
1188                     }
1189                 }
1190
1191                 if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
1192                     abi = Abi::Uninhabited;
1193                 }
1194
1195                 tcx.intern_layout(LayoutDetails {
1196                     variants: Variants::Multiple {
1197                         discr: tag,
1198                         discr_kind: DiscriminantKind::Tag,
1199                         discr_index: 0,
1200                         variants: layout_variants,
1201                     },
1202                     fields: FieldPlacement::Arbitrary {
1203                         offsets: vec![Size::ZERO],
1204                         memory_index: vec![0]
1205                     },
1206                     abi,
1207                     align,
1208                     size
1209                 })
1210             }
1211
1212             // Types with no meaningful known layout.
1213             ty::Projection(_) | ty::Opaque(..) => {
1214                 let normalized = tcx.normalize_erasing_regions(param_env, ty);
1215                 if ty == normalized {
1216                     return Err(LayoutError::Unknown(ty));
1217                 }
1218                 tcx.layout_raw(param_env.and(normalized))?
1219             }
1220
1221             ty::Bound(..) |
1222             ty::Placeholder(..) |
1223             ty::UnnormalizedProjection(..) |
1224             ty::GeneratorWitness(..) |
1225             ty::Infer(_) => {
1226                 bug!("LayoutDetails::compute: unexpected type `{}`", ty)
1227             }
1228
1229             ty::Param(_) | ty::Error => {
1230                 return Err(LayoutError::Unknown(ty));
1231             }
1232         })
1233     }
1234
1235     /// This is invoked by the `layout_raw` query to record the final
1236     /// layout of each type.
1237     #[inline(always)]
1238     fn record_layout_for_printing(&self, layout: TyLayout<'tcx>) {
1239         // If we are running with `-Zprint-type-sizes`, maybe record layouts
1240         // for dumping later.
1241         if self.tcx.sess.opts.debugging_opts.print_type_sizes {
1242             self.record_layout_for_printing_outlined(layout)
1243         }
1244     }
1245
1246     fn record_layout_for_printing_outlined(&self, layout: TyLayout<'tcx>) {
1247         // Ignore layouts that are done with non-empty environments or
1248         // non-monomorphic layouts, as the user only wants to see the stuff
1249         // resulting from the final codegen session.
1250         if
1251             layout.ty.has_param_types() ||
1252             layout.ty.has_self_ty() ||
1253             !self.param_env.caller_bounds.is_empty()
1254         {
1255             return;
1256         }
1257
1258         // (delay format until we actually need it)
1259         let record = |kind, packed, opt_discr_size, variants| {
1260             let type_desc = format!("{:?}", layout.ty);
1261             self.tcx.sess.code_stats.borrow_mut().record_type_size(kind,
1262                                                                    type_desc,
1263                                                                    layout.align.abi,
1264                                                                    layout.size,
1265                                                                    packed,
1266                                                                    opt_discr_size,
1267                                                                    variants);
1268         };
1269
1270         let adt_def = match layout.ty.sty {
1271             ty::Adt(ref adt_def, _) => {
1272                 debug!("print-type-size t: `{:?}` process adt", layout.ty);
1273                 adt_def
1274             }
1275
1276             ty::Closure(..) => {
1277                 debug!("print-type-size t: `{:?}` record closure", layout.ty);
1278                 record(DataTypeKind::Closure, false, None, vec![]);
1279                 return;
1280             }
1281
1282             _ => {
1283                 debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
1284                 return;
1285             }
1286         };
1287
1288         let adt_kind = adt_def.adt_kind();
1289         let adt_packed = adt_def.repr.packed();
1290
1291         let build_variant_info = |n: Option<Ident>,
1292                                   flds: &[ast::Name],
1293                                   layout: TyLayout<'tcx>| {
1294             let mut min_size = Size::ZERO;
1295             let field_info: Vec<_> = flds.iter().enumerate().map(|(i, &name)| {
1296                 match layout.field(self, i) {
1297                     Err(err) => {
1298                         bug!("no layout found for field {}: `{:?}`", name, err);
1299                     }
1300                     Ok(field_layout) => {
1301                         let offset = layout.fields.offset(i);
1302                         let field_end = offset + field_layout.size;
1303                         if min_size < field_end {
1304                             min_size = field_end;
1305                         }
1306                         session::FieldInfo {
1307                             name: name.to_string(),
1308                             offset: offset.bytes(),
1309                             size: field_layout.size.bytes(),
1310                             align: field_layout.align.abi.bytes(),
1311                         }
1312                     }
1313                 }
1314             }).collect();
1315
1316             session::VariantInfo {
1317                 name: n.map(|n| n.to_string()),
1318                 kind: if layout.is_unsized() {
1319                     session::SizeKind::Min
1320                 } else {
1321                     session::SizeKind::Exact
1322                 },
1323                 align: layout.align.abi.bytes(),
1324                 size: if min_size.bytes() == 0 {
1325                     layout.size.bytes()
1326                 } else {
1327                     min_size.bytes()
1328                 },
1329                 fields: field_info,
1330             }
1331         };
1332
1333         match layout.variants {
1334             Variants::Single { index } => {
1335                 debug!("print-type-size `{:#?}` variant {}",
1336                        layout, adt_def.variants[index].ident);
1337                 if !adt_def.variants.is_empty() {
1338                     let variant_def = &adt_def.variants[index];
1339                     let fields: Vec<_> =
1340                         variant_def.fields.iter().map(|f| f.ident.name).collect();
1341                     record(adt_kind.into(),
1342                            adt_packed,
1343                            None,
1344                            vec![build_variant_info(Some(variant_def.ident),
1345                                                    &fields,
1346                                                    layout)]);
1347                 } else {
1348                     // (This case arises for *empty* enums; so give it
1349                     // zero variants.)
1350                     record(adt_kind.into(), adt_packed, None, vec![]);
1351                 }
1352             }
1353
1354             Variants::Multiple { ref discr, ref discr_kind, .. } => {
1355                 debug!("print-type-size `{:#?}` adt general variants def {}",
1356                        layout.ty, adt_def.variants.len());
1357                 let variant_infos: Vec<_> =
1358                     adt_def.variants.iter_enumerated().map(|(i, variant_def)| {
1359                         let fields: Vec<_> =
1360                             variant_def.fields.iter().map(|f| f.ident.name).collect();
1361                         build_variant_info(Some(variant_def.ident),
1362                                            &fields,
1363                                            layout.for_variant(self, i))
1364                     })
1365                     .collect();
1366                 record(adt_kind.into(), adt_packed, match discr_kind {
1367                     DiscriminantKind::Tag => Some(discr.value.size(self)),
1368                     _ => None
1369                 }, variant_infos);
1370             }
1371         }
1372     }
1373 }
1374
/// Type size "skeleton", i.e., the only information determining a type's size.
/// While this is conservative, (aside from constant sizes, only pointers,
/// newtypes thereof and null pointer optimized enums are allowed), it is
/// enough to statically check common use cases of transmute.
#[derive(Copy, Clone, Debug)]
pub enum SizeSkeleton<'tcx> {
    /// Any statically computable Layout.
    Known(Size),

    /// A potentially-fat pointer whose exact size depends on an unknown
    /// pointee (a type parameter, or a projection depending on one).
    Pointer {
        /// If true, this pointer is never null.
        non_zero: bool,
        /// The type which determines the unsized metadata, if any,
        /// of this pointer. Either a type parameter or a projection
        /// depending on one, with regions erased.
        tail: Ty<'tcx>
    }
}
1394
impl<'a, 'tcx> SizeSkeleton<'tcx> {
    /// Computes the size skeleton of `ty`: an exact `Known` size when a
    /// full layout succeeds, or a `Pointer` description for (wrappers
    /// around) pointers whose size depends only on an unknown pointee.
    /// Any other layout failure is propagated as the original error.
    pub fn compute(ty: Ty<'tcx>,
                   tcx: TyCtxt<'a, 'tcx, 'tcx>,
                   param_env: ty::ParamEnv<'tcx>)
                   -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
        debug_assert!(!ty.has_infer_types());

        // First try computing a static layout.
        let err = match tcx.layout_of(param_env.and(ty)) {
            Ok(layout) => {
                return Ok(SizeSkeleton::Known(layout.size));
            }
            Err(err) => err
        };

        match ty.sty {
            ty::Ref(_, pointee, _) |
            ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                let non_zero = !ty.is_unsafe_ptr();
                let tail = tcx.struct_tail(pointee);
                match tail.sty {
                    ty::Param(_) | ty::Projection(_) => {
                        debug_assert!(tail.has_param_types() || tail.has_self_ty());
                        Ok(SizeSkeleton::Pointer {
                            non_zero,
                            tail: tcx.erase_regions(&tail)
                        })
                    }
                    _ => {
                        bug!("SizeSkeleton::compute({}): layout errored ({}), yet \
                              tail `{}` is not a type parameter or a projection",
                             ty, err, tail)
                    }
                }
            }

            ty::Adt(def, substs) => {
                // Only newtypes and enums w/ nullable pointer optimization.
                if def.is_union() || def.variants.is_empty() || def.variants.len() > 2 {
                    return Err(err);
                }

                // Get a zero-sized variant or a pointer newtype.
                let zero_or_ptr_variant = |i| {
                    let i = VariantIdx::new(i);
                    let fields = def.variants[i].fields.iter().map(|field| {
                        SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env)
                    });
                    let mut ptr = None;
                    for field in fields {
                        let field = field?;
                        match field {
                            SizeSkeleton::Known(size) => {
                                // Any non-ZST field of known size disqualifies
                                // the variant.
                                if size.bytes() > 0 {
                                    return Err(err);
                                }
                            }
                            SizeSkeleton::Pointer {..} => {
                                // At most one pointer-like field is allowed.
                                if ptr.is_some() {
                                    return Err(err);
                                }
                                ptr = Some(field);
                            }
                        }
                    }
                    Ok(ptr)
                };

                let v0 = zero_or_ptr_variant(0)?;
                // Newtype.
                if def.variants.len() == 1 {
                    if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
                        return Ok(SizeSkeleton::Pointer {
                            // A `rustc_layout_scalar_valid_range` attribute
                            // that excludes 0 also makes the wrapper non-null.
                            non_zero: non_zero || match tcx.layout_scalar_valid_range(def.did) {
                                (Bound::Included(start), Bound::Unbounded) => start > 0,
                                (Bound::Included(start), Bound::Included(end)) =>
                                    0 < start && start < end,
                                _ => false,
                            },
                            tail,
                        });
                    } else {
                        return Err(err);
                    }
                }

                let v1 = zero_or_ptr_variant(1)?;
                // Nullable pointer enum optimization.
                match (v0, v1) {
                    (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None) |
                    (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
                        Ok(SizeSkeleton::Pointer {
                            // The pointer may now be null (encoding the
                            // other, zero-sized variant).
                            non_zero: false,
                            tail,
                        })
                    }
                    _ => Err(err)
                }
            }

            ty::Projection(_) | ty::Opaque(..) => {
                // Retry after normalization, but only if it made progress
                // (otherwise we would recurse forever).
                let normalized = tcx.normalize_erasing_regions(param_env, ty);
                if ty == normalized {
                    Err(err)
                } else {
                    SizeSkeleton::compute(normalized, tcx, param_env)
                }
            }

            _ => Err(err)
        }
    }

    /// Whether two skeletons are provably the same size: equal known sizes,
    /// or pointers with the same tail type (`non_zero` is irrelevant to size).
    pub fn same_size(self, other: SizeSkeleton<'_>) -> bool {
        match (self, other) {
            (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
            (SizeSkeleton::Pointer { tail: a, .. },
             SizeSkeleton::Pointer { tail: b, .. }) => a == b,
            _ => false
        }
    }
}
1517
/// Access to a `TyCtxt`, in addition to the target data layout
/// (implied by the `HasDataLayout` supertrait).
pub trait HasTyCtxt<'tcx>: HasDataLayout {
    fn tcx<'a>(&'a self) -> TyCtxt<'a, 'tcx, 'tcx>;
}
1521
/// Access to the `ParamEnv` that layout queries should be evaluated in.
pub trait HasParamEnv<'tcx> {
    fn param_env(&self) -> ty::ParamEnv<'tcx>;
}
1525
impl<'a, 'gcx, 'tcx> HasDataLayout for TyCtxt<'a, 'gcx, 'tcx> {
    // The target data layout is stored directly on the `TyCtxt`.
    fn data_layout(&self) -> &TargetDataLayout {
        &self.data_layout
    }
}
1531
impl<'a, 'gcx, 'tcx> HasTyCtxt<'gcx> for TyCtxt<'a, 'gcx, 'tcx> {
    // Hands out the global (`'gcx`) context, not the possibly-local one.
    fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'gcx> {
        self.global_tcx()
    }
}
1537
impl<'tcx, C> HasParamEnv<'tcx> for LayoutCx<'tcx, C> {
    // `LayoutCx` carries its `ParamEnv` as a field.
    fn param_env(&self) -> ty::ParamEnv<'tcx> {
        self.param_env
    }
}
1543
impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
    // Forwarded to the wrapped context (`TyCtxt` or `TyCtxtAt`).
    fn data_layout(&self) -> &TargetDataLayout {
        self.tcx.data_layout()
    }
}
1549
impl<'gcx, 'tcx, T: HasTyCtxt<'gcx>> HasTyCtxt<'gcx> for LayoutCx<'tcx, T> {
    // Forwarded to the wrapped context.
    fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'gcx> {
        self.tcx.tcx()
    }
}
1555
/// Abstracts over a value that is either a plain `T` (infallible) or a
/// `Result<T, E>` (fallible), letting generic layout code handle both
/// uniformly via conversion to/from `Result`.
pub trait MaybeResult<T> {
    type Error;

    /// Wraps a `Result` into `Self` (for `T` itself, the error type is
    /// uninhabited so this cannot fail).
    fn from(x: Result<T, Self::Error>) -> Self;
    /// Converts `self` into the explicit `Result` form.
    fn to_result(self) -> Result<T, Self::Error>;
}
1562
1563 impl<T> MaybeResult<T> for T {
1564     type Error = !;
1565
1566     fn from(x: Result<T, Self::Error>) -> Self {
1567         let Ok(x) = x;
1568         x
1569     }
1570     fn to_result(self) -> Result<T, Self::Error> {
1571         Ok(self)
1572     }
1573 }
1574
impl<T, E> MaybeResult<T> for Result<T, E> {
    type Error = E;

    // A `Result` already is its own fallible form, so both conversions
    // are the identity.
    fn from(x: Result<T, Self::Error>) -> Self {
        x
    }
    fn to_result(self) -> Result<T, Self::Error> {
        self
    }
}
1585
/// The generic `rustc_target` layout type, instantiated with `rustc`'s
/// own `Ty`: a type paired with its interned `LayoutDetails`.
pub type TyLayout<'tcx> = ::rustc_target::abi::TyLayout<'tcx, Ty<'tcx>>;
1587
1588 impl<'a, 'tcx> LayoutOf for LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
1589     type Ty = Ty<'tcx>;
1590     type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;
1591
1592     /// Computes the layout of a type. Note that this implicitly
1593     /// executes in "reveal all" mode.
1594     fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout {
1595         let param_env = self.param_env.with_reveal_all();
1596         let ty = self.tcx.normalize_erasing_regions(param_env, ty);
1597         let details = self.tcx.layout_raw(param_env.and(ty))?;
1598         let layout = TyLayout {
1599             ty,
1600             details
1601         };
1602
1603         // N.B., this recording is normally disabled; when enabled, it
1604         // can however trigger recursive invocations of `layout_of`.
1605         // Therefore, we execute it *after* the main query has
1606         // completed, to avoid problems around recursive structures
1607         // and the like. (Admittedly, I wasn't able to reproduce a problem
1608         // here, but it seems like the right thing to do. -nmatsakis)
1609         self.record_layout_for_printing(layout);
1610
1611         Ok(layout)
1612     }
1613 }
1614
1615 impl<'a, 'tcx> LayoutOf for LayoutCx<'tcx, ty::query::TyCtxtAt<'a, 'tcx, 'tcx>> {
1616     type Ty = Ty<'tcx>;
1617     type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;
1618
1619     /// Computes the layout of a type. Note that this implicitly
1620     /// executes in "reveal all" mode.
1621     fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout {
1622         let param_env = self.param_env.with_reveal_all();
1623         let ty = self.tcx.normalize_erasing_regions(param_env, ty);
1624         let details = self.tcx.layout_raw(param_env.and(ty))?;
1625         let layout = TyLayout {
1626             ty,
1627             details
1628         };
1629
1630         // N.B., this recording is normally disabled; when enabled, it
1631         // can however trigger recursive invocations of `layout_of`.
1632         // Therefore, we execute it *after* the main query has
1633         // completed, to avoid problems around recursive structures
1634         // and the like. (Admittedly, I wasn't able to reproduce a problem
1635         // here, but it seems like the right thing to do. -nmatsakis)
1636         let cx = LayoutCx {
1637             tcx: *self.tcx,
1638             param_env: self.param_env
1639         };
1640         cx.record_layout_for_printing(layout);
1641
1642         Ok(layout)
1643     }
1644 }
1645
// Helper (inherent) `layout_of` methods to avoid pushing `LayoutCx` to users.
impl TyCtxt<'a, 'tcx, '_> {
    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    #[inline]
    pub fn layout_of(self, param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
                     -> Result<TyLayout<'tcx>, LayoutError<'tcx>> {
        // Build a throwaway `LayoutCx` over the global context and delegate.
        let cx = LayoutCx {
            tcx: self.global_tcx(),
            param_env: param_env_and_ty.param_env
        };
        cx.layout_of(param_env_and_ty.value)
    }
}
1660
impl ty::query::TyCtxtAt<'a, 'tcx, '_> {
    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    #[inline]
    pub fn layout_of(self, param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
                     -> Result<TyLayout<'tcx>, LayoutError<'tcx>> {
        // Same as `TyCtxt::layout_of`, but preserves the query span
        // via `.at(self.span)` for better error attribution.
        let cx = LayoutCx {
            tcx: self.global_tcx().at(self.span),
            param_env: param_env_and_ty.param_env
        };
        cx.layout_of(param_env_and_ty.value)
    }
}
1674
impl<'a, 'tcx, C> TyLayoutMethods<'tcx, C> for Ty<'tcx>
    where C: LayoutOf<Ty = Ty<'tcx>> + HasTyCtxt<'tcx>,
          C::TyLayout: MaybeResult<TyLayout<'tcx>>,
          C: HasParamEnv<'tcx>
{
    /// Restricts `this` to the layout of variant `variant_index`.
    /// For a `Variants::Single` layout of a *different* variant, a fresh
    /// uninhabited zero-sized layout is fabricated instead of panicking.
    fn for_variant(this: TyLayout<'tcx>, cx: &C, variant_index: VariantIdx) -> TyLayout<'tcx> {
        let details = match this.variants {
            Variants::Single { index } if index == variant_index => this.details,

            Variants::Single { index } => {
                // Deny calling for_variant more than once for non-Single enums.
                if let Ok(layout) = cx.layout_of(this.ty).to_result() {
                    assert_eq!(layout.variants, Variants::Single { index });
                }

                let fields = match this.ty.sty {
                    ty::Adt(def, _) => def.variants[variant_index].fields.len(),
                    _ => bug!()
                };
                let tcx = cx.tcx();
                // Fabricate an uninhabited, zero-sized layout that still has
                // the right number of fields for the requested variant.
                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: variant_index },
                    fields: FieldPlacement::Union(fields),
                    abi: Abi::Uninhabited,
                    align: tcx.data_layout.i8_align,
                    size: Size::ZERO
                })
            }

            Variants::Multiple { ref variants, .. } => {
                &variants[variant_index]
            }
        };

        assert_eq!(details.variants, Variants::Single { index: variant_index });

        TyLayout {
            ty: this.ty,
            details
        }
    }

    /// Computes the layout of field `i` of `this`, dispatching on the
    /// type's kind to find the field's type.
    fn field(this: TyLayout<'tcx>, cx: &C, i: usize) -> C::TyLayout {
        let tcx = cx.tcx();
        // Layout for a synthetic discriminant "field" — it has no `Ty` of
        // its own in the ADT definition, so one is derived from the scalar.
        let discr_layout = |discr: &Scalar| -> C::TyLayout {
            let layout = LayoutDetails::scalar(cx, discr.clone());
            MaybeResult::from(Ok(TyLayout {
                details: tcx.intern_layout(layout),
                ty: discr.value.to_ty(tcx),
            }))
        };

        cx.layout_of(match this.ty.sty {
            // Types with no fields at all.
            ty::Bool |
            ty::Char |
            ty::Int(_) |
            ty::Uint(_) |
            ty::Float(_) |
            ty::FnPtr(_) |
            ty::Never |
            ty::FnDef(..) |
            ty::GeneratorWitness(..) |
            ty::Foreign(..) |
            ty::Dynamic(..) => {
                bug!("TyLayout::field_type({:?}): not applicable", this)
            }

            // Potentially-fat pointers.
            ty::Ref(_, pointee, _) |
            ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                assert!(i < this.fields.count());

                // Reuse the fat *T type as its own thin pointer data field.
                // This provides information about e.g., DST struct pointees
                // (which may have no non-DST form), and will work as long
                // as the `Abi` or `FieldPlacement` is checked by users.
                if i == 0 {
                    let nil = tcx.mk_unit();
                    let ptr_ty = if this.ty.is_unsafe_ptr() {
                        tcx.mk_mut_ptr(nil)
                    } else {
                        tcx.mk_mut_ref(tcx.lifetimes.re_static, nil)
                    };
                    return MaybeResult::from(cx.layout_of(ptr_ty).to_result().map(|mut ptr_layout| {
                        ptr_layout.ty = this.ty;
                        ptr_layout
                    }));
                }

                // Field 1 is the pointer metadata, determined by the
                // unsized tail of the pointee.
                match tcx.struct_tail(pointee).sty {
                    ty::Slice(_) |
                    ty::Str => tcx.types.usize,
                    ty::Dynamic(_, _) => {
                        tcx.mk_imm_ref(
                            tcx.lifetimes.re_static,
                            tcx.mk_array(tcx.types.usize, 3),
                        )
                        /* FIXME: use actual fn pointers
                        Warning: naively computing the number of entries in the
                        vtable by counting the methods on the trait + methods on
                        all parent traits does not work, because some methods can
                        be not object safe and thus excluded from the vtable.
                        Increase this counter if you tried to implement this but
                        failed to do it without duplicating a lot of code from
                        other places in the compiler: 2
                        tcx.mk_tup(&[
                            tcx.mk_array(tcx.types.usize, 3),
                            tcx.mk_array(Option<fn()>),
                        ])
                        */
                    }
                    _ => bug!("TyLayout::field_type({:?}): not applicable", this)
                }
            }

            // Arrays and slices.
            ty::Array(element, _) |
            ty::Slice(element) => element,
            ty::Str => tcx.types.u8,

            // Tuples, generators and closures.
            ty::Closure(def_id, ref substs) => {
                substs.upvar_tys(def_id, tcx).nth(i).unwrap()
            }

            ty::Generator(def_id, ref substs, _) => {
                match this.variants {
                    // A single-variant view: fields come from the tys of
                    // that generator state.
                    Variants::Single { index } => {
                        substs.state_tys(def_id, tcx)
                            .nth(index.as_usize()).unwrap()
                            .nth(i).unwrap()
                    }
                    Variants::Multiple { ref discr, discr_index, .. } => {
                        if i == discr_index {
                            return discr_layout(discr);
                        }
                        substs.prefix_tys(def_id, tcx).nth(i).unwrap()
                    }
                }
            }

            ty::Tuple(tys) => tys[i].expect_ty(),

            // SIMD vector types.
            ty::Adt(def, ..) if def.repr.simd() => {
                this.ty.simd_type(tcx)
            }

            // ADTs.
            ty::Adt(def, substs) => {
                match this.variants {
                    Variants::Single { index } => {
                        def.variants[index].fields[i].ty(tcx, substs)
                    }

                    // Discriminant field for enums (where applicable).
                    Variants::Multiple { ref discr, .. } => {
                        assert_eq!(i, 0);
                        return discr_layout(discr);
                    }
                }
            }

            ty::Projection(_) | ty::UnnormalizedProjection(..) | ty::Bound(..) |
            ty::Placeholder(..) | ty::Opaque(..) | ty::Param(_) | ty::Infer(_) |
            ty::Error => {
                bug!("TyLayout::field_type: unexpected type `{}`", this.ty)
            }
        })
    }

    /// If a pointer resides at byte `offset` within `this`, returns its
    /// pointee's size/alignment and, for references, what aliasing
    /// guarantees (`PointerKind`) it provides.
    fn pointee_info_at(
        this: TyLayout<'tcx>,
        cx: &C,
        offset: Size,
    ) -> Option<PointeeInfo> {
        match this.ty.sty {
            ty::RawPtr(mt) if offset.bytes() == 0 => {
                // Raw pointers carry no dereferenceability guarantee
                // (`safe: None`).
                cx.layout_of(mt.ty).to_result().ok()
                    .map(|layout| PointeeInfo {
                        size: layout.size,
                        align: layout.align.abi,
                        safe: None,
                    })
            }

            ty::Ref(_, ty, mt) if offset.bytes() == 0 => {
                let tcx = cx.tcx();
                let is_freeze = ty.is_freeze(tcx, cx.param_env(), DUMMY_SP);
                let kind = match mt {
                    hir::MutImmutable => if is_freeze {
                        PointerKind::Frozen
                    } else {
                        PointerKind::Shared
                    },
                    hir::MutMutable => {
                        // Previously we would only emit noalias annotations for LLVM >= 6 or in
                        // panic=abort mode. That was deemed right, as prior versions had many bugs
                        // in conjunction with unwinding, but later versions didn’t seem to have
                        // said issues. See issue #31681.
                        //
                        // Alas, later on we encountered a case where noalias would generate wrong
                        // code altogether even with recent versions of LLVM in *safe* code with no
                        // unwinding involved. See #54462.
                        //
                        // For now, do not enable mutable_noalias by default at all, while the
                        // issue is being figured out.
                        let mutable_noalias = tcx.sess.opts.debugging_opts.mutable_noalias
                            .unwrap_or(false);
                        if mutable_noalias {
                            PointerKind::UniqueBorrowed
                        } else {
                            PointerKind::Shared
                        }
                    }
                };

                cx.layout_of(ty).to_result().ok()
                    .map(|layout| PointeeInfo {
                        size: layout.size,
                        align: layout.align.abi,
                        safe: Some(kind),
                    })
            }

            _ => {
                let mut data_variant = match this.variants {
                    // Within the discriminant field, only the niche itself is
                    // always initialized, so we only check for a pointer at its
                    // offset.
                    //
                    // If the niche is a pointer, it's either valid (according
                    // to its type), or null (which the niche field's scalar
                    // validity range encodes).  This allows using
                    // `dereferenceable_or_null` for e.g., `Option<&T>`, and
                    // this will continue to work as long as we don't start
                    // using more niches than just null (e.g., the first page of
                    // the address space, or unaligned pointers).
                    Variants::Multiple {
                        discr_kind: DiscriminantKind::Niche {
                            dataful_variant,
                            ..
                        },
                        discr_index,
                        ..
                    } if this.fields.offset(discr_index) == offset =>
                        Some(this.for_variant(cx, dataful_variant)),
                    _ => Some(this),
                };

                if let Some(variant) = data_variant {
                    // We're not interested in any unions.
                    if let FieldPlacement::Union(_) = variant.fields {
                        data_variant = None;
                    }
                }

                let mut result = None;

                if let Some(variant) = data_variant {
                    // Find a field that fully contains the pointer-sized
                    // span starting at `offset`, and recurse into it.
                    let ptr_end = offset + Pointer.size(cx);
                    for i in 0..variant.fields.count() {
                        let field_start = variant.fields.offset(i);
                        if field_start <= offset {
                            let field = variant.field(cx, i);
                            result = field.to_result().ok()
                                .and_then(|field| {
                                    if ptr_end <= field_start + field.size {
                                        // We found the right field, look inside it.
                                        field.pointee_info_at(cx, offset - field_start)
                                    } else {
                                        None
                                    }
                                });
                            if result.is_some() {
                                break;
                            }
                        }
                    }
                }

                // FIXME(eddyb) This should be for `ptr::Unique<T>`, not `Box<T>`.
                if let Some(ref mut pointee) = result {
                    if let ty::Adt(def, _) = this.ty.sty {
                        if def.is_box() && offset.bytes() == 0 {
                            pointee.safe = Some(PointerKind::UniqueOwned);
                        }
                    }
                }

                result
            }
        }
    }
}
1970
/// A "niche": spare bit-patterns of a scalar field that can be used to
/// encode enum discriminants without requiring extra space.
struct Niche {
    /// Byte offset of the scalar within the enclosing layout.
    offset: Size,
    /// The scalar whose valid range leaves values unused.
    scalar: Scalar,
    /// How many values lie outside the scalar's valid range.
    available: u128,
}
1976
1977 impl Niche {
1978     fn reserve<'a, 'tcx>(
1979         &self,
1980         cx: &LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>>,
1981         count: u128,
1982     ) -> Option<(u128, Scalar)> {
1983         if count > self.available {
1984             return None;
1985         }
1986         let Scalar { value, valid_range: ref v } = self.scalar;
1987         let bits = value.size(cx).bits();
1988         assert!(bits <= 128);
1989         let max_value = !0u128 >> (128 - bits);
1990         let start = v.end().wrapping_add(1) & max_value;
1991         let end = v.end().wrapping_add(count) & max_value;
1992         Some((start, Scalar { value, valid_range: *v.start()..=end }))
1993     }
1994 }
1995
impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
    /// Find the offset of a niche leaf field, starting from
    /// the given type and recursing through aggregates.
    /// Returns `Ok(None)` when no niche is available.
    // FIXME(eddyb) traverse already optimized enums.
    fn find_niche(&self, layout: TyLayout<'tcx>) -> Result<Option<Niche>, LayoutError<'tcx>> {
        // Niche of a single scalar at `offset`, if its valid range leaves
        // any bit-patterns unused.
        let scalar_niche = |scalar: &Scalar, offset| {
            let Scalar { value, valid_range: ref v } = *scalar;

            let bits = value.size(self).bits();
            assert!(bits <= 128);
            let max_value = !0u128 >> (128 - bits);

            // Find out how many values are outside the valid range.
            // (`v` is an inclusive range that may wrap, hence two cases.)
            let available = if v.start() <= v.end() {
                v.start() + (max_value - v.end())
            } else {
                v.start() - v.end() - 1
            };

            // Give up if there is no niche value available.
            if available == 0 {
                return None;
            }

            Some(Niche { offset, scalar: scalar.clone(), available })
        };

        // Locals variables which live across yields are stored
        // in the generator type as fields. These may be uninitialized
        // so we don't look for niches there.
        if let ty::Generator(..) = layout.ty.sty {
            return Ok(None);
        }

        match layout.abi {
            Abi::Scalar(ref scalar) => {
                return Ok(scalar_niche(scalar, Size::ZERO));
            }
            Abi::ScalarPair(ref a, ref b) => {
                // HACK(nox): We iter on `b` and then `a` because `max_by_key`
                // returns the last maximum.
                let niche = iter::once(
                    (b, a.value.size(self).align_to(b.value.align(self).abi))
                )
                    .chain(iter::once((a, Size::ZERO)))
                    .filter_map(|(scalar, offset)| scalar_niche(scalar, offset))
                    .max_by_key(|niche| niche.available);
                return Ok(niche);
            }
            Abi::Vector { ref element, .. } => {
                return Ok(scalar_niche(element, Size::ZERO));
            }
            _ => {}
        }

        // Perhaps one of the fields is non-zero, let's recurse and find out.
        if let FieldPlacement::Union(_) = layout.fields {
            // Only Rust enums have safe-to-inspect fields
            // (a discriminant), other unions are unsafe.
            if let Variants::Single { .. } = layout.variants {
                return Ok(None);
            }
        }
        if let FieldPlacement::Array { count: original_64_bit_count, .. } = layout.fields {
            // rust-lang/rust#57038: avoid ICE within FieldPlacement::count when count too big
            if original_64_bit_count > usize::max_value() as u64 {
                return Err(LayoutError::SizeOverflow(layout.ty));
            }
            // All array elements share a layout, so checking the first
            // suffices.
            if layout.fields.count() > 0 {
                return self.find_niche(layout.field(self, 0)?);
            } else {
                return Ok(None);
            }
        }
        // Otherwise keep the largest niche found among all fields,
        // with its offset adjusted to be relative to `layout`.
        let mut niche = None;
        let mut available = 0;
        for i in 0..layout.fields.count() {
            if let Some(mut c) = self.find_niche(layout.field(self, i)?)? {
                if c.available > available {
                    available = c.available;
                    c.offset += layout.fields.offset(i);
                    niche = Some(c);
                }
            }
        }
        Ok(niche)
    }
}
2084
2085 impl<'a> HashStable<StableHashingContext<'a>> for Variants {
2086     fn hash_stable<W: StableHasherResult>(&self,
2087                                           hcx: &mut StableHashingContext<'a>,
2088                                           hasher: &mut StableHasher<W>) {
2089         use crate::ty::layout::Variants::*;
2090         mem::discriminant(self).hash_stable(hcx, hasher);
2091
2092         match *self {
2093             Single { index } => {
2094                 index.hash_stable(hcx, hasher);
2095             }
2096             Multiple {
2097                 ref discr,
2098                 ref discr_kind,
2099                 discr_index,
2100                 ref variants,
2101             } => {
2102                 discr.hash_stable(hcx, hasher);
2103                 discr_kind.hash_stable(hcx, hasher);
2104                 discr_index.hash_stable(hcx, hasher);
2105                 variants.hash_stable(hcx, hasher);
2106             }
2107         }
2108     }
2109 }
2110
2111 impl<'a> HashStable<StableHashingContext<'a>> for DiscriminantKind {
2112     fn hash_stable<W: StableHasherResult>(&self,
2113                                           hcx: &mut StableHashingContext<'a>,
2114                                           hasher: &mut StableHasher<W>) {
2115         use crate::ty::layout::DiscriminantKind::*;
2116         mem::discriminant(self).hash_stable(hcx, hasher);
2117
2118         match *self {
2119             Tag => {}
2120             Niche {
2121                 dataful_variant,
2122                 ref niche_variants,
2123                 niche_start,
2124             } => {
2125                 dataful_variant.hash_stable(hcx, hasher);
2126                 niche_variants.start().hash_stable(hcx, hasher);
2127                 niche_variants.end().hash_stable(hcx, hasher);
2128                 niche_start.hash_stable(hcx, hasher);
2129             }
2130         }
2131     }
2132 }
2133
2134 impl<'a> HashStable<StableHashingContext<'a>> for FieldPlacement {
2135     fn hash_stable<W: StableHasherResult>(&self,
2136                                           hcx: &mut StableHashingContext<'a>,
2137                                           hasher: &mut StableHasher<W>) {
2138         use crate::ty::layout::FieldPlacement::*;
2139         mem::discriminant(self).hash_stable(hcx, hasher);
2140
2141         match *self {
2142             Union(count) => {
2143                 count.hash_stable(hcx, hasher);
2144             }
2145             Array { count, stride } => {
2146                 count.hash_stable(hcx, hasher);
2147                 stride.hash_stable(hcx, hasher);
2148             }
2149             Arbitrary { ref offsets, ref memory_index } => {
2150                 offsets.hash_stable(hcx, hasher);
2151                 memory_index.hash_stable(hcx, hasher);
2152             }
2153         }
2154     }
2155 }
2156
2157 impl<'a> HashStable<StableHashingContext<'a>> for VariantIdx {
2158     fn hash_stable<W: StableHasherResult>(
2159         &self,
2160         hcx: &mut StableHashingContext<'a>,
2161         hasher: &mut StableHasher<W>,
2162     ) {
2163         self.as_u32().hash_stable(hcx, hasher)
2164     }
2165 }
2166
2167 impl<'a> HashStable<StableHashingContext<'a>> for Abi {
2168     fn hash_stable<W: StableHasherResult>(&self,
2169                                           hcx: &mut StableHashingContext<'a>,
2170                                           hasher: &mut StableHasher<W>) {
2171         use crate::ty::layout::Abi::*;
2172         mem::discriminant(self).hash_stable(hcx, hasher);
2173
2174         match *self {
2175             Uninhabited => {}
2176             Scalar(ref value) => {
2177                 value.hash_stable(hcx, hasher);
2178             }
2179             ScalarPair(ref a, ref b) => {
2180                 a.hash_stable(hcx, hasher);
2181                 b.hash_stable(hcx, hasher);
2182             }
2183             Vector { ref element, count } => {
2184                 element.hash_stable(hcx, hasher);
2185                 count.hash_stable(hcx, hasher);
2186             }
2187             Aggregate { sized } => {
2188                 sized.hash_stable(hcx, hasher);
2189             }
2190         }
2191     }
2192 }
2193
2194 impl<'a> HashStable<StableHashingContext<'a>> for Scalar {
2195     fn hash_stable<W: StableHasherResult>(&self,
2196                                           hcx: &mut StableHashingContext<'a>,
2197                                           hasher: &mut StableHasher<W>) {
2198         let Scalar { value, ref valid_range } = *self;
2199         value.hash_stable(hcx, hasher);
2200         valid_range.start().hash_stable(hcx, hasher);
2201         valid_range.end().hash_stable(hcx, hasher);
2202     }
2203 }
2204
// Field-by-field (for structs) and variant-by-variant (for enums)
// `HashStable` impls for the remaining layout types, generated by the
// `impl_stable_hash_for!` macro.
impl_stable_hash_for!(struct crate::ty::layout::LayoutDetails {
    variants,
    fields,
    abi,
    size,
    align
});

impl_stable_hash_for!(enum crate::ty::layout::Integer {
    I8,
    I16,
    I32,
    I64,
    I128
});

impl_stable_hash_for!(enum crate::ty::layout::Primitive {
    Int(integer, signed),
    Float(fty),
    Pointer
});

impl_stable_hash_for!(struct crate::ty::layout::AbiAndPrefAlign {
    abi,
    pref
});
2231
2232 impl<'gcx> HashStable<StableHashingContext<'gcx>> for Align {
2233     fn hash_stable<W: StableHasherResult>(&self,
2234                                           hcx: &mut StableHashingContext<'gcx>,
2235                                           hasher: &mut StableHasher<W>) {
2236         self.bytes().hash_stable(hcx, hasher);
2237     }
2238 }
2239
2240 impl<'gcx> HashStable<StableHashingContext<'gcx>> for Size {
2241     fn hash_stable<W: StableHasherResult>(&self,
2242                                           hcx: &mut StableHashingContext<'gcx>,
2243                                           hasher: &mut StableHasher<W>) {
2244         self.bytes().hash_stable(hcx, hasher);
2245     }
2246 }
2247
2248 impl<'a, 'gcx> HashStable<StableHashingContext<'a>> for LayoutError<'gcx>
2249 {
2250     fn hash_stable<W: StableHasherResult>(&self,
2251                                           hcx: &mut StableHashingContext<'a>,
2252                                           hasher: &mut StableHasher<W>) {
2253         use crate::ty::layout::LayoutError::*;
2254         mem::discriminant(self).hash_stable(hcx, hasher);
2255
2256         match *self {
2257             Unknown(t) |
2258             SizeOverflow(t) => t.hash_stable(hcx, hasher)
2259         }
2260     }
2261 }