]> git.lizzy.rs Git - rust.git/blob - src/librustc/ty/layout.rs
Rollup merge of #55963 - stepancheg:mpsc-take-2, r=alexcrichton
[rust.git] / src / librustc / ty / layout.rs
1 // Copyright 2016 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
4 //
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
10
11 use session::{self, DataTypeKind};
12 use ty::{self, Ty, TyCtxt, TypeFoldable, ReprOptions};
13
14 use syntax::ast::{self, IntTy, UintTy};
15 use syntax::attr;
16 use syntax_pos::DUMMY_SP;
17
18 use std::cmp;
19 use std::fmt;
20 use std::i128;
21 use std::iter;
22 use std::mem;
23 use std::ops::Bound;
24
25 use ich::StableHashingContext;
26 use rustc_data_structures::indexed_vec::{IndexVec, Idx};
27 use rustc_data_structures::stable_hasher::{HashStable, StableHasher,
28                                            StableHasherResult};
29
30 pub use rustc_target::abi::*;
31
/// Extension methods for the target-layout `Integer` type, tying it back to
/// `rustc`'s type system (`Ty`) and `#[repr]` attribute handling.
pub trait IntegerExt {
    /// Convert this integer size to the corresponding primitive `Ty`
    /// (`u8`..`u128` when `signed` is false, `i8`..`i128` when true).
    fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, signed: bool) -> Ty<'tcx>;
    /// Get the `Integer` for an `attr::IntType` (e.g. from `#[repr(u8)]`),
    /// resolving `isize`/`usize` through the target's data layout.
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
    /// Find the smallest `Integer` (and whether it is signed) that can
    /// represent the discriminant range `min..=max` of `ty`, honoring any
    /// `#[repr]` hints in `repr`.
    fn repr_discr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                            ty: Ty<'tcx>,
                            repr: &ReprOptions,
                            min: i128,
                            max: i128)
                            -> (Integer, bool);
}
42
43 impl IntegerExt for Integer {
44     fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, signed: bool) -> Ty<'tcx> {
45         match (*self, signed) {
46             (I8, false) => tcx.types.u8,
47             (I16, false) => tcx.types.u16,
48             (I32, false) => tcx.types.u32,
49             (I64, false) => tcx.types.u64,
50             (I128, false) => tcx.types.u128,
51             (I8, true) => tcx.types.i8,
52             (I16, true) => tcx.types.i16,
53             (I32, true) => tcx.types.i32,
54             (I64, true) => tcx.types.i64,
55             (I128, true) => tcx.types.i128,
56         }
57     }
58
59     /// Get the Integer type from an attr::IntType.
60     fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
61         let dl = cx.data_layout();
62
63         match ity {
64             attr::SignedInt(IntTy::I8) | attr::UnsignedInt(UintTy::U8) => I8,
65             attr::SignedInt(IntTy::I16) | attr::UnsignedInt(UintTy::U16) => I16,
66             attr::SignedInt(IntTy::I32) | attr::UnsignedInt(UintTy::U32) => I32,
67             attr::SignedInt(IntTy::I64) | attr::UnsignedInt(UintTy::U64) => I64,
68             attr::SignedInt(IntTy::I128) | attr::UnsignedInt(UintTy::U128) => I128,
69             attr::SignedInt(IntTy::Isize) | attr::UnsignedInt(UintTy::Usize) => {
70                 dl.ptr_sized_integer()
71             }
72         }
73     }
74
75     /// Find the appropriate Integer type and signedness for the given
76     /// signed discriminant range and #[repr] attribute.
77     /// N.B.: u128 values above i128::MAX will be treated as signed, but
78     /// that shouldn't affect anything, other than maybe debuginfo.
79     fn repr_discr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
80                             ty: Ty<'tcx>,
81                             repr: &ReprOptions,
82                             min: i128,
83                             max: i128)
84                             -> (Integer, bool) {
85         // Theoretically, negative values could be larger in unsigned representation
86         // than the unsigned representation of the signed minimum. However, if there
87         // are any negative values, the only valid unsigned representation is u128
88         // which can fit all i128 values, so the result remains unaffected.
89         let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
90         let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));
91
92         let mut min_from_extern = None;
93         let min_default = I8;
94
95         if let Some(ity) = repr.int {
96             let discr = Integer::from_attr(&tcx, ity);
97             let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
98             if discr < fit {
99                 bug!("Integer::repr_discr: `#[repr]` hint too small for \
100                       discriminant range of enum `{}", ty)
101             }
102             return (discr, ity.is_signed());
103         }
104
105         if repr.c() {
106             match &tcx.sess.target.target.arch[..] {
107                 // WARNING: the ARM EABI has two variants; the one corresponding
108                 // to `at_least == I32` appears to be used on Linux and NetBSD,
109                 // but some systems may use the variant corresponding to no
110                 // lower bound. However, we don't run on those yet...?
111                 "arm" => min_from_extern = Some(I32),
112                 _ => min_from_extern = Some(I32),
113             }
114         }
115
116         let at_least = min_from_extern.unwrap_or(min_default);
117
118         // If there are no negative values, we can use the unsigned fit.
119         if min >= 0 {
120             (cmp::max(unsigned_fit, at_least), false)
121         } else {
122             (cmp::max(signed_fit, at_least), true)
123         }
124     }
125 }
126
/// Extension trait mapping a layout `Primitive` back to a `rustc` type.
pub trait PrimitiveExt {
    /// The `Ty` used to represent this primitive value.
    fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Ty<'tcx>;
}
130
impl PrimitiveExt for Primitive {
    /// Map this primitive to the `Ty` used to represent it.
    fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Ty<'tcx> {
        match *self {
            Int(i, signed) => i.to_ty(tcx, signed),
            Float(FloatTy::F32) => tcx.types.f32,
            Float(FloatTy::F64) => tcx.types.f64,
            // There is no surface-language type for a bare thin pointer,
            // so `*mut ()` is used as a stand-in.
            Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
        }
    }
}
141
/// The first half of a fat pointer.
///
/// - For a trait object, this is the address of the box.
/// - For a slice, this is the base address.
pub const FAT_PTR_ADDR: usize = 0;

/// The second half of a fat pointer.
///
/// - For a trait object, this is the address of the vtable.
/// - For a slice, this is the length.
pub const FAT_PTR_EXTRA: usize = 1;
153
/// Errors that can occur while computing the layout of a type.
#[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable)]
pub enum LayoutError<'tcx> {
    /// The layout of the type cannot be determined (e.g. an unnormalizable
    /// projection or an unsupported unsized tail — see `layout_raw_uncached`).
    Unknown(Ty<'tcx>),
    /// A size computation overflowed (the type is too big for the target).
    SizeOverflow(Ty<'tcx>)
}
159
160 impl<'tcx> fmt::Display for LayoutError<'tcx> {
161     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
162         match *self {
163             LayoutError::Unknown(ty) => {
164                 write!(f, "the type `{:?}` has an unknown layout", ty)
165             }
166             LayoutError::SizeOverflow(ty) => {
167                 write!(f, "the type `{:?}` is too big for the current architecture", ty)
168             }
169         }
170     }
171 }
172
/// Query provider computing the raw layout of a type in a given `ParamEnv`.
///
/// Layout computation can recurse (a type's layout may require the layouts
/// of its fields), so the current depth is tracked in the thread-local
/// `ImplicitCtxt` and compilation is aborted once the session's recursion
/// limit is exceeded.
fn layout_raw<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                        query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
                        -> Result<&'tcx LayoutDetails, LayoutError<'tcx>>
{
    ty::tls::with_related_context(tcx, move |icx| {
        let rec_limit = *tcx.sess.recursion_limit.get();
        let (param_env, ty) = query.into_parts();

        // Fatal rather than a `LayoutError`: blowing the recursion limit
        // means the type cannot be represented at all.
        if icx.layout_depth > rec_limit {
            tcx.sess.fatal(
                &format!("overflow representing the type `{}`", ty));
        }

        // Update the ImplicitCtxt to increase the layout_depth
        let icx = ty::tls::ImplicitCtxt {
            layout_depth: icx.layout_depth + 1,
            ..icx.clone()
        };

        // Run the actual computation with the deeper context installed so
        // that nested `layout_raw` calls observe the incremented depth.
        ty::tls::enter_context(&icx, |_| {
            let cx = LayoutCx { tcx, param_env };
            cx.layout_raw_uncached(ty)
        })
    })
}
198
199 pub fn provide(providers: &mut ty::query::Providers<'_>) {
200     *providers = ty::query::Providers {
201         layout_raw,
202         ..*providers
203     };
204 }
205
/// Context for layout computations: a type context plus the parameter
/// environment the layout is computed in.
pub struct LayoutCx<'tcx, C> {
    /// The context, generic so callers other than `TyCtxt` can be supported.
    pub tcx: C,
    /// Where-clauses in scope; used below for normalization and
    /// sizedness checks of pointees.
    pub param_env: ty::ParamEnv<'tcx>
}
210
211 impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
212     fn layout_raw_uncached(&self, ty: Ty<'tcx>)
213                            -> Result<&'tcx LayoutDetails, LayoutError<'tcx>> {
214         let tcx = self.tcx;
215         let param_env = self.param_env;
216         let dl = self.data_layout();
217         let scalar_unit = |value: Primitive| {
218             let bits = value.size(dl).bits();
219             assert!(bits <= 128);
220             Scalar {
221                 value,
222                 valid_range: 0..=(!0 >> (128 - bits))
223             }
224         };
225         let scalar = |value: Primitive| {
226             tcx.intern_layout(LayoutDetails::scalar(self, scalar_unit(value)))
227         };
228         let scalar_pair = |a: Scalar, b: Scalar| {
229             let align = a.value.align(dl).max(b.value.align(dl)).max(dl.aggregate_align);
230             let b_offset = a.value.size(dl).abi_align(b.value.align(dl));
231             let size = (b_offset + b.value.size(dl)).abi_align(align);
232             LayoutDetails {
233                 variants: Variants::Single { index: VariantIdx::new(0) },
234                 fields: FieldPlacement::Arbitrary {
235                     offsets: vec![Size::ZERO, b_offset],
236                     memory_index: vec![0, 1]
237                 },
238                 abi: Abi::ScalarPair(a, b),
239                 align,
240                 size
241             }
242         };
243
244         #[derive(Copy, Clone, Debug)]
245         enum StructKind {
246             /// A tuple, closure, or univariant which cannot be coerced to unsized.
247             AlwaysSized,
248             /// A univariant, the last field of which may be coerced to unsized.
249             MaybeUnsized,
250             /// A univariant, but with a prefix of an arbitrary size & alignment (e.g. enum tag).
251             Prefixed(Size, Align),
252         }
253
254         let univariant_uninterned = |fields: &[TyLayout<'_>], repr: &ReprOptions, kind| {
255             let packed = repr.packed();
256             if packed && repr.align > 0 {
257                 bug!("struct cannot be packed and aligned");
258             }
259
260             let pack = {
261                 let pack = repr.pack as u64;
262                 Align::from_bytes(pack, pack).unwrap()
263             };
264
265             let mut align = if packed {
266                 dl.i8_align
267             } else {
268                 dl.aggregate_align
269             };
270
271             let mut sized = true;
272             let mut offsets = vec![Size::ZERO; fields.len()];
273             let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();
274
275             let mut optimize = !repr.inhibit_struct_field_reordering_opt();
276             if let StructKind::Prefixed(_, align) = kind {
277                 optimize &= align.abi() == 1;
278             }
279
280             if optimize {
281                 let end = if let StructKind::MaybeUnsized = kind {
282                     fields.len() - 1
283                 } else {
284                     fields.len()
285                 };
286                 let optimizing = &mut inverse_memory_index[..end];
287                 let field_align = |f: &TyLayout<'_>| {
288                     if packed { f.align.min(pack).abi() } else { f.align.abi() }
289                 };
290                 match kind {
291                     StructKind::AlwaysSized |
292                     StructKind::MaybeUnsized => {
293                         optimizing.sort_by_key(|&x| {
294                             // Place ZSTs first to avoid "interesting offsets",
295                             // especially with only one or two non-ZST fields.
296                             let f = &fields[x as usize];
297                             (!f.is_zst(), cmp::Reverse(field_align(f)))
298                         });
299                     }
300                     StructKind::Prefixed(..) => {
301                         optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
302                     }
303                 }
304             }
305
306             // inverse_memory_index holds field indices by increasing memory offset.
307             // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
308             // We now write field offsets to the corresponding offset slot;
309             // field 5 with offset 0 puts 0 in offsets[5].
310             // At the bottom of this function, we use inverse_memory_index to produce memory_index.
311
312             let mut offset = Size::ZERO;
313
314             if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
315                 if packed {
316                     let prefix_align = prefix_align.min(pack);
317                     align = align.max(prefix_align);
318                 } else {
319                     align = align.max(prefix_align);
320                 }
321                 offset = prefix_size.abi_align(prefix_align);
322             }
323
324             for &i in &inverse_memory_index {
325                 let field = fields[i as usize];
326                 if !sized {
327                     bug!("univariant: field #{} of `{}` comes after unsized field",
328                          offsets.len(), ty);
329                 }
330
331                 if field.is_unsized() {
332                     sized = false;
333                 }
334
335                 // Invariant: offset < dl.obj_size_bound() <= 1<<61
336                 if packed {
337                     let field_pack = field.align.min(pack);
338                     offset = offset.abi_align(field_pack);
339                     align = align.max(field_pack);
340                 }
341                 else {
342                     offset = offset.abi_align(field.align);
343                     align = align.max(field.align);
344                 }
345
346                 debug!("univariant offset: {:?} field: {:#?}", offset, field);
347                 offsets[i as usize] = offset;
348
349                 offset = offset.checked_add(field.size, dl)
350                     .ok_or(LayoutError::SizeOverflow(ty))?;
351             }
352
353             if repr.align > 0 {
354                 let repr_align = repr.align as u64;
355                 align = align.max(Align::from_bytes(repr_align, repr_align).unwrap());
356                 debug!("univariant repr_align: {:?}", repr_align);
357             }
358
359             debug!("univariant min_size: {:?}", offset);
360             let min_size = offset;
361
362             // As stated above, inverse_memory_index holds field indices by increasing offset.
363             // This makes it an already-sorted view of the offsets vec.
364             // To invert it, consider:
365             // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
366             // Field 5 would be the first element, so memory_index is i:
367             // Note: if we didn't optimize, it's already right.
368
369             let mut memory_index;
370             if optimize {
371                 memory_index = vec![0; inverse_memory_index.len()];
372
373                 for i in 0..inverse_memory_index.len() {
374                     memory_index[inverse_memory_index[i] as usize]  = i as u32;
375                 }
376             } else {
377                 memory_index = inverse_memory_index;
378             }
379
380             let size = min_size.abi_align(align);
381             let mut abi = Abi::Aggregate { sized };
382
383             // Unpack newtype ABIs and find scalar pairs.
384             if sized && size.bytes() > 0 {
385                 // All other fields must be ZSTs, and we need them to all start at 0.
386                 let mut zst_offsets =
387                     offsets.iter().enumerate().filter(|&(i, _)| fields[i].is_zst());
388                 if zst_offsets.all(|(_, o)| o.bytes() == 0) {
389                     let mut non_zst_fields =
390                         fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());
391
392                     match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
393                         // We have exactly one non-ZST field.
394                         (Some((i, field)), None, None) => {
395                             // Field fills the struct and it has a scalar or scalar pair ABI.
396                             if offsets[i].bytes() == 0 &&
397                                align.abi() == field.align.abi() &&
398                                size == field.size {
399                                 match field.abi {
400                                     // For plain scalars, or vectors of them, we can't unpack
401                                     // newtypes for `#[repr(C)]`, as that affects C ABIs.
402                                     Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
403                                         abi = field.abi.clone();
404                                     }
405                                     // But scalar pairs are Rust-specific and get
406                                     // treated as aggregates by C ABIs anyway.
407                                     Abi::ScalarPair(..) => {
408                                         abi = field.abi.clone();
409                                     }
410                                     _ => {}
411                                 }
412                             }
413                         }
414
415                         // Two non-ZST fields, and they're both scalars.
416                         (Some((i, &TyLayout {
417                             details: &LayoutDetails { abi: Abi::Scalar(ref a), .. }, ..
418                         })), Some((j, &TyLayout {
419                             details: &LayoutDetails { abi: Abi::Scalar(ref b), .. }, ..
420                         })), None) => {
421                             // Order by the memory placement, not source order.
422                             let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
423                                 ((i, a), (j, b))
424                             } else {
425                                 ((j, b), (i, a))
426                             };
427                             let pair = scalar_pair(a.clone(), b.clone());
428                             let pair_offsets = match pair.fields {
429                                 FieldPlacement::Arbitrary {
430                                     ref offsets,
431                                     ref memory_index
432                                 } => {
433                                     assert_eq!(memory_index, &[0, 1]);
434                                     offsets
435                                 }
436                                 _ => bug!()
437                             };
438                             if offsets[i] == pair_offsets[0] &&
439                                offsets[j] == pair_offsets[1] &&
440                                align == pair.align &&
441                                size == pair.size {
442                                 // We can use `ScalarPair` only when it matches our
443                                 // already computed layout (including `#[repr(C)]`).
444                                 abi = pair.abi;
445                             }
446                         }
447
448                         _ => {}
449                     }
450                 }
451             }
452
453             if sized && fields.iter().any(|f| f.abi.is_uninhabited()) {
454                 abi = Abi::Uninhabited;
455             }
456
457             Ok(LayoutDetails {
458                 variants: Variants::Single { index: VariantIdx::new(0) },
459                 fields: FieldPlacement::Arbitrary {
460                     offsets,
461                     memory_index
462                 },
463                 abi,
464                 align,
465                 size
466             })
467         };
468         let univariant = |fields: &[TyLayout<'_>], repr: &ReprOptions, kind| {
469             Ok(tcx.intern_layout(univariant_uninterned(fields, repr, kind)?))
470         };
471         debug_assert!(!ty.has_infer_types());
472
473         Ok(match ty.sty {
474             // Basic scalars.
475             ty::Bool => {
476                 tcx.intern_layout(LayoutDetails::scalar(self, Scalar {
477                     value: Int(I8, false),
478                     valid_range: 0..=1
479                 }))
480             }
481             ty::Char => {
482                 tcx.intern_layout(LayoutDetails::scalar(self, Scalar {
483                     value: Int(I32, false),
484                     valid_range: 0..=0x10FFFF
485                 }))
486             }
487             ty::Int(ity) => {
488                 scalar(Int(Integer::from_attr(dl, attr::SignedInt(ity)), true))
489             }
490             ty::Uint(ity) => {
491                 scalar(Int(Integer::from_attr(dl, attr::UnsignedInt(ity)), false))
492             }
493             ty::Float(fty) => scalar(Float(fty)),
494             ty::FnPtr(_) => {
495                 let mut ptr = scalar_unit(Pointer);
496                 ptr.valid_range = 1..=*ptr.valid_range.end();
497                 tcx.intern_layout(LayoutDetails::scalar(self, ptr))
498             }
499
500             // The never type.
501             ty::Never => {
502                 tcx.intern_layout(LayoutDetails {
503                     variants: Variants::Single { index: VariantIdx::new(0) },
504                     fields: FieldPlacement::Union(0),
505                     abi: Abi::Uninhabited,
506                     align: dl.i8_align,
507                     size: Size::ZERO
508                 })
509             }
510
511             // Potentially-fat pointers.
512             ty::Ref(_, pointee, _) |
513             ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
514                 let mut data_ptr = scalar_unit(Pointer);
515                 if !ty.is_unsafe_ptr() {
516                     data_ptr.valid_range = 1..=*data_ptr.valid_range.end();
517                 }
518
519                 let pointee = tcx.normalize_erasing_regions(param_env, pointee);
520                 if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
521                     return Ok(tcx.intern_layout(LayoutDetails::scalar(self, data_ptr)));
522                 }
523
524                 let unsized_part = tcx.struct_tail(pointee);
525                 let metadata = match unsized_part.sty {
526                     ty::Foreign(..) => {
527                         return Ok(tcx.intern_layout(LayoutDetails::scalar(self, data_ptr)));
528                     }
529                     ty::Slice(_) | ty::Str => {
530                         scalar_unit(Int(dl.ptr_sized_integer(), false))
531                     }
532                     ty::Dynamic(..) => {
533                         let mut vtable = scalar_unit(Pointer);
534                         vtable.valid_range = 1..=*vtable.valid_range.end();
535                         vtable
536                     }
537                     _ => return Err(LayoutError::Unknown(unsized_part))
538                 };
539
540                 // Effectively a (ptr, meta) tuple.
541                 tcx.intern_layout(scalar_pair(data_ptr, metadata))
542             }
543
544             // Arrays and slices.
545             ty::Array(element, mut count) => {
546                 if count.has_projections() {
547                     count = tcx.normalize_erasing_regions(param_env, count);
548                     if count.has_projections() {
549                         return Err(LayoutError::Unknown(ty));
550                     }
551                 }
552
553                 let element = self.layout_of(element)?;
554                 let count = count.unwrap_usize(tcx);
555                 let size = element.size.checked_mul(count, dl)
556                     .ok_or(LayoutError::SizeOverflow(ty))?;
557
558                 tcx.intern_layout(LayoutDetails {
559                     variants: Variants::Single { index: VariantIdx::new(0) },
560                     fields: FieldPlacement::Array {
561                         stride: element.size,
562                         count
563                     },
564                     abi: Abi::Aggregate { sized: true },
565                     align: element.align,
566                     size
567                 })
568             }
569             ty::Slice(element) => {
570                 let element = self.layout_of(element)?;
571                 tcx.intern_layout(LayoutDetails {
572                     variants: Variants::Single { index: VariantIdx::new(0) },
573                     fields: FieldPlacement::Array {
574                         stride: element.size,
575                         count: 0
576                     },
577                     abi: Abi::Aggregate { sized: false },
578                     align: element.align,
579                     size: Size::ZERO
580                 })
581             }
582             ty::Str => {
583                 tcx.intern_layout(LayoutDetails {
584                     variants: Variants::Single { index: VariantIdx::new(0) },
585                     fields: FieldPlacement::Array {
586                         stride: Size::from_bytes(1),
587                         count: 0
588                     },
589                     abi: Abi::Aggregate { sized: false },
590                     align: dl.i8_align,
591                     size: Size::ZERO
592                 })
593             }
594
595             // Odd unit types.
596             ty::FnDef(..) => {
597                 univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?
598             }
599             ty::Dynamic(..) | ty::Foreign(..) => {
600                 let mut unit = univariant_uninterned(&[], &ReprOptions::default(),
601                   StructKind::AlwaysSized)?;
602                 match unit.abi {
603                     Abi::Aggregate { ref mut sized } => *sized = false,
604                     _ => bug!()
605                 }
606                 tcx.intern_layout(unit)
607             }
608
609             // Tuples, generators and closures.
610             ty::Generator(def_id, ref substs, _) => {
611                 let tys = substs.field_tys(def_id, tcx);
612                 univariant(&tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
613                     &ReprOptions::default(),
614                     StructKind::AlwaysSized)?
615             }
616
617             ty::Closure(def_id, ref substs) => {
618                 let tys = substs.upvar_tys(def_id, tcx);
619                 univariant(&tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
620                     &ReprOptions::default(),
621                     StructKind::AlwaysSized)?
622             }
623
624             ty::Tuple(tys) => {
625                 let kind = if tys.len() == 0 {
626                     StructKind::AlwaysSized
627                 } else {
628                     StructKind::MaybeUnsized
629                 };
630
631                 univariant(&tys.iter().map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
632                            &ReprOptions::default(), kind)?
633             }
634
635             // SIMD vector types.
636             ty::Adt(def, ..) if def.repr.simd() => {
637                 let element = self.layout_of(ty.simd_type(tcx))?;
638                 let count = ty.simd_size(tcx) as u64;
639                 assert!(count > 0);
640                 let scalar = match element.abi {
641                     Abi::Scalar(ref scalar) => scalar.clone(),
642                     _ => {
643                         tcx.sess.fatal(&format!("monomorphising SIMD type `{}` with \
644                                                  a non-machine element type `{}`",
645                                                 ty, element.ty));
646                     }
647                 };
648                 let size = element.size.checked_mul(count, dl)
649                     .ok_or(LayoutError::SizeOverflow(ty))?;
650                 let align = dl.vector_align(size);
651                 let size = size.abi_align(align);
652
653                 tcx.intern_layout(LayoutDetails {
654                     variants: Variants::Single { index: VariantIdx::new(0) },
655                     fields: FieldPlacement::Array {
656                         stride: element.size,
657                         count
658                     },
659                     abi: Abi::Vector {
660                         element: scalar,
661                         count
662                     },
663                     size,
664                     align,
665                 })
666             }
667
668             // ADTs.
669             ty::Adt(def, substs) => {
670                 // Cache the field layouts.
671                 let variants = def.variants.iter().map(|v| {
672                     v.fields.iter().map(|field| {
673                         self.layout_of(field.ty(tcx, substs))
674                     }).collect::<Result<Vec<_>, _>>()
675                 }).collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
676
677                 if def.is_union() {
678                     let packed = def.repr.packed();
679                     if packed && def.repr.align > 0 {
680                         bug!("Union cannot be packed and aligned");
681                     }
682
683                     let pack = {
684                         let pack = def.repr.pack as u64;
685                         Align::from_bytes(pack, pack).unwrap()
686                     };
687
688                     let mut align = if packed {
689                         dl.i8_align
690                     } else {
691                         dl.aggregate_align
692                     };
693
694                     if def.repr.align > 0 {
695                         let repr_align = def.repr.align as u64;
696                         align = align.max(
697                             Align::from_bytes(repr_align, repr_align).unwrap());
698                     }
699
700                     let optimize = !def.repr.inhibit_union_abi_opt();
701                     let mut size = Size::ZERO;
702                     let mut abi = Abi::Aggregate { sized: true };
703                     let index = VariantIdx::new(0);
704                     for field in &variants[index] {
705                         assert!(!field.is_unsized());
706
707                         if packed {
708                             let field_pack = field.align.min(pack);
709                             align = align.max(field_pack);
710                         } else {
711                             align = align.max(field.align);
712                         }
713
714                         // If all non-ZST fields have the same ABI, forward this ABI
715                         if optimize && !field.is_zst() {
716                             // Normalize scalar_unit to the maximal valid range
717                             let field_abi = match &field.abi {
718                                 Abi::Scalar(x) => Abi::Scalar(scalar_unit(x.value)),
719                                 Abi::ScalarPair(x, y) => {
720                                     Abi::ScalarPair(
721                                         scalar_unit(x.value),
722                                         scalar_unit(y.value),
723                                     )
724                                 }
725                                 Abi::Vector { element: x, count } => {
726                                     Abi::Vector {
727                                         element: scalar_unit(x.value),
728                                         count: *count,
729                                     }
730                                 }
731                                 Abi::Uninhabited |
732                                 Abi::Aggregate { .. }  => Abi::Aggregate { sized: true },
733                             };
734
735                             if size == Size::ZERO {
736                                 // first non ZST: initialize 'abi'
737                                 abi = field_abi;
738                             } else if abi != field_abi  {
739                                 // different fields have different ABI: reset to Aggregate
740                                 abi = Abi::Aggregate { sized: true };
741                             }
742                         }
743
744                         size = cmp::max(size, field.size);
745                     }
746
747                     return Ok(tcx.intern_layout(LayoutDetails {
748                         variants: Variants::Single { index },
749                         fields: FieldPlacement::Union(variants[index].len()),
750                         abi,
751                         align,
752                         size: size.abi_align(align)
753                     }));
754                 }
755
756                 // A variant is absent if it's uninhabited and only has ZST fields.
757                 // Present uninhabited variants only require space for their fields,
758                 // but *not* an encoding of the discriminant (e.g. a tag value).
759                 // See issue #49298 for more details on the need to leave space
760                 // for non-ZST uninhabited data (mostly partial initialization).
761                 let absent = |fields: &[TyLayout<'_>]| {
762                     let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
763                     let is_zst = fields.iter().all(|f| f.is_zst());
764                     uninhabited && is_zst
765                 };
766                 let (present_first, present_second) = {
767                     let mut present_variants = variants.iter_enumerated().filter_map(|(i, v)| {
768                         if absent(v) {
769                             None
770                         } else {
771                             Some(i)
772                         }
773                     });
774                     (present_variants.next(), present_variants.next())
775                 };
776                 if present_first.is_none() {
777                     // Uninhabited because it has no variants, or only absent ones.
778                     return tcx.layout_raw(param_env.and(tcx.types.never));
779                 }
780
781                 let is_struct = !def.is_enum() ||
782                     // Only one variant is present.
783                     (present_second.is_none() &&
784                     // Representation optimizations are allowed.
785                     !def.repr.inhibit_enum_layout_opt());
786                 if is_struct {
787                     // Struct, or univariant enum equivalent to a struct.
788                     // (Typechecking will reject discriminant-sizing attrs.)
789
790                     let v = present_first.unwrap();
791                     let kind = if def.is_enum() || variants[v].len() == 0 {
792                         StructKind::AlwaysSized
793                     } else {
794                         let param_env = tcx.param_env(def.did);
795                         let last_field = def.variants[v].fields.last().unwrap();
796                         let always_sized = tcx.type_of(last_field.did)
797                                               .is_sized(tcx.at(DUMMY_SP), param_env);
798                         if !always_sized { StructKind::MaybeUnsized }
799                         else { StructKind::AlwaysSized }
800                     };
801
802                     let mut st = univariant_uninterned(&variants[v], &def.repr, kind)?;
803                     st.variants = Variants::Single { index: v };
804                     let (start, end) = self.tcx.layout_scalar_valid_range(def.did);
805                     match st.abi {
806                         Abi::Scalar(ref mut scalar) |
807                         Abi::ScalarPair(ref mut scalar, _) => {
808                             // the asserts ensure that we are not using the
809                             // `#[rustc_layout_scalar_valid_range(n)]`
810                             // attribute to widen the range of anything as that would probably
811                             // result in UB somewhere
812                             if let Bound::Included(start) = start {
813                                 assert!(*scalar.valid_range.start() <= start);
814                                 scalar.valid_range = start..=*scalar.valid_range.end();
815                             }
816                             if let Bound::Included(end) = end {
817                                 assert!(*scalar.valid_range.end() >= end);
818                                 scalar.valid_range = *scalar.valid_range.start()..=end;
819                             }
820                         }
821                         _ => assert!(
822                             start == Bound::Unbounded && end == Bound::Unbounded,
823                             "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
824                             def,
825                             st,
826                         ),
827                     }
828                     return Ok(tcx.intern_layout(st));
829                 }
830
831                 // The current code for niche-filling relies on variant indices
832                 // instead of actual discriminants, so dataful enums with
833                 // explicit discriminants (RFC #2363) would misbehave.
834                 let no_explicit_discriminants = def.variants.iter_enumerated()
835                     .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i.as_u32()));
836
837                 // Niche-filling enum optimization.
838                 if !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants {
839                     let mut dataful_variant = None;
840                     let mut niche_variants = VariantIdx::MAX..=VariantIdx::new(0);
841
842                     // Find one non-ZST variant.
843                     'variants: for (v, fields) in variants.iter_enumerated() {
844                         if absent(fields) {
845                             continue 'variants;
846                         }
847                         for f in fields {
848                             if !f.is_zst() {
849                                 if dataful_variant.is_none() {
850                                     dataful_variant = Some(v);
851                                     continue 'variants;
852                                 } else {
853                                     dataful_variant = None;
854                                     break 'variants;
855                                 }
856                             }
857                         }
858                         niche_variants = *niche_variants.start().min(&v)..=v;
859                     }
860
861                     if niche_variants.start() > niche_variants.end() {
862                         dataful_variant = None;
863                     }
864
865                     if let Some(i) = dataful_variant {
866                         let count = (
867                             niche_variants.end().as_u32() - niche_variants.start().as_u32() + 1
868                         ) as u128;
869                         for (field_index, &field) in variants[i].iter().enumerate() {
870                             let niche = match self.find_niche(field)? {
871                                 Some(niche) => niche,
872                                 _ => continue,
873                             };
874                             let (niche_start, niche_scalar) = match niche.reserve(self, count) {
875                                 Some(pair) => pair,
876                                 None => continue,
877                             };
878
879                             let mut align = dl.aggregate_align;
880                             let st = variants.iter_enumerated().map(|(j, v)| {
881                                 let mut st = univariant_uninterned(v,
882                                     &def.repr, StructKind::AlwaysSized)?;
883                                 st.variants = Variants::Single { index: j };
884
885                                 align = align.max(st.align);
886
887                                 Ok(st)
888                             }).collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
889
890                             let offset = st[i].fields.offset(field_index) + niche.offset;
891                             let size = st[i].size;
892
893                             let mut abi = match st[i].abi {
894                                 Abi::Scalar(_) => Abi::Scalar(niche_scalar.clone()),
895                                 Abi::ScalarPair(ref first, ref second) => {
896                                     // We need to use scalar_unit to reset the
897                                     // valid range to the maximal one for that
898                                     // primitive, because only the niche is
899                                     // guaranteed to be initialised, not the
900                                     // other primitive.
901                                     if offset.bytes() == 0 {
902                                         Abi::ScalarPair(
903                                             niche_scalar.clone(),
904                                             scalar_unit(second.value),
905                                         )
906                                     } else {
907                                         Abi::ScalarPair(
908                                             scalar_unit(first.value),
909                                             niche_scalar.clone(),
910                                         )
911                                     }
912                                 }
913                                 _ => Abi::Aggregate { sized: true },
914                             };
915
916                             if st.iter().all(|v| v.abi.is_uninhabited()) {
917                                 abi = Abi::Uninhabited;
918                             }
919
920                             return Ok(tcx.intern_layout(LayoutDetails {
921                                 variants: Variants::NicheFilling {
922                                     dataful_variant: i,
923                                     niche_variants,
924                                     niche: niche_scalar,
925                                     niche_start,
926                                     variants: st,
927                                 },
928                                 fields: FieldPlacement::Arbitrary {
929                                     offsets: vec![offset],
930                                     memory_index: vec![0]
931                                 },
932                                 abi,
933                                 size,
934                                 align,
935                             }));
936                         }
937                     }
938                 }
939
940                 let (mut min, mut max) = (i128::max_value(), i128::min_value());
941                 let discr_type = def.repr.discr_type();
942                 let bits = Integer::from_attr(self, discr_type).size().bits();
943                 for (i, discr) in def.discriminants(tcx) {
944                     if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
945                         continue;
946                     }
947                     let mut x = discr.val as i128;
948                     if discr_type.is_signed() {
949                         // sign extend the raw representation to be an i128
950                         x = (x << (128 - bits)) >> (128 - bits);
951                     }
952                     if x < min { min = x; }
953                     if x > max { max = x; }
954                 }
955                 // We might have no inhabited variants, so pretend there's at least one.
956                 if (min, max) == (i128::max_value(), i128::min_value()) {
957                     min = 0;
958                     max = 0;
959                 }
960                 assert!(min <= max, "discriminant range is {}...{}", min, max);
961                 let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max);
962
963                 let mut align = dl.aggregate_align;
964                 let mut size = Size::ZERO;
965
966                 // We're interested in the smallest alignment, so start large.
967                 let mut start_align = Align::from_bytes(256, 256).unwrap();
968                 assert_eq!(Integer::for_abi_align(dl, start_align), None);
969
970                 // repr(C) on an enum tells us to make a (tag, union) layout,
971                 // so we need to grow the prefix alignment to be at least
972                 // the alignment of the union. (This value is used both for
973                 // determining the alignment of the overall enum, and the
974                 // determining the alignment of the payload after the tag.)
975                 let mut prefix_align = min_ity.align(dl);
976                 if def.repr.c() {
977                     for fields in &variants {
978                         for field in fields {
979                             prefix_align = prefix_align.max(field.align);
980                         }
981                     }
982                 }
983
984                 // Create the set of structs that represent each variant.
985                 let mut layout_variants = variants.iter_enumerated().map(|(i, field_layouts)| {
986                     let mut st = univariant_uninterned(&field_layouts,
987                         &def.repr, StructKind::Prefixed(min_ity.size(), prefix_align))?;
988                     st.variants = Variants::Single { index: i };
989                     // Find the first field we can't move later
990                     // to make room for a larger discriminant.
991                     for field in st.fields.index_by_increasing_offset().map(|j| field_layouts[j]) {
992                         if !field.is_zst() || field.align.abi() != 1 {
993                             start_align = start_align.min(field.align);
994                             break;
995                         }
996                     }
997                     size = cmp::max(size, st.size);
998                     align = align.max(st.align);
999                     Ok(st)
1000                 }).collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1001
1002                 // Align the maximum variant size to the largest alignment.
1003                 size = size.abi_align(align);
1004
1005                 if size.bytes() >= dl.obj_size_bound() {
1006                     return Err(LayoutError::SizeOverflow(ty));
1007                 }
1008
1009                 let typeck_ity = Integer::from_attr(dl, def.repr.discr_type());
1010                 if typeck_ity < min_ity {
1011                     // It is a bug if Layout decided on a greater discriminant size than typeck for
1012                     // some reason at this point (based on values discriminant can take on). Mostly
1013                     // because this discriminant will be loaded, and then stored into variable of
1014                     // type calculated by typeck. Consider such case (a bug): typeck decided on
1015                     // byte-sized discriminant, but layout thinks we need a 16-bit to store all
1016                     // discriminant values. That would be a bug, because then, in codegen, in order
1017                     // to store this 16-bit discriminant into 8-bit sized temporary some of the
1018                     // space necessary to represent would have to be discarded (or layout is wrong
1019                     // on thinking it needs 16 bits)
1020                     bug!("layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
1021                          min_ity, typeck_ity);
1022                     // However, it is fine to make discr type however large (as an optimisation)
1023                     // after this point â€“ we’ll just truncate the value we load in codegen.
1024                 }
1025
1026                 // Check to see if we should use a different type for the
1027                 // discriminant. We can safely use a type with the same size
1028                 // as the alignment of the first field of each variant.
1029                 // We increase the size of the discriminant to avoid LLVM copying
1030                 // padding when it doesn't need to. This normally causes unaligned
1031                 // load/stores and excessive memcpy/memset operations. By using a
1032                 // bigger integer size, LLVM can be sure about its contents and
1033                 // won't be so conservative.
1034
1035                 // Use the initial field alignment
1036                 let mut ity = if def.repr.c() || def.repr.int.is_some() {
1037                     min_ity
1038                 } else {
1039                     Integer::for_abi_align(dl, start_align).unwrap_or(min_ity)
1040                 };
1041
1042                 // If the alignment is not larger than the chosen discriminant size,
1043                 // don't use the alignment as the final size.
1044                 if ity <= min_ity {
1045                     ity = min_ity;
1046                 } else {
1047                     // Patch up the variants' first few fields.
1048                     let old_ity_size = min_ity.size();
1049                     let new_ity_size = ity.size();
1050                     for variant in &mut layout_variants {
1051                         match variant.fields {
1052                             FieldPlacement::Arbitrary { ref mut offsets, .. } => {
1053                                 for i in offsets {
1054                                     if *i <= old_ity_size {
1055                                         assert_eq!(*i, old_ity_size);
1056                                         *i = new_ity_size;
1057                                     }
1058                                 }
1059                                 // We might be making the struct larger.
1060                                 if variant.size <= old_ity_size {
1061                                     variant.size = new_ity_size;
1062                                 }
1063                             }
1064                             _ => bug!()
1065                         }
1066                     }
1067                 }
1068
1069                 let tag_mask = !0u128 >> (128 - ity.size().bits());
1070                 let tag = Scalar {
1071                     value: Int(ity, signed),
1072                     valid_range: (min as u128 & tag_mask)..=(max as u128 & tag_mask),
1073                 };
1074                 let mut abi = Abi::Aggregate { sized: true };
1075                 if tag.value.size(dl) == size {
1076                     abi = Abi::Scalar(tag.clone());
1077                 } else {
1078                     // Try to use a ScalarPair for all tagged enums.
1079                     let mut common_prim = None;
1080                     for (field_layouts, layout_variant) in variants.iter().zip(&layout_variants) {
1081                         let offsets = match layout_variant.fields {
1082                             FieldPlacement::Arbitrary { ref offsets, .. } => offsets,
1083                             _ => bug!(),
1084                         };
1085                         let mut fields = field_layouts
1086                             .iter()
1087                             .zip(offsets)
1088                             .filter(|p| !p.0.is_zst());
1089                         let (field, offset) = match (fields.next(), fields.next()) {
1090                             (None, None) => continue,
1091                             (Some(pair), None) => pair,
1092                             _ => {
1093                                 common_prim = None;
1094                                 break;
1095                             }
1096                         };
1097                         let prim = match field.details.abi {
1098                             Abi::Scalar(ref scalar) => scalar.value,
1099                             _ => {
1100                                 common_prim = None;
1101                                 break;
1102                             }
1103                         };
1104                         if let Some(pair) = common_prim {
1105                             // This is pretty conservative. We could go fancier
1106                             // by conflating things like i32 and u32, or even
1107                             // realising that (u8, u8) could just cohabit with
1108                             // u16 or even u32.
1109                             if pair != (prim, offset) {
1110                                 common_prim = None;
1111                                 break;
1112                             }
1113                         } else {
1114                             common_prim = Some((prim, offset));
1115                         }
1116                     }
1117                     if let Some((prim, offset)) = common_prim {
1118                         let pair = scalar_pair(tag.clone(), scalar_unit(prim));
1119                         let pair_offsets = match pair.fields {
1120                             FieldPlacement::Arbitrary {
1121                                 ref offsets,
1122                                 ref memory_index
1123                             } => {
1124                                 assert_eq!(memory_index, &[0, 1]);
1125                                 offsets
1126                             }
1127                             _ => bug!()
1128                         };
1129                         if pair_offsets[0] == Size::ZERO &&
1130                             pair_offsets[1] == *offset &&
1131                             align == pair.align &&
1132                             size == pair.size {
1133                             // We can use `ScalarPair` only when it matches our
1134                             // already computed layout (including `#[repr(C)]`).
1135                             abi = pair.abi;
1136                         }
1137                     }
1138                 }
1139
1140                 if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
1141                     abi = Abi::Uninhabited;
1142                 }
1143
1144                 tcx.intern_layout(LayoutDetails {
1145                     variants: Variants::Tagged {
1146                         tag,
1147                         variants: layout_variants,
1148                     },
1149                     fields: FieldPlacement::Arbitrary {
1150                         offsets: vec![Size::ZERO],
1151                         memory_index: vec![0]
1152                     },
1153                     abi,
1154                     align,
1155                     size
1156                 })
1157             }
1158
1159             // Types with no meaningful known layout.
1160             ty::Projection(_) | ty::Opaque(..) => {
1161                 let normalized = tcx.normalize_erasing_regions(param_env, ty);
1162                 if ty == normalized {
1163                     return Err(LayoutError::Unknown(ty));
1164                 }
1165                 tcx.layout_raw(param_env.and(normalized))?
1166             }
1167
1168             ty::Bound(..) |
1169             ty::UnnormalizedProjection(..) |
1170             ty::GeneratorWitness(..) |
1171             ty::Infer(_) => {
1172                 bug!("LayoutDetails::compute: unexpected type `{}`", ty)
1173             }
1174
1175             ty::Param(_) | ty::Error => {
1176                 return Err(LayoutError::Unknown(ty));
1177             }
1178         })
1179     }
1180
1181     /// This is invoked by the `layout_raw` query to record the final
1182     /// layout of each type.
1183     #[inline]
1184     fn record_layout_for_printing(&self, layout: TyLayout<'tcx>) {
1185         // If we are running with `-Zprint-type-sizes`, record layouts for
1186         // dumping later. Ignore layouts that are done with non-empty
1187         // environments or non-monomorphic layouts, as the user only wants
1188         // to see the stuff resulting from the final codegen session.
1189         if
1190             !self.tcx.sess.opts.debugging_opts.print_type_sizes ||
1191             layout.ty.has_param_types() ||
1192             layout.ty.has_self_ty() ||
1193             !self.param_env.caller_bounds.is_empty()
1194         {
1195             return;
1196         }
1197
1198         self.record_layout_for_printing_outlined(layout)
1199     }
1200
1201     fn record_layout_for_printing_outlined(&self, layout: TyLayout<'tcx>) {
1202         // (delay format until we actually need it)
1203         let record = |kind, packed, opt_discr_size, variants| {
1204             let type_desc = format!("{:?}", layout.ty);
1205             self.tcx.sess.code_stats.borrow_mut().record_type_size(kind,
1206                                                                    type_desc,
1207                                                                    layout.align,
1208                                                                    layout.size,
1209                                                                    packed,
1210                                                                    opt_discr_size,
1211                                                                    variants);
1212         };
1213
1214         let adt_def = match layout.ty.sty {
1215             ty::Adt(ref adt_def, _) => {
1216                 debug!("print-type-size t: `{:?}` process adt", layout.ty);
1217                 adt_def
1218             }
1219
1220             ty::Closure(..) => {
1221                 debug!("print-type-size t: `{:?}` record closure", layout.ty);
1222                 record(DataTypeKind::Closure, false, None, vec![]);
1223                 return;
1224             }
1225
1226             _ => {
1227                 debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
1228                 return;
1229             }
1230         };
1231
1232         let adt_kind = adt_def.adt_kind();
1233         let adt_packed = adt_def.repr.packed();
1234
1235         let build_variant_info = |n: Option<ast::Name>,
1236                                   flds: &[ast::Name],
1237                                   layout: TyLayout<'tcx>| {
1238             let mut min_size = Size::ZERO;
1239             let field_info: Vec<_> = flds.iter().enumerate().map(|(i, &name)| {
1240                 match layout.field(self, i) {
1241                     Err(err) => {
1242                         bug!("no layout found for field {}: `{:?}`", name, err);
1243                     }
1244                     Ok(field_layout) => {
1245                         let offset = layout.fields.offset(i);
1246                         let field_end = offset + field_layout.size;
1247                         if min_size < field_end {
1248                             min_size = field_end;
1249                         }
1250                         session::FieldInfo {
1251                             name: name.to_string(),
1252                             offset: offset.bytes(),
1253                             size: field_layout.size.bytes(),
1254                             align: field_layout.align.abi(),
1255                         }
1256                     }
1257                 }
1258             }).collect();
1259
1260             session::VariantInfo {
1261                 name: n.map(|n|n.to_string()),
1262                 kind: if layout.is_unsized() {
1263                     session::SizeKind::Min
1264                 } else {
1265                     session::SizeKind::Exact
1266                 },
1267                 align: layout.align.abi(),
1268                 size: if min_size.bytes() == 0 {
1269                     layout.size.bytes()
1270                 } else {
1271                     min_size.bytes()
1272                 },
1273                 fields: field_info,
1274             }
1275         };
1276
1277         match layout.variants {
1278             Variants::Single { index } => {
1279                 debug!("print-type-size `{:#?}` variant {}",
1280                        layout, adt_def.variants[index].name);
1281                 if !adt_def.variants.is_empty() {
1282                     let variant_def = &adt_def.variants[index];
1283                     let fields: Vec<_> =
1284                         variant_def.fields.iter().map(|f| f.ident.name).collect();
1285                     record(adt_kind.into(),
1286                            adt_packed,
1287                            None,
1288                            vec![build_variant_info(Some(variant_def.name),
1289                                                    &fields,
1290                                                    layout)]);
1291                 } else {
1292                     // (This case arises for *empty* enums; so give it
1293                     // zero variants.)
1294                     record(adt_kind.into(), adt_packed, None, vec![]);
1295                 }
1296             }
1297
1298             Variants::NicheFilling { .. } |
1299             Variants::Tagged { .. } => {
1300                 debug!("print-type-size `{:#?}` adt general variants def {}",
1301                        layout.ty, adt_def.variants.len());
1302                 let variant_infos: Vec<_> =
1303                     adt_def.variants.iter_enumerated().map(|(i, variant_def)| {
1304                         let fields: Vec<_> =
1305                             variant_def.fields.iter().map(|f| f.ident.name).collect();
1306                         build_variant_info(Some(variant_def.name),
1307                                            &fields,
1308                                            layout.for_variant(self, i))
1309                     })
1310                     .collect();
1311                 record(adt_kind.into(), adt_packed, match layout.variants {
1312                     Variants::Tagged { ref tag, .. } => Some(tag.value.size(self)),
1313                     _ => None
1314                 }, variant_infos);
1315             }
1316         }
1317     }
1318 }
1319
/// Type size "skeleton", i.e. the only information determining a type's size.
/// While this is conservative, (aside from constant sizes, only pointers,
/// newtypes thereof and null pointer optimized enums are allowed), it is
/// enough to statically check common use cases of transmute.
///
/// Produced by `SizeSkeleton::compute` even for types whose full layout
/// cannot be computed (e.g. because they still contain type parameters).
#[derive(Copy, Clone, Debug)]
pub enum SizeSkeleton<'tcx> {
    /// Any statically computable Layout.
    Known(Size),

    /// A potentially-fat pointer.
    Pointer {
        /// If true, this pointer is never null.
        /// (Set for references and other non-nullable pointers, so that
        /// null-pointer-optimized enums around them compare equal in size.)
        non_zero: bool,
        /// The type which determines the unsized metadata, if any,
        /// of this pointer. Either a type parameter or a projection
        /// depending on one, with regions erased.
        tail: Ty<'tcx>
    }
}
1339
impl<'a, 'tcx> SizeSkeleton<'tcx> {
    /// Attempts to compute the size skeleton of `ty`.
    ///
    /// First tries the full `layout_of` query; only when that fails
    /// (e.g. because `ty` still mentions type parameters) does it fall
    /// back to a structural analysis of pointers, newtypes thereof and
    /// two-variant enums. The original layout error is returned whenever
    /// no skeleton can be produced either.
    pub fn compute(ty: Ty<'tcx>,
                   tcx: TyCtxt<'a, 'tcx, 'tcx>,
                   param_env: ty::ParamEnv<'tcx>)
                   -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
        debug_assert!(!ty.has_infer_types());

        // First try computing a static layout.
        let err = match tcx.layout_of(param_env.and(ty)) {
            Ok(layout) => {
                return Ok(SizeSkeleton::Known(layout.size));
            }
            Err(err) => err
        };

        match ty.sty {
            ty::Ref(_, pointee, _) |
            ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                let non_zero = !ty.is_unsafe_ptr();
                let tail = tcx.struct_tail(pointee);
                match tail.sty {
                    ty::Param(_) | ty::Projection(_) => {
                        debug_assert!(tail.has_param_types() || tail.has_self_ty());
                        Ok(SizeSkeleton::Pointer {
                            non_zero,
                            tail: tcx.erase_regions(&tail)
                        })
                    }
                    _ => {
                        // Layout can only fail for a pointer if its tail
                        // is still generic; anything else is a bug.
                        bug!("SizeSkeleton::compute({}): layout errored ({}), yet \
                              tail `{}` is not a type parameter or a projection",
                             ty, err, tail)
                    }
                }
            }

            ty::Adt(def, substs) => {
                // Only newtypes and enums w/ nullable pointer optimization.
                if def.is_union() || def.variants.is_empty() || def.variants.len() > 2 {
                    return Err(err);
                }

                // Get a zero-sized variant or a pointer newtype.
                // Returns `Ok(None)` for an all-zero-sized variant,
                // `Ok(Some(pointer skeleton))` for a pointer newtype, and
                // `Err` when any field has a nonzero known size or there
                // is more than one pointer field.
                let zero_or_ptr_variant = |i| {
                    let i = VariantIdx::new(i);
                    let fields = def.variants[i].fields.iter().map(|field| {
                        SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env)
                    });
                    let mut ptr = None;
                    for field in fields {
                        let field = field?;
                        match field {
                            SizeSkeleton::Known(size) => {
                                if size.bytes() > 0 {
                                    return Err(err);
                                }
                            }
                            SizeSkeleton::Pointer {..} => {
                                if ptr.is_some() {
                                    return Err(err);
                                }
                                ptr = Some(field);
                            }
                        }
                    }
                    Ok(ptr)
                };

                let v0 = zero_or_ptr_variant(0)?;
                // Newtype.
                if def.variants.len() == 1 {
                    if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
                        return Ok(SizeSkeleton::Pointer {
                            // A scalar-valid-range attribute on the ADT that
                            // excludes zero also makes the pointer non-null.
                            non_zero: non_zero || match tcx.layout_scalar_valid_range(def.did) {
                                (Bound::Included(start), Bound::Unbounded) => start > 0,
                                (Bound::Included(start), Bound::Included(end)) =>
                                    0 < start && start < end,
                                _ => false,
                            },
                            tail,
                        });
                    } else {
                        return Err(err);
                    }
                }

                let v1 = zero_or_ptr_variant(1)?;
                // Nullable pointer enum optimization.
                match (v0, v1) {
                    (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None) |
                    (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
                        // The niche (null) now encodes the other variant, so
                        // the resulting skeleton is a *nullable* pointer.
                        Ok(SizeSkeleton::Pointer {
                            non_zero: false,
                            tail,
                        })
                    }
                    _ => Err(err)
                }
            }

            ty::Projection(_) | ty::Opaque(..) => {
                // Normalizing may reveal a concrete type; retry only if
                // it made progress, to avoid infinite recursion.
                let normalized = tcx.normalize_erasing_regions(param_env, ty);
                if ty == normalized {
                    Err(err)
                } else {
                    SizeSkeleton::compute(normalized, tcx, param_env)
                }
            }

            _ => Err(err)
        }
    }

    /// Returns `true` if both skeletons are guaranteed to describe types
    /// of equal size: either the same known size, or pointers with the
    /// same unsized `tail`.
    pub fn same_size(self, other: SizeSkeleton<'_>) -> bool {
        match (self, other) {
            (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
            (SizeSkeleton::Pointer { tail: a, .. },
             SizeSkeleton::Pointer { tail: b, .. }) => a == b,
            _ => false
        }
    }
}
1462
/// Access to a `TyCtxt`, in addition to the target data layout.
pub trait HasTyCtxt<'tcx>: HasDataLayout {
    /// Returns the type context.
    fn tcx<'a>(&'a self) -> TyCtxt<'a, 'tcx, 'tcx>;
}
1466
impl<'a, 'gcx, 'tcx> HasDataLayout for TyCtxt<'a, 'gcx, 'tcx> {
    // The type context holds the target data layout directly.
    fn data_layout(&self) -> &TargetDataLayout {
        &self.data_layout
    }
}
1472
impl<'a, 'gcx, 'tcx> HasTyCtxt<'gcx> for TyCtxt<'a, 'gcx, 'tcx> {
    // A `TyCtxt` hands out its global version of itself.
    fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'gcx> {
        self.global_tcx()
    }
}
1478
impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
    // Delegate to the wrapped context.
    fn data_layout(&self) -> &TargetDataLayout {
        self.tcx.data_layout()
    }
}
1484
impl<'gcx, 'tcx, T: HasTyCtxt<'gcx>> HasTyCtxt<'gcx> for LayoutCx<'tcx, T> {
    // Delegate to the wrapped context.
    fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'gcx> {
        self.tcx.tcx()
    }
}
1490
/// Abstracts over a plain `T` and a `Result<T, E>`, so the generic layout
/// methods below can be shared between infallible and fallible callers.
pub trait MaybeResult<T> {
    /// Wraps a success value.
    fn from_ok(x: T) -> Self;
    /// Applies `f` to the success value, if any (errors pass through).
    fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self;
}
1495
impl<T> MaybeResult<T> for T {
    // The infallible case: a plain value is its own "ok" wrapper.
    fn from_ok(x: T) -> Self {
        x
    }
    fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self {
        f(self)
    }
}
1504
1505 impl<T, E> MaybeResult<T> for Result<T, E> {
1506     fn from_ok(x: T) -> Self {
1507         Ok(x)
1508     }
1509     fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self {
1510         self.map(f)
1511     }
1512 }
1513
/// `rustc_target`'s generic `TyLayout`, instantiated with rustc's `Ty`.
pub type TyLayout<'tcx> = ::rustc_target::abi::TyLayout<'tcx, Ty<'tcx>>;
1515
impl<'a, 'tcx> LayoutOf for LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
    type Ty = Ty<'tcx>;
    type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;

    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout {
        // Normalize before querying, so `layout_raw` sees a canonical key.
        let param_env = self.param_env.with_reveal_all();
        let ty = self.tcx.normalize_erasing_regions(param_env, ty);
        let details = self.tcx.layout_raw(param_env.and(ty))?;
        let layout = TyLayout {
            ty,
            details
        };

        // NB: This recording is normally disabled; when enabled, it
        // can however trigger recursive invocations of `layout_of`.
        // Therefore, we execute it *after* the main query has
        // completed, to avoid problems around recursive structures
        // and the like. (Admittedly, I wasn't able to reproduce a problem
        // here, but it seems like the right thing to do. -nmatsakis)
        self.record_layout_for_printing(layout);

        Ok(layout)
    }
}
1542
impl<'a, 'tcx> LayoutOf for LayoutCx<'tcx, ty::query::TyCtxtAt<'a, 'tcx, 'tcx>> {
    type Ty = Ty<'tcx>;
    type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;

    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    ///
    /// Mirrors the `LayoutCx<TyCtxt>` implementation above, but for a
    /// span-carrying `TyCtxtAt` context.
    fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout {
        let param_env = self.param_env.with_reveal_all();
        let ty = self.tcx.normalize_erasing_regions(param_env, ty);
        let details = self.tcx.layout_raw(param_env.and(ty))?;
        let layout = TyLayout {
            ty,
            details
        };

        // NB: This recording is normally disabled; when enabled, it
        // can however trigger recursive invocations of `layout_of`.
        // Therefore, we execute it *after* the main query has
        // completed, to avoid problems around recursive structures
        // and the like. (Admittedly, I wasn't able to reproduce a problem
        // here, but it seems like the right thing to do. -nmatsakis)
        //
        // Recording only needs the plain context, so the span is dropped
        // by dereferencing `self.tcx`.
        let cx = LayoutCx {
            tcx: *self.tcx,
            param_env: self.param_env
        };
        cx.record_layout_for_printing(layout);

        Ok(layout)
    }
}
1573
1574 // Helper (inherent) `layout_of` methods to avoid pushing `LayoutCx` to users.
impl TyCtxt<'a, 'tcx, '_> {
    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    #[inline]
    pub fn layout_of(self, param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
                     -> Result<TyLayout<'tcx>, LayoutError<'tcx>> {
        // Build a temporary `LayoutCx` around the global context and
        // forward to its `LayoutOf` implementation.
        let cx = LayoutCx {
            tcx: self.global_tcx(),
            param_env: param_env_and_ty.param_env
        };
        cx.layout_of(param_env_and_ty.value)
    }
}
1588
impl ty::query::TyCtxtAt<'a, 'tcx, '_> {
    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    #[inline]
    pub fn layout_of(self, param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
                     -> Result<TyLayout<'tcx>, LayoutError<'tcx>> {
        // Like `TyCtxt::layout_of`, but keeps `self.span` attached to
        // the context used for the query.
        let cx = LayoutCx {
            tcx: self.global_tcx().at(self.span),
            param_env: param_env_and_ty.param_env
        };
        cx.layout_of(param_env_and_ty.value)
    }
}
1602
impl<'a, 'tcx, C> TyLayoutMethods<'tcx, C> for Ty<'tcx>
    where C: LayoutOf<Ty = Ty<'tcx>> + HasTyCtxt<'tcx>,
          C::TyLayout: MaybeResult<TyLayout<'tcx>>
{
    /// Projects the layout of a single variant out of `this`.
    fn for_variant(this: TyLayout<'tcx>, cx: &C, variant_index: VariantIdx) -> TyLayout<'tcx> {
        let details = match this.variants {
            // Already the requested variant; reuse the layout as-is.
            Variants::Single { index } if index == variant_index => this.details,

            Variants::Single { index } => {
                // Deny calling for_variant more than once for non-Single enums.
                cx.layout_of(this.ty).map_same(|layout| {
                    assert_eq!(layout.variants, Variants::Single { index });
                    layout
                });

                // Synthesize a zero-sized, uninhabited layout for the
                // requested variant, which is not the one represented
                // by this `Single` layout.
                let fields = match this.ty.sty {
                    ty::Adt(def, _) => def.variants[variant_index].fields.len(),
                    _ => bug!()
                };
                let tcx = cx.tcx();
                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: variant_index },
                    fields: FieldPlacement::Union(fields),
                    abi: Abi::Uninhabited,
                    align: tcx.data_layout.i8_align,
                    size: Size::ZERO
                })
            }

            // Multi-variant layouts carry per-variant details directly.
            Variants::NicheFilling { ref variants, .. } |
            Variants::Tagged { ref variants, .. } => {
                &variants[variant_index]
            }
        };

        assert_eq!(details.variants, Variants::Single { index: variant_index });

        TyLayout {
            ty: this.ty,
            details
        }
    }

    /// Computes the layout of field `i` of `this`, by figuring out the
    /// field's type from `this.ty` and handing it to `cx.layout_of`.
    fn field(this: TyLayout<'tcx>, cx: &C, i: usize) -> C::TyLayout {
        let tcx = cx.tcx();
        cx.layout_of(match this.ty.sty {
            // Scalar-like types have no fields to project.
            ty::Bool |
            ty::Char |
            ty::Int(_) |
            ty::Uint(_) |
            ty::Float(_) |
            ty::FnPtr(_) |
            ty::Never |
            ty::FnDef(..) |
            ty::GeneratorWitness(..) |
            ty::Foreign(..) |
            ty::Dynamic(..) => {
                bug!("TyLayout::field_type({:?}): not applicable", this)
            }

            // Potentially-fat pointers.
            ty::Ref(_, pointee, _) |
            ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                assert!(i < this.fields.count());

                // Reuse the fat *T type as its own thin pointer data field.
                // This provides information about e.g. DST struct pointees
                // (which may have no non-DST form), and will work as long
                // as the `Abi` or `FieldPlacement` is checked by users.
                if i == 0 {
                    let nil = tcx.mk_unit();
                    let ptr_ty = if this.ty.is_unsafe_ptr() {
                        tcx.mk_mut_ptr(nil)
                    } else {
                        tcx.mk_mut_ref(tcx.types.re_static, nil)
                    };
                    return cx.layout_of(ptr_ty).map_same(|mut ptr_layout| {
                        ptr_layout.ty = this.ty;
                        ptr_layout
                    });
                }

                // Field 1 is the pointer metadata, whose type depends
                // on the pointee's unsized tail.
                match tcx.struct_tail(pointee).sty {
                    ty::Slice(_) |
                    ty::Str => tcx.types.usize,
                    ty::Dynamic(_, _) => {
                        tcx.mk_imm_ref(
                            tcx.types.re_static,
                            tcx.mk_array(tcx.types.usize, 3),
                        )
                        /* FIXME use actual fn pointers
                        Warning: naively computing the number of entries in the
                        vtable by counting the methods on the trait + methods on
                        all parent traits does not work, because some methods can
                        be not object safe and thus excluded from the vtable.
                        Increase this counter if you tried to implement this but
                        failed to do it without duplicating a lot of code from
                        other places in the compiler: 2
                        tcx.mk_tup(&[
                            tcx.mk_array(tcx.types.usize, 3),
                            tcx.mk_array(Option<fn()>),
                        ])
                        */
                    }
                    _ => bug!("TyLayout::field_type({:?}): not applicable", this)
                }
            }

            // Arrays and slices.
            ty::Array(element, _) |
            ty::Slice(element) => element,
            ty::Str => tcx.types.u8,

            // Tuples, generators and closures.
            ty::Closure(def_id, ref substs) => {
                substs.upvar_tys(def_id, tcx).nth(i).unwrap()
            }

            ty::Generator(def_id, ref substs, _) => {
                substs.field_tys(def_id, tcx).nth(i).unwrap()
            }

            ty::Tuple(tys) => tys[i],

            // SIMD vector types.
            ty::Adt(def, ..) if def.repr.simd() => {
                this.ty.simd_type(tcx)
            }

            // ADTs.
            ty::Adt(def, substs) => {
                match this.variants {
                    Variants::Single { index } => {
                        def.variants[index].fields[i].ty(tcx, substs)
                    }

                    // Discriminant field for enums (where applicable).
                    Variants::Tagged { tag: ref discr, .. } |
                    Variants::NicheFilling { niche: ref discr, .. } => {
                        assert_eq!(i, 0);
                        let layout = LayoutDetails::scalar(cx, discr.clone());
                        return MaybeResult::from_ok(TyLayout {
                            details: tcx.intern_layout(layout),
                            ty: discr.value.to_ty(tcx)
                        });
                    }
                }
            }

            ty::Projection(_) | ty::UnnormalizedProjection(..) | ty::Bound(..) |
            ty::Opaque(..) | ty::Param(_) | ty::Infer(_) | ty::Error => {
                bug!("TyLayout::field_type: unexpected type `{}`", this.ty)
            }
        })
    }
}
1759
/// A located "niche": spare (invalid) values of a scalar field that can
/// encode enum discriminants without enlarging the layout.
struct Niche {
    // Offset of the scalar within the enclosing layout.
    offset: Size,
    // The scalar itself, including its current valid range.
    scalar: Scalar,
    // How many values fall outside the valid range, i.e. are
    // available for encoding discriminants.
    available: u128,
}
1765
impl Niche {
    /// Tries to reserve `count` niche values, starting immediately after
    /// the end of the scalar's current valid range (wrapping around the
    /// scalar's bit width).
    ///
    /// Returns `None` when fewer than `count` values are available;
    /// otherwise, returns the first reserved value together with the
    /// scalar carrying its widened valid range.
    fn reserve<'a, 'tcx>(
        &self,
        cx: &LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>>,
        count: u128,
    ) -> Option<(u128, Scalar)> {
        if count > self.available {
            return None;
        }
        let Scalar { value, valid_range: ref v } = self.scalar;
        let bits = value.size(cx).bits();
        assert!(bits <= 128);
        // Mask additions down to the scalar's width so they wrap within
        // its value domain rather than within the full u128.
        let max_value = !0u128 >> (128 - bits);
        let start = v.end().wrapping_add(1) & max_value;
        let end = v.end().wrapping_add(count) & max_value;
        Some((start, Scalar { value, valid_range: *v.start()..=end }))
    }
}
1784
impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
    /// Find the offset of a niche leaf field, starting from
    /// the given type and recursing through aggregates.
    // FIXME(eddyb) traverse already optimized enums.
    fn find_niche(&self, layout: TyLayout<'tcx>) -> Result<Option<Niche>, LayoutError<'tcx>> {
        // Builds a `Niche` for a single scalar at `offset`, if the scalar
        // has any invalid values to spare.
        let scalar_niche = |scalar: &Scalar, offset| {
            let Scalar { value, valid_range: ref v } = *scalar;

            let bits = value.size(self).bits();
            assert!(bits <= 128);
            let max_value = !0u128 >> (128 - bits);

            // Find out how many values are outside the valid range.
            let available = if v.start() <= v.end() {
                // Non-wrapping range: invalid values lie below the start
                // and above the end.
                v.start() + (max_value - v.end())
            } else {
                // Wrapping range: invalid values lie strictly between
                // end and start.
                v.start() - v.end() - 1
            };

            // Give up if there is no niche value available.
            if available == 0 {
                return None;
            }

            Some(Niche { offset, scalar: scalar.clone(), available })
        };

        // Locals variables which live across yields are stored
        // in the generator type as fields. These may be uninitialized
        // so we don't look for niches there.
        if let ty::Generator(..) = layout.ty.sty {
            return Ok(None);
        }

        match layout.abi {
            Abi::Scalar(ref scalar) => {
                return Ok(scalar_niche(scalar, Size::ZERO));
            }
            Abi::ScalarPair(ref a, ref b) => {
                // HACK(nox): We iter on `b` and then `a` because `max_by_key`
                // returns the last maximum.
                let niche = iter::once((b, a.value.size(self).abi_align(b.value.align(self))))
                    .chain(iter::once((a, Size::ZERO)))
                    .filter_map(|(scalar, offset)| scalar_niche(scalar, offset))
                    .max_by_key(|niche| niche.available);
                return Ok(niche);
            }
            Abi::Vector { ref element, .. } => {
                return Ok(scalar_niche(element, Size::ZERO));
            }
            _ => {}
        }

        // Perhaps one of the fields is non-zero, let's recurse and find out.
        if let FieldPlacement::Union(_) = layout.fields {
            // Only Rust enums have safe-to-inspect fields
            // (a discriminant), other unions are unsafe.
            if let Variants::Single { .. } = layout.variants {
                return Ok(None);
            }
        }
        if let FieldPlacement::Array { .. } = layout.fields {
            // Array elements share one layout, so inspecting the first
            // element suffices.
            if layout.fields.count() > 0 {
                return self.find_niche(layout.field(self, 0)?);
            } else {
                return Ok(None);
            }
        }
        // Otherwise, keep the field whose niche has the most spare values,
        // rebasing its offset onto this layout.
        let mut niche = None;
        let mut available = 0;
        for i in 0..layout.fields.count() {
            if let Some(mut c) = self.find_niche(layout.field(self, i)?)? {
                if c.available > available {
                    available = c.available;
                    c.offset += layout.fields.offset(i);
                    niche = Some(c);
                }
            }
        }
        Ok(niche)
    }
}
1867
impl<'a> HashStable<StableHashingContext<'a>> for Variants {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        use ty::layout::Variants::*;
        // Hash the variant kind first, then every field in declaration
        // order; this order is part of the stable hash.
        mem::discriminant(self).hash_stable(hcx, hasher);

        match *self {
            Single { index } => {
                index.hash_stable(hcx, hasher);
            }
            Tagged {
                ref tag,
                ref variants,
            } => {
                tag.hash_stable(hcx, hasher);
                variants.hash_stable(hcx, hasher);
            }
            NicheFilling {
                dataful_variant,
                ref niche_variants,
                ref niche,
                niche_start,
                ref variants,
            } => {
                dataful_variant.hash_stable(hcx, hasher);
                // A range is hashed as its two endpoints.
                niche_variants.start().hash_stable(hcx, hasher);
                niche_variants.end().hash_stable(hcx, hasher);
                niche.hash_stable(hcx, hasher);
                niche_start.hash_stable(hcx, hasher);
                variants.hash_stable(hcx, hasher);
            }
        }
    }
}
1903
impl<'a> HashStable<StableHashingContext<'a>> for FieldPlacement {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        use ty::layout::FieldPlacement::*;
        // Hash the placement kind first, then its fields in order.
        mem::discriminant(self).hash_stable(hcx, hasher);

        match *self {
            Union(count) => {
                count.hash_stable(hcx, hasher);
            }
            Array { count, stride } => {
                count.hash_stable(hcx, hasher);
                stride.hash_stable(hcx, hasher);
            }
            Arbitrary { ref offsets, ref memory_index } => {
                offsets.hash_stable(hcx, hasher);
                memory_index.hash_stable(hcx, hasher);
            }
        }
    }
}
1926
1927 impl<'a> HashStable<StableHashingContext<'a>> for VariantIdx {
1928     fn hash_stable<W: StableHasherResult>(
1929         &self,
1930         hcx: &mut StableHashingContext<'a>,
1931         hasher: &mut StableHasher<W>,
1932     ) {
1933         self.as_u32().hash_stable(hcx, hasher)
1934     }
1935 }
1936
impl<'a> HashStable<StableHashingContext<'a>> for Abi {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        use ty::layout::Abi::*;
        // Hash the ABI kind first, then its payload in order.
        mem::discriminant(self).hash_stable(hcx, hasher);

        match *self {
            Uninhabited => {}
            Scalar(ref value) => {
                value.hash_stable(hcx, hasher);
            }
            ScalarPair(ref a, ref b) => {
                a.hash_stable(hcx, hasher);
                b.hash_stable(hcx, hasher);
            }
            Vector { ref element, count } => {
                element.hash_stable(hcx, hasher);
                count.hash_stable(hcx, hasher);
            }
            Aggregate { sized } => {
                sized.hash_stable(hcx, hasher);
            }
        }
    }
}
1963
1964 impl<'a> HashStable<StableHashingContext<'a>> for Scalar {
1965     fn hash_stable<W: StableHasherResult>(&self,
1966                                           hcx: &mut StableHashingContext<'a>,
1967                                           hasher: &mut StableHasher<W>) {
1968         let Scalar { value, ref valid_range } = *self;
1969         value.hash_stable(hcx, hasher);
1970         valid_range.start().hash_stable(hcx, hasher);
1971         valid_range.end().hash_stable(hcx, hasher);
1972     }
1973 }
1974
// Stable hashing for the remaining layout types is mechanical
// field-by-field hashing, so it is generated by `impl_stable_hash_for!`.
impl_stable_hash_for!(struct ::ty::layout::LayoutDetails {
    variants,
    fields,
    abi,
    size,
    align
});

impl_stable_hash_for!(enum ::ty::layout::Integer {
    I8,
    I16,
    I32,
    I64,
    I128
});

impl_stable_hash_for!(enum ::ty::layout::Primitive {
    Int(integer, signed),
    Float(fty),
    Pointer
});
1996
1997 impl<'gcx> HashStable<StableHashingContext<'gcx>> for Align {
1998     fn hash_stable<W: StableHasherResult>(&self,
1999                                           hcx: &mut StableHashingContext<'gcx>,
2000                                           hasher: &mut StableHasher<W>) {
2001         self.abi().hash_stable(hcx, hasher);
2002         self.pref().hash_stable(hcx, hasher);
2003     }
2004 }
2005
2006 impl<'gcx> HashStable<StableHashingContext<'gcx>> for Size {
2007     fn hash_stable<W: StableHasherResult>(&self,
2008                                           hcx: &mut StableHashingContext<'gcx>,
2009                                           hasher: &mut StableHasher<W>) {
2010         self.bytes().hash_stable(hcx, hasher);
2011     }
2012 }
2013
impl<'a, 'gcx> HashStable<StableHashingContext<'a>> for LayoutError<'gcx>
{
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        use ty::layout::LayoutError::*;
        // Hash the error kind, then the offending type (both variants
        // carry exactly one `Ty` payload).
        mem::discriminant(self).hash_stable(hcx, hasher);

        match *self {
            Unknown(t) |
            SizeOverflow(t) => t.hash_stable(hcx, hasher)
        }
    }
}