]> git.lizzy.rs Git - rust.git/blob - src/librustc/ty/layout.rs
5406495226d798cd041e7fc90d39739991773099
[rust.git] / src / librustc / ty / layout.rs
1 // Copyright 2016 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
4 //
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
10
11 use session::{self, DataTypeKind};
12 use ty::{self, Ty, TyCtxt, TypeFoldable, ReprOptions};
13
14 use syntax::ast::{self, IntTy, UintTy};
15 use syntax::attr;
16 use syntax_pos::DUMMY_SP;
17
18 use std::cmp;
19 use std::fmt;
20 use std::i128;
21 use std::iter;
22 use std::mem;
23 use std::ops::Bound;
24
25 use ich::StableHashingContext;
26 use rustc_data_structures::indexed_vec::{IndexVec, Idx};
27 use rustc_data_structures::stable_hasher::{HashStable, StableHasher,
28                                            StableHasherResult};
29
30 pub use rustc_target::abi::*;
31
/// Extension methods for `Integer` (re-exported from `rustc_target`)
/// that need access to rustc's type system and `#[repr]` handling.
pub trait IntegerExt {
    /// Lower this integer to the corresponding primitive `Ty`
    /// (e.g. `I32` + `signed` becomes `i32`, unsigned becomes `u32`).
    fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, signed: bool) -> Ty<'tcx>;
    /// Get the Integer type from an attr::IntType
    /// (`isize`/`usize` resolve to the target's pointer-sized integer).
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
    /// Find the appropriate Integer type and signedness for the given
    /// signed discriminant range and #[repr] attribute.
    fn repr_discr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                            ty: Ty<'tcx>,
                            repr: &ReprOptions,
                            min: i128,
                            max: i128)
                            -> (Integer, bool);
}
42
43 impl IntegerExt for Integer {
44     fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, signed: bool) -> Ty<'tcx> {
45         match (*self, signed) {
46             (I8, false) => tcx.types.u8,
47             (I16, false) => tcx.types.u16,
48             (I32, false) => tcx.types.u32,
49             (I64, false) => tcx.types.u64,
50             (I128, false) => tcx.types.u128,
51             (I8, true) => tcx.types.i8,
52             (I16, true) => tcx.types.i16,
53             (I32, true) => tcx.types.i32,
54             (I64, true) => tcx.types.i64,
55             (I128, true) => tcx.types.i128,
56         }
57     }
58
59     /// Get the Integer type from an attr::IntType.
60     fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
61         let dl = cx.data_layout();
62
63         match ity {
64             attr::SignedInt(IntTy::I8) | attr::UnsignedInt(UintTy::U8) => I8,
65             attr::SignedInt(IntTy::I16) | attr::UnsignedInt(UintTy::U16) => I16,
66             attr::SignedInt(IntTy::I32) | attr::UnsignedInt(UintTy::U32) => I32,
67             attr::SignedInt(IntTy::I64) | attr::UnsignedInt(UintTy::U64) => I64,
68             attr::SignedInt(IntTy::I128) | attr::UnsignedInt(UintTy::U128) => I128,
69             attr::SignedInt(IntTy::Isize) | attr::UnsignedInt(UintTy::Usize) => {
70                 dl.ptr_sized_integer()
71             }
72         }
73     }
74
75     /// Find the appropriate Integer type and signedness for the given
76     /// signed discriminant range and #[repr] attribute.
77     /// N.B.: u128 values above i128::MAX will be treated as signed, but
78     /// that shouldn't affect anything, other than maybe debuginfo.
79     fn repr_discr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
80                             ty: Ty<'tcx>,
81                             repr: &ReprOptions,
82                             min: i128,
83                             max: i128)
84                             -> (Integer, bool) {
85         // Theoretically, negative values could be larger in unsigned representation
86         // than the unsigned representation of the signed minimum. However, if there
87         // are any negative values, the only valid unsigned representation is u128
88         // which can fit all i128 values, so the result remains unaffected.
89         let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
90         let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));
91
92         let mut min_from_extern = None;
93         let min_default = I8;
94
95         if let Some(ity) = repr.int {
96             let discr = Integer::from_attr(&tcx, ity);
97             let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
98             if discr < fit {
99                 bug!("Integer::repr_discr: `#[repr]` hint too small for \
100                       discriminant range of enum `{}", ty)
101             }
102             return (discr, ity.is_signed());
103         }
104
105         if repr.c() {
106             match &tcx.sess.target.target.arch[..] {
107                 // WARNING: the ARM EABI has two variants; the one corresponding
108                 // to `at_least == I32` appears to be used on Linux and NetBSD,
109                 // but some systems may use the variant corresponding to no
110                 // lower bound. However, we don't run on those yet...?
111                 "arm" => min_from_extern = Some(I32),
112                 _ => min_from_extern = Some(I32),
113             }
114         }
115
116         let at_least = min_from_extern.unwrap_or(min_default);
117
118         // If there are no negative values, we can use the unsigned fit.
119         if min >= 0 {
120             (cmp::max(unsigned_fit, at_least), false)
121         } else {
122             (cmp::max(signed_fit, at_least), true)
123         }
124     }
125 }
126
/// Extension methods for `Primitive` (re-exported from `rustc_target`)
/// that need access to the type context.
pub trait PrimitiveExt {
    /// Lower this layout primitive to the corresponding `Ty`.
    fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Ty<'tcx>;
}
130
131 impl PrimitiveExt for Primitive {
132     fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Ty<'tcx> {
133         match *self {
134             Int(i, signed) => i.to_ty(tcx, signed),
135             Float(FloatTy::F32) => tcx.types.f32,
136             Float(FloatTy::F64) => tcx.types.f64,
137             Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
138         }
139     }
140 }
141
/// The first half of a fat pointer.
///
/// Field index 0 when a fat pointer is laid out as a (data, metadata) pair.
///
/// - For a trait object, this is the address of the box.
/// - For a slice, this is the base address.
pub const FAT_PTR_ADDR: usize = 0;

/// The second half of a fat pointer.
///
/// Field index 1 (the metadata half) of the same pair.
///
/// - For a trait object, this is the address of the vtable.
/// - For a slice, this is the length.
pub const FAT_PTR_EXTRA: usize = 1;
153
/// Errors that can arise while computing a type's layout.
#[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable)]
pub enum LayoutError<'tcx> {
    // The layout of this type could not be determined (reported to the
    // user as "has an unknown layout").
    Unknown(Ty<'tcx>),
    // The computed size exceeded what the target architecture can
    // represent (reported as "too big for the current architecture").
    SizeOverflow(Ty<'tcx>)
}
159
160 impl<'tcx> fmt::Display for LayoutError<'tcx> {
161     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
162         match *self {
163             LayoutError::Unknown(ty) => {
164                 write!(f, "the type `{:?}` has an unknown layout", ty)
165             }
166             LayoutError::SizeOverflow(ty) => {
167                 write!(f, "the type `{:?}` is too big for the current architecture", ty)
168             }
169         }
170     }
171 }
172
/// Query provider for `layout_raw`: computes the uncached layout of a
/// type, guarding against runaway recursion through nested layouts by
/// tracking a per-thread depth (`layout_depth` in the `ImplicitCtxt`)
/// against the session's recursion limit.
fn layout_raw<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                        query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
                        -> Result<&'tcx LayoutDetails, LayoutError<'tcx>>
{
    ty::tls::with_related_context(tcx, move |icx| {
        let rec_limit = *tcx.sess.recursion_limit.get();
        let (param_env, ty) = query.into_parts();

        // Exceeding the limit is a fatal error rather than a
        // `LayoutError`: there is no sensible layout to recover.
        if icx.layout_depth > rec_limit {
            tcx.sess.fatal(
                &format!("overflow representing the type `{}`", ty));
        }

        // Update the ImplicitCtxt to increase the layout_depth
        let icx = ty::tls::ImplicitCtxt {
            layout_depth: icx.layout_depth + 1,
            ..icx.clone()
        };

        // Run the actual computation with the incremented depth in scope,
        // so layouts computed transitively see the updated counter.
        ty::tls::enter_context(&icx, |_| {
            let cx = LayoutCx { tcx, param_env };
            cx.layout_raw_uncached(ty)
        })
    })
}
198
199 pub fn provide(providers: &mut ty::query::Providers<'_>) {
200     *providers = ty::query::Providers {
201         layout_raw,
202         ..*providers
203     };
204 }
205
/// The context needed to compute layouts: a type context `C` (generic so
/// different tcx-like handles can be used) plus the parameter environment
/// under which types are normalized.
pub struct LayoutCx<'tcx, C> {
    pub tcx: C,
    pub param_env: ty::ParamEnv<'tcx>
}
210
211 impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
212     fn layout_raw_uncached(&self, ty: Ty<'tcx>)
213                            -> Result<&'tcx LayoutDetails, LayoutError<'tcx>> {
214         let tcx = self.tcx;
215         let param_env = self.param_env;
216         let dl = self.data_layout();
217         let scalar_unit = |value: Primitive| {
218             let bits = value.size(dl).bits();
219             assert!(bits <= 128);
220             Scalar {
221                 value,
222                 valid_range: 0..=(!0 >> (128 - bits))
223             }
224         };
225         let scalar = |value: Primitive| {
226             tcx.intern_layout(LayoutDetails::scalar(self, scalar_unit(value)))
227         };
228         let scalar_pair = |a: Scalar, b: Scalar| {
229             let b_align = b.value.align(dl);
230             let align = a.value.align(dl).max(b_align).max(dl.aggregate_align);
231             let b_offset = a.value.size(dl).align_to(b_align.abi);
232             let size = (b_offset + b.value.size(dl)).align_to(align.abi);
233             LayoutDetails {
234                 variants: Variants::Single { index: VariantIdx::new(0) },
235                 fields: FieldPlacement::Arbitrary {
236                     offsets: vec![Size::ZERO, b_offset],
237                     memory_index: vec![0, 1]
238                 },
239                 abi: Abi::ScalarPair(a, b),
240                 align,
241                 size
242             }
243         };
244
245         #[derive(Copy, Clone, Debug)]
246         enum StructKind {
247             /// A tuple, closure, or univariant which cannot be coerced to unsized.
248             AlwaysSized,
249             /// A univariant, the last field of which may be coerced to unsized.
250             MaybeUnsized,
251             /// A univariant, but with a prefix of an arbitrary size & alignment (e.g. enum tag).
252             Prefixed(Size, Align),
253         }
254
255         let univariant_uninterned = |fields: &[TyLayout<'_>], repr: &ReprOptions, kind| {
256             let packed = repr.packed();
257             if packed && repr.align > 0 {
258                 bug!("struct cannot be packed and aligned");
259             }
260
261             let pack = Align::from_bytes(repr.pack as u64).unwrap();
262
263             let mut align = if packed {
264                 dl.i8_align
265             } else {
266                 dl.aggregate_align
267             };
268
269             let mut sized = true;
270             let mut offsets = vec![Size::ZERO; fields.len()];
271             let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();
272
273             let mut optimize = !repr.inhibit_struct_field_reordering_opt();
274             if let StructKind::Prefixed(_, align) = kind {
275                 optimize &= align.bytes() == 1;
276             }
277
278             if optimize {
279                 let end = if let StructKind::MaybeUnsized = kind {
280                     fields.len() - 1
281                 } else {
282                     fields.len()
283                 };
284                 let optimizing = &mut inverse_memory_index[..end];
285                 let field_align = |f: &TyLayout<'_>| {
286                     if packed { f.align.abi.min(pack) } else { f.align.abi }
287                 };
288                 match kind {
289                     StructKind::AlwaysSized |
290                     StructKind::MaybeUnsized => {
291                         optimizing.sort_by_key(|&x| {
292                             // Place ZSTs first to avoid "interesting offsets",
293                             // especially with only one or two non-ZST fields.
294                             let f = &fields[x as usize];
295                             (!f.is_zst(), cmp::Reverse(field_align(f)))
296                         });
297                     }
298                     StructKind::Prefixed(..) => {
299                         optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
300                     }
301                 }
302             }
303
304             // inverse_memory_index holds field indices by increasing memory offset.
305             // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
306             // We now write field offsets to the corresponding offset slot;
307             // field 5 with offset 0 puts 0 in offsets[5].
308             // At the bottom of this function, we use inverse_memory_index to produce memory_index.
309
310             let mut offset = Size::ZERO;
311
312             if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
313                 let prefix_align = if packed {
314                     prefix_align.min(pack)
315                 } else {
316                     prefix_align
317                 };
318                 align = align.max(AbiAndPrefAlign::new(prefix_align));
319                 offset = prefix_size.align_to(prefix_align);
320             }
321
322             for &i in &inverse_memory_index {
323                 let field = fields[i as usize];
324                 if !sized {
325                     bug!("univariant: field #{} of `{}` comes after unsized field",
326                          offsets.len(), ty);
327                 }
328
329                 if field.is_unsized() {
330                     sized = false;
331                 }
332
333                 // Invariant: offset < dl.obj_size_bound() <= 1<<61
334                 let field_align = if packed {
335                     field.align.min(AbiAndPrefAlign::new(pack))
336                 } else {
337                     field.align
338                 };
339                 offset = offset.align_to(field_align.abi);
340                 align = align.max(field_align);
341
342                 debug!("univariant offset: {:?} field: {:#?}", offset, field);
343                 offsets[i as usize] = offset;
344
345                 offset = offset.checked_add(field.size, dl)
346                     .ok_or(LayoutError::SizeOverflow(ty))?;
347             }
348
349             if repr.align > 0 {
350                 let repr_align = repr.align as u64;
351                 align = align.max(AbiAndPrefAlign::new(Align::from_bytes(repr_align).unwrap()));
352                 debug!("univariant repr_align: {:?}", repr_align);
353             }
354
355             debug!("univariant min_size: {:?}", offset);
356             let min_size = offset;
357
358             // As stated above, inverse_memory_index holds field indices by increasing offset.
359             // This makes it an already-sorted view of the offsets vec.
360             // To invert it, consider:
361             // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
362             // Field 5 would be the first element, so memory_index is i:
363             // Note: if we didn't optimize, it's already right.
364
365             let mut memory_index;
366             if optimize {
367                 memory_index = vec![0; inverse_memory_index.len()];
368
369                 for i in 0..inverse_memory_index.len() {
370                     memory_index[inverse_memory_index[i] as usize]  = i as u32;
371                 }
372             } else {
373                 memory_index = inverse_memory_index;
374             }
375
376             let size = min_size.align_to(align.abi);
377             let mut abi = Abi::Aggregate { sized };
378
379             // Unpack newtype ABIs and find scalar pairs.
380             if sized && size.bytes() > 0 {
381                 // All other fields must be ZSTs, and we need them to all start at 0.
382                 let mut zst_offsets =
383                     offsets.iter().enumerate().filter(|&(i, _)| fields[i].is_zst());
384                 if zst_offsets.all(|(_, o)| o.bytes() == 0) {
385                     let mut non_zst_fields =
386                         fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());
387
388                     match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
389                         // We have exactly one non-ZST field.
390                         (Some((i, field)), None, None) => {
391                             // Field fills the struct and it has a scalar or scalar pair ABI.
392                             if offsets[i].bytes() == 0 &&
393                                align.abi == field.align.abi &&
394                                size == field.size {
395                                 match field.abi {
396                                     // For plain scalars, or vectors of them, we can't unpack
397                                     // newtypes for `#[repr(C)]`, as that affects C ABIs.
398                                     Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
399                                         abi = field.abi.clone();
400                                     }
401                                     // But scalar pairs are Rust-specific and get
402                                     // treated as aggregates by C ABIs anyway.
403                                     Abi::ScalarPair(..) => {
404                                         abi = field.abi.clone();
405                                     }
406                                     _ => {}
407                                 }
408                             }
409                         }
410
411                         // Two non-ZST fields, and they're both scalars.
412                         (Some((i, &TyLayout {
413                             details: &LayoutDetails { abi: Abi::Scalar(ref a), .. }, ..
414                         })), Some((j, &TyLayout {
415                             details: &LayoutDetails { abi: Abi::Scalar(ref b), .. }, ..
416                         })), None) => {
417                             // Order by the memory placement, not source order.
418                             let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
419                                 ((i, a), (j, b))
420                             } else {
421                                 ((j, b), (i, a))
422                             };
423                             let pair = scalar_pair(a.clone(), b.clone());
424                             let pair_offsets = match pair.fields {
425                                 FieldPlacement::Arbitrary {
426                                     ref offsets,
427                                     ref memory_index
428                                 } => {
429                                     assert_eq!(memory_index, &[0, 1]);
430                                     offsets
431                                 }
432                                 _ => bug!()
433                             };
434                             if offsets[i] == pair_offsets[0] &&
435                                offsets[j] == pair_offsets[1] &&
436                                align == pair.align &&
437                                size == pair.size {
438                                 // We can use `ScalarPair` only when it matches our
439                                 // already computed layout (including `#[repr(C)]`).
440                                 abi = pair.abi;
441                             }
442                         }
443
444                         _ => {}
445                     }
446                 }
447             }
448
449             if sized && fields.iter().any(|f| f.abi.is_uninhabited()) {
450                 abi = Abi::Uninhabited;
451             }
452
453             Ok(LayoutDetails {
454                 variants: Variants::Single { index: VariantIdx::new(0) },
455                 fields: FieldPlacement::Arbitrary {
456                     offsets,
457                     memory_index
458                 },
459                 abi,
460                 align,
461                 size
462             })
463         };
464         let univariant = |fields: &[TyLayout<'_>], repr: &ReprOptions, kind| {
465             Ok(tcx.intern_layout(univariant_uninterned(fields, repr, kind)?))
466         };
467         debug_assert!(!ty.has_infer_types());
468
469         Ok(match ty.sty {
470             // Basic scalars.
471             ty::Bool => {
472                 tcx.intern_layout(LayoutDetails::scalar(self, Scalar {
473                     value: Int(I8, false),
474                     valid_range: 0..=1
475                 }))
476             }
477             ty::Char => {
478                 tcx.intern_layout(LayoutDetails::scalar(self, Scalar {
479                     value: Int(I32, false),
480                     valid_range: 0..=0x10FFFF
481                 }))
482             }
483             ty::Int(ity) => {
484                 scalar(Int(Integer::from_attr(dl, attr::SignedInt(ity)), true))
485             }
486             ty::Uint(ity) => {
487                 scalar(Int(Integer::from_attr(dl, attr::UnsignedInt(ity)), false))
488             }
489             ty::Float(fty) => scalar(Float(fty)),
490             ty::FnPtr(_) => {
491                 let mut ptr = scalar_unit(Pointer);
492                 ptr.valid_range = 1..=*ptr.valid_range.end();
493                 tcx.intern_layout(LayoutDetails::scalar(self, ptr))
494             }
495
496             // The never type.
497             ty::Never => {
498                 tcx.intern_layout(LayoutDetails {
499                     variants: Variants::Single { index: VariantIdx::new(0) },
500                     fields: FieldPlacement::Union(0),
501                     abi: Abi::Uninhabited,
502                     align: dl.i8_align,
503                     size: Size::ZERO
504                 })
505             }
506
507             // Potentially-fat pointers.
508             ty::Ref(_, pointee, _) |
509             ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
510                 let mut data_ptr = scalar_unit(Pointer);
511                 if !ty.is_unsafe_ptr() {
512                     data_ptr.valid_range = 1..=*data_ptr.valid_range.end();
513                 }
514
515                 let pointee = tcx.normalize_erasing_regions(param_env, pointee);
516                 if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
517                     return Ok(tcx.intern_layout(LayoutDetails::scalar(self, data_ptr)));
518                 }
519
520                 let unsized_part = tcx.struct_tail(pointee);
521                 let metadata = match unsized_part.sty {
522                     ty::Foreign(..) => {
523                         return Ok(tcx.intern_layout(LayoutDetails::scalar(self, data_ptr)));
524                     }
525                     ty::Slice(_) | ty::Str => {
526                         scalar_unit(Int(dl.ptr_sized_integer(), false))
527                     }
528                     ty::Dynamic(..) => {
529                         let mut vtable = scalar_unit(Pointer);
530                         vtable.valid_range = 1..=*vtable.valid_range.end();
531                         vtable
532                     }
533                     _ => return Err(LayoutError::Unknown(unsized_part))
534                 };
535
536                 // Effectively a (ptr, meta) tuple.
537                 tcx.intern_layout(scalar_pair(data_ptr, metadata))
538             }
539
540             // Arrays and slices.
541             ty::Array(element, mut count) => {
542                 if count.has_projections() {
543                     count = tcx.normalize_erasing_regions(param_env, count);
544                     if count.has_projections() {
545                         return Err(LayoutError::Unknown(ty));
546                     }
547                 }
548
549                 let element = self.layout_of(element)?;
550                 let count = count.unwrap_usize(tcx);
551                 let size = element.size.checked_mul(count, dl)
552                     .ok_or(LayoutError::SizeOverflow(ty))?;
553
554                 tcx.intern_layout(LayoutDetails {
555                     variants: Variants::Single { index: VariantIdx::new(0) },
556                     fields: FieldPlacement::Array {
557                         stride: element.size,
558                         count
559                     },
560                     abi: Abi::Aggregate { sized: true },
561                     align: element.align,
562                     size
563                 })
564             }
565             ty::Slice(element) => {
566                 let element = self.layout_of(element)?;
567                 tcx.intern_layout(LayoutDetails {
568                     variants: Variants::Single { index: VariantIdx::new(0) },
569                     fields: FieldPlacement::Array {
570                         stride: element.size,
571                         count: 0
572                     },
573                     abi: Abi::Aggregate { sized: false },
574                     align: element.align,
575                     size: Size::ZERO
576                 })
577             }
578             ty::Str => {
579                 tcx.intern_layout(LayoutDetails {
580                     variants: Variants::Single { index: VariantIdx::new(0) },
581                     fields: FieldPlacement::Array {
582                         stride: Size::from_bytes(1),
583                         count: 0
584                     },
585                     abi: Abi::Aggregate { sized: false },
586                     align: dl.i8_align,
587                     size: Size::ZERO
588                 })
589             }
590
591             // Odd unit types.
592             ty::FnDef(..) => {
593                 univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?
594             }
595             ty::Dynamic(..) | ty::Foreign(..) => {
596                 let mut unit = univariant_uninterned(&[], &ReprOptions::default(),
597                   StructKind::AlwaysSized)?;
598                 match unit.abi {
599                     Abi::Aggregate { ref mut sized } => *sized = false,
600                     _ => bug!()
601                 }
602                 tcx.intern_layout(unit)
603             }
604
605             // Tuples, generators and closures.
606             ty::Generator(def_id, ref substs, _) => {
607                 let tys = substs.field_tys(def_id, tcx);
608                 univariant(&tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
609                     &ReprOptions::default(),
610                     StructKind::AlwaysSized)?
611             }
612
613             ty::Closure(def_id, ref substs) => {
614                 let tys = substs.upvar_tys(def_id, tcx);
615                 univariant(&tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
616                     &ReprOptions::default(),
617                     StructKind::AlwaysSized)?
618             }
619
620             ty::Tuple(tys) => {
621                 let kind = if tys.len() == 0 {
622                     StructKind::AlwaysSized
623                 } else {
624                     StructKind::MaybeUnsized
625                 };
626
627                 univariant(&tys.iter().map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
628                            &ReprOptions::default(), kind)?
629             }
630
631             // SIMD vector types.
632             ty::Adt(def, ..) if def.repr.simd() => {
633                 let element = self.layout_of(ty.simd_type(tcx))?;
634                 let count = ty.simd_size(tcx) as u64;
635                 assert!(count > 0);
636                 let scalar = match element.abi {
637                     Abi::Scalar(ref scalar) => scalar.clone(),
638                     _ => {
639                         tcx.sess.fatal(&format!("monomorphising SIMD type `{}` with \
640                                                  a non-machine element type `{}`",
641                                                 ty, element.ty));
642                     }
643                 };
644                 let size = element.size.checked_mul(count, dl)
645                     .ok_or(LayoutError::SizeOverflow(ty))?;
646                 let align = dl.vector_align(size);
647                 let size = size.align_to(align.abi);
648
649                 tcx.intern_layout(LayoutDetails {
650                     variants: Variants::Single { index: VariantIdx::new(0) },
651                     fields: FieldPlacement::Array {
652                         stride: element.size,
653                         count
654                     },
655                     abi: Abi::Vector {
656                         element: scalar,
657                         count
658                     },
659                     size,
660                     align,
661                 })
662             }
663
664             // ADTs.
665             ty::Adt(def, substs) => {
666                 // Cache the field layouts.
667                 let variants = def.variants.iter().map(|v| {
668                     v.fields.iter().map(|field| {
669                         self.layout_of(field.ty(tcx, substs))
670                     }).collect::<Result<Vec<_>, _>>()
671                 }).collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
672
673                 if def.is_union() {
674                     let packed = def.repr.packed();
675                     if packed && def.repr.align > 0 {
676                         bug!("Union cannot be packed and aligned");
677                     }
678
679                     let pack = Align::from_bytes(def.repr.pack as u64).unwrap();
680
681                     let mut align = if packed {
682                         dl.i8_align
683                     } else {
684                         dl.aggregate_align
685                     };
686
687                     if def.repr.align > 0 {
688                         let repr_align = def.repr.align as u64;
689                         align = align.max(
690                             AbiAndPrefAlign::new(Align::from_bytes(repr_align).unwrap()));
691                     }
692
693                     let optimize = !def.repr.inhibit_union_abi_opt();
694                     let mut size = Size::ZERO;
695                     let mut abi = Abi::Aggregate { sized: true };
696                     let index = VariantIdx::new(0);
697                     for field in &variants[index] {
698                         assert!(!field.is_unsized());
699
700                         let field_align = if packed {
701                             field.align.min(AbiAndPrefAlign::new(pack))
702                         } else {
703                             field.align
704                         };
705                         align = align.max(field_align);
706
707                         // If all non-ZST fields have the same ABI, forward this ABI
708                         if optimize && !field.is_zst() {
709                             // Normalize scalar_unit to the maximal valid range
710                             let field_abi = match &field.abi {
711                                 Abi::Scalar(x) => Abi::Scalar(scalar_unit(x.value)),
712                                 Abi::ScalarPair(x, y) => {
713                                     Abi::ScalarPair(
714                                         scalar_unit(x.value),
715                                         scalar_unit(y.value),
716                                     )
717                                 }
718                                 Abi::Vector { element: x, count } => {
719                                     Abi::Vector {
720                                         element: scalar_unit(x.value),
721                                         count: *count,
722                                     }
723                                 }
724                                 Abi::Uninhabited |
725                                 Abi::Aggregate { .. }  => Abi::Aggregate { sized: true },
726                             };
727
728                             if size == Size::ZERO {
729                                 // first non ZST: initialize 'abi'
730                                 abi = field_abi;
731                             } else if abi != field_abi  {
732                                 // different fields have different ABI: reset to Aggregate
733                                 abi = Abi::Aggregate { sized: true };
734                             }
735                         }
736
737                         size = cmp::max(size, field.size);
738                     }
739
740                     return Ok(tcx.intern_layout(LayoutDetails {
741                         variants: Variants::Single { index },
742                         fields: FieldPlacement::Union(variants[index].len()),
743                         abi,
744                         align,
745                         size: size.align_to(align.abi)
746                     }));
747                 }
748
749                 // A variant is absent if it's uninhabited and only has ZST fields.
750                 // Present uninhabited variants only require space for their fields,
751                 // but *not* an encoding of the discriminant (e.g. a tag value).
752                 // See issue #49298 for more details on the need to leave space
753                 // for non-ZST uninhabited data (mostly partial initialization).
754                 let absent = |fields: &[TyLayout<'_>]| {
755                     let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
756                     let is_zst = fields.iter().all(|f| f.is_zst());
757                     uninhabited && is_zst
758                 };
759                 let (present_first, present_second) = {
760                     let mut present_variants = variants.iter_enumerated().filter_map(|(i, v)| {
761                         if absent(v) {
762                             None
763                         } else {
764                             Some(i)
765                         }
766                     });
767                     (present_variants.next(), present_variants.next())
768                 };
769                 if present_first.is_none() {
770                     // Uninhabited because it has no variants, or only absent ones.
771                     return tcx.layout_raw(param_env.and(tcx.types.never));
772                 }
773
774                 let is_struct = !def.is_enum() ||
775                     // Only one variant is present.
776                     (present_second.is_none() &&
777                     // Representation optimizations are allowed.
778                     !def.repr.inhibit_enum_layout_opt());
779                 if is_struct {
780                     // Struct, or univariant enum equivalent to a struct.
781                     // (Typechecking will reject discriminant-sizing attrs.)
782
783                     let v = present_first.unwrap();
784                     let kind = if def.is_enum() || variants[v].len() == 0 {
785                         StructKind::AlwaysSized
786                     } else {
787                         let param_env = tcx.param_env(def.did);
788                         let last_field = def.variants[v].fields.last().unwrap();
789                         let always_sized = tcx.type_of(last_field.did)
790                                               .is_sized(tcx.at(DUMMY_SP), param_env);
791                         if !always_sized { StructKind::MaybeUnsized }
792                         else { StructKind::AlwaysSized }
793                     };
794
795                     let mut st = univariant_uninterned(&variants[v], &def.repr, kind)?;
796                     st.variants = Variants::Single { index: v };
797                     let (start, end) = self.tcx.layout_scalar_valid_range(def.did);
798                     match st.abi {
799                         Abi::Scalar(ref mut scalar) |
800                         Abi::ScalarPair(ref mut scalar, _) => {
801                             // the asserts ensure that we are not using the
802                             // `#[rustc_layout_scalar_valid_range(n)]`
803                             // attribute to widen the range of anything as that would probably
804                             // result in UB somewhere
805                             if let Bound::Included(start) = start {
806                                 assert!(*scalar.valid_range.start() <= start);
807                                 scalar.valid_range = start..=*scalar.valid_range.end();
808                             }
809                             if let Bound::Included(end) = end {
810                                 assert!(*scalar.valid_range.end() >= end);
811                                 scalar.valid_range = *scalar.valid_range.start()..=end;
812                             }
813                         }
814                         _ => assert!(
815                             start == Bound::Unbounded && end == Bound::Unbounded,
816                             "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
817                             def,
818                             st,
819                         ),
820                     }
821                     return Ok(tcx.intern_layout(st));
822                 }
823
824                 // The current code for niche-filling relies on variant indices
825                 // instead of actual discriminants, so dataful enums with
826                 // explicit discriminants (RFC #2363) would misbehave.
827                 let no_explicit_discriminants = def.variants.iter_enumerated()
828                     .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i.as_u32()));
829
830                 // Niche-filling enum optimization.
831                 if !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants {
832                     let mut dataful_variant = None;
833                     let mut niche_variants = VariantIdx::MAX..=VariantIdx::new(0);
834
835                     // Find one non-ZST variant.
836                     'variants: for (v, fields) in variants.iter_enumerated() {
837                         if absent(fields) {
838                             continue 'variants;
839                         }
840                         for f in fields {
841                             if !f.is_zst() {
842                                 if dataful_variant.is_none() {
843                                     dataful_variant = Some(v);
844                                     continue 'variants;
845                                 } else {
846                                     dataful_variant = None;
847                                     break 'variants;
848                                 }
849                             }
850                         }
851                         niche_variants = *niche_variants.start().min(&v)..=v;
852                     }
853
854                     if niche_variants.start() > niche_variants.end() {
855                         dataful_variant = None;
856                     }
857
858                     if let Some(i) = dataful_variant {
859                         let count = (
860                             niche_variants.end().as_u32() - niche_variants.start().as_u32() + 1
861                         ) as u128;
862                         for (field_index, &field) in variants[i].iter().enumerate() {
863                             let niche = match self.find_niche(field)? {
864                                 Some(niche) => niche,
865                                 _ => continue,
866                             };
867                             let (niche_start, niche_scalar) = match niche.reserve(self, count) {
868                                 Some(pair) => pair,
869                                 None => continue,
870                             };
871
872                             let mut align = dl.aggregate_align;
873                             let st = variants.iter_enumerated().map(|(j, v)| {
874                                 let mut st = univariant_uninterned(v,
875                                     &def.repr, StructKind::AlwaysSized)?;
876                                 st.variants = Variants::Single { index: j };
877
878                                 align = align.max(st.align);
879
880                                 Ok(st)
881                             }).collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
882
883                             let offset = st[i].fields.offset(field_index) + niche.offset;
884                             let size = st[i].size;
885
886                             let mut abi = match st[i].abi {
887                                 Abi::Scalar(_) => Abi::Scalar(niche_scalar.clone()),
888                                 Abi::ScalarPair(ref first, ref second) => {
889                                     // We need to use scalar_unit to reset the
890                                     // valid range to the maximal one for that
891                                     // primitive, because only the niche is
892                                     // guaranteed to be initialised, not the
893                                     // other primitive.
894                                     if offset.bytes() == 0 {
895                                         Abi::ScalarPair(
896                                             niche_scalar.clone(),
897                                             scalar_unit(second.value),
898                                         )
899                                     } else {
900                                         Abi::ScalarPair(
901                                             scalar_unit(first.value),
902                                             niche_scalar.clone(),
903                                         )
904                                     }
905                                 }
906                                 _ => Abi::Aggregate { sized: true },
907                             };
908
909                             if st.iter().all(|v| v.abi.is_uninhabited()) {
910                                 abi = Abi::Uninhabited;
911                             }
912
913                             return Ok(tcx.intern_layout(LayoutDetails {
914                                 variants: Variants::NicheFilling {
915                                     dataful_variant: i,
916                                     niche_variants,
917                                     niche: niche_scalar,
918                                     niche_start,
919                                     variants: st,
920                                 },
921                                 fields: FieldPlacement::Arbitrary {
922                                     offsets: vec![offset],
923                                     memory_index: vec![0]
924                                 },
925                                 abi,
926                                 size,
927                                 align,
928                             }));
929                         }
930                     }
931                 }
932
933                 let (mut min, mut max) = (i128::max_value(), i128::min_value());
934                 let discr_type = def.repr.discr_type();
935                 let bits = Integer::from_attr(self, discr_type).size().bits();
936                 for (i, discr) in def.discriminants(tcx) {
937                     if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
938                         continue;
939                     }
940                     let mut x = discr.val as i128;
941                     if discr_type.is_signed() {
942                         // sign extend the raw representation to be an i128
943                         x = (x << (128 - bits)) >> (128 - bits);
944                     }
945                     if x < min { min = x; }
946                     if x > max { max = x; }
947                 }
948                 // We might have no inhabited variants, so pretend there's at least one.
949                 if (min, max) == (i128::max_value(), i128::min_value()) {
950                     min = 0;
951                     max = 0;
952                 }
953                 assert!(min <= max, "discriminant range is {}...{}", min, max);
954                 let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max);
955
956                 let mut align = dl.aggregate_align;
957                 let mut size = Size::ZERO;
958
959                 // We're interested in the smallest alignment, so start large.
960                 let mut start_align = Align::from_bytes(256).unwrap();
961                 assert_eq!(Integer::for_align(dl, start_align), None);
962
963                 // repr(C) on an enum tells us to make a (tag, union) layout,
964                 // so we need to grow the prefix alignment to be at least
965                 // the alignment of the union. (This value is used both for
966                 // determining the alignment of the overall enum, and the
967                 // determining the alignment of the payload after the tag.)
968                 let mut prefix_align = min_ity.align(dl).abi;
969                 if def.repr.c() {
970                     for fields in &variants {
971                         for field in fields {
972                             prefix_align = prefix_align.max(field.align.abi);
973                         }
974                     }
975                 }
976
977                 // Create the set of structs that represent each variant.
978                 let mut layout_variants = variants.iter_enumerated().map(|(i, field_layouts)| {
979                     let mut st = univariant_uninterned(&field_layouts,
980                         &def.repr, StructKind::Prefixed(min_ity.size(), prefix_align))?;
981                     st.variants = Variants::Single { index: i };
982                     // Find the first field we can't move later
983                     // to make room for a larger discriminant.
984                     for field in st.fields.index_by_increasing_offset().map(|j| field_layouts[j]) {
985                         if !field.is_zst() || field.align.abi.bytes() != 1 {
986                             start_align = start_align.min(field.align.abi);
987                             break;
988                         }
989                     }
990                     size = cmp::max(size, st.size);
991                     align = align.max(st.align);
992                     Ok(st)
993                 }).collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
994
995                 // Align the maximum variant size to the largest alignment.
996                 size = size.align_to(align.abi);
997
998                 if size.bytes() >= dl.obj_size_bound() {
999                     return Err(LayoutError::SizeOverflow(ty));
1000                 }
1001
1002                 let typeck_ity = Integer::from_attr(dl, def.repr.discr_type());
1003                 if typeck_ity < min_ity {
1004                     // It is a bug if Layout decided on a greater discriminant size than typeck for
1005                     // some reason at this point (based on values discriminant can take on). Mostly
1006                     // because this discriminant will be loaded, and then stored into variable of
1007                     // type calculated by typeck. Consider such case (a bug): typeck decided on
1008                     // byte-sized discriminant, but layout thinks we need a 16-bit to store all
1009                     // discriminant values. That would be a bug, because then, in codegen, in order
1010                     // to store this 16-bit discriminant into 8-bit sized temporary some of the
1011                     // space necessary to represent would have to be discarded (or layout is wrong
1012                     // on thinking it needs 16 bits)
1013                     bug!("layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
1014                          min_ity, typeck_ity);
1015                     // after this point – we'll just truncate the value we load in codegen.
1016                     // However, it is fine to make discr type however large (as an optimisation)
1017                 }
1018
1019                 // Check to see if we should use a different type for the
1020                 // discriminant. We can safely use a type with the same size
1021                 // as the alignment of the first field of each variant.
1022                 // We increase the size of the discriminant to avoid LLVM copying
1023                 // padding when it doesn't need to. This normally causes unaligned
1024                 // load/stores and excessive memcpy/memset operations. By using a
1025                 // bigger integer size, LLVM can be sure about its contents and
1026                 // won't be so conservative.
1027
1028                 // Use the initial field alignment
1029                 let mut ity = if def.repr.c() || def.repr.int.is_some() {
1030                     min_ity
1031                 } else {
1032                     Integer::for_align(dl, start_align).unwrap_or(min_ity)
1033                 };
1034
1035                 // If the alignment is not larger than the chosen discriminant size,
1036                 // don't use the alignment as the final size.
1037                 if ity <= min_ity {
1038                     ity = min_ity;
1039                 } else {
1040                     // Patch up the variants' first few fields.
1041                     let old_ity_size = min_ity.size();
1042                     let new_ity_size = ity.size();
1043                     for variant in &mut layout_variants {
1044                         match variant.fields {
1045                             FieldPlacement::Arbitrary { ref mut offsets, .. } => {
1046                                 for i in offsets {
1047                                     if *i <= old_ity_size {
1048                                         assert_eq!(*i, old_ity_size);
1049                                         *i = new_ity_size;
1050                                     }
1051                                 }
1052                                 // We might be making the struct larger.
1053                                 if variant.size <= old_ity_size {
1054                                     variant.size = new_ity_size;
1055                                 }
1056                             }
1057                             _ => bug!()
1058                         }
1059                     }
1060                 }
1061
1062                 let tag_mask = !0u128 >> (128 - ity.size().bits());
1063                 let tag = Scalar {
1064                     value: Int(ity, signed),
1065                     valid_range: (min as u128 & tag_mask)..=(max as u128 & tag_mask),
1066                 };
1067                 let mut abi = Abi::Aggregate { sized: true };
1068                 if tag.value.size(dl) == size {
1069                     abi = Abi::Scalar(tag.clone());
1070                 } else {
1071                     // Try to use a ScalarPair for all tagged enums.
1072                     let mut common_prim = None;
1073                     for (field_layouts, layout_variant) in variants.iter().zip(&layout_variants) {
1074                         let offsets = match layout_variant.fields {
1075                             FieldPlacement::Arbitrary { ref offsets, .. } => offsets,
1076                             _ => bug!(),
1077                         };
1078                         let mut fields = field_layouts
1079                             .iter()
1080                             .zip(offsets)
1081                             .filter(|p| !p.0.is_zst());
1082                         let (field, offset) = match (fields.next(), fields.next()) {
1083                             (None, None) => continue,
1084                             (Some(pair), None) => pair,
1085                             _ => {
1086                                 common_prim = None;
1087                                 break;
1088                             }
1089                         };
1090                         let prim = match field.details.abi {
1091                             Abi::Scalar(ref scalar) => scalar.value,
1092                             _ => {
1093                                 common_prim = None;
1094                                 break;
1095                             }
1096                         };
1097                         if let Some(pair) = common_prim {
1098                             // This is pretty conservative. We could go fancier
1099                             // by conflating things like i32 and u32, or even
1100                             // realising that (u8, u8) could just cohabit with
1101                             // u16 or even u32.
1102                             if pair != (prim, offset) {
1103                                 common_prim = None;
1104                                 break;
1105                             }
1106                         } else {
1107                             common_prim = Some((prim, offset));
1108                         }
1109                     }
1110                     if let Some((prim, offset)) = common_prim {
1111                         let pair = scalar_pair(tag.clone(), scalar_unit(prim));
1112                         let pair_offsets = match pair.fields {
1113                             FieldPlacement::Arbitrary {
1114                                 ref offsets,
1115                                 ref memory_index
1116                             } => {
1117                                 assert_eq!(memory_index, &[0, 1]);
1118                                 offsets
1119                             }
1120                             _ => bug!()
1121                         };
1122                         if pair_offsets[0] == Size::ZERO &&
1123                             pair_offsets[1] == *offset &&
1124                             align == pair.align &&
1125                             size == pair.size {
1126                             // We can use `ScalarPair` only when it matches our
1127                             // already computed layout (including `#[repr(C)]`).
1128                             abi = pair.abi;
1129                         }
1130                     }
1131                 }
1132
1133                 if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
1134                     abi = Abi::Uninhabited;
1135                 }
1136
1137                 tcx.intern_layout(LayoutDetails {
1138                     variants: Variants::Tagged {
1139                         tag,
1140                         variants: layout_variants,
1141                     },
1142                     fields: FieldPlacement::Arbitrary {
1143                         offsets: vec![Size::ZERO],
1144                         memory_index: vec![0]
1145                     },
1146                     abi,
1147                     align,
1148                     size
1149                 })
1150             }
1151
1152             // Types with no meaningful known layout.
1153             ty::Projection(_) | ty::Opaque(..) => {
1154                 let normalized = tcx.normalize_erasing_regions(param_env, ty);
1155                 if ty == normalized {
1156                     return Err(LayoutError::Unknown(ty));
1157                 }
1158                 tcx.layout_raw(param_env.and(normalized))?
1159             }
1160
1161             ty::Bound(..) |
1162             ty::Placeholder(..) |
1163             ty::UnnormalizedProjection(..) |
1164             ty::GeneratorWitness(..) |
1165             ty::Infer(_) => {
1166                 bug!("LayoutDetails::compute: unexpected type `{}`", ty)
1167             }
1168
1169             ty::Param(_) | ty::Error => {
1170                 return Err(LayoutError::Unknown(ty));
1171             }
1172         })
1173     }
1174
1175     /// This is invoked by the `layout_raw` query to record the final
1176     /// layout of each type.
1177     #[inline]
1178     fn record_layout_for_printing(&self, layout: TyLayout<'tcx>) {
1179         // If we are running with `-Zprint-type-sizes`, record layouts for
1180         // dumping later. Ignore layouts that are done with non-empty
1181         // environments or non-monomorphic layouts, as the user only wants
1182         // to see the stuff resulting from the final codegen session.
1183         if
1184             !self.tcx.sess.opts.debugging_opts.print_type_sizes ||
1185             layout.ty.has_param_types() ||
1186             layout.ty.has_self_ty() ||
1187             !self.param_env.caller_bounds.is_empty()
1188         {
1189             return;
1190         }
1191
1192         self.record_layout_for_printing_outlined(layout)
1193     }
1194
    /// Slow path of `record_layout_for_printing`: converts `layout` into
    /// `session::VariantInfo`/`FieldInfo` records and feeds them to the
    /// session's `code_stats`, which later produces the
    /// `-Zprint-type-sizes` dump. Only ADTs and closures are recorded.
    fn record_layout_for_printing_outlined(&self, layout: TyLayout<'tcx>) {
        // (delay format until we actually need it)
        let record = |kind, packed, opt_discr_size, variants| {
            let type_desc = format!("{:?}", layout.ty);
            self.tcx.sess.code_stats.borrow_mut().record_type_size(kind,
                                                                   type_desc,
                                                                   layout.align.abi,
                                                                   layout.size,
                                                                   packed,
                                                                   opt_discr_size,
                                                                   variants);
        };

        // Closures are recorded immediately (with no per-field breakdown);
        // non-nominal types are skipped; ADTs fall through to the detailed
        // per-variant reporting below.
        let adt_def = match layout.ty.sty {
            ty::Adt(ref adt_def, _) => {
                debug!("print-type-size t: `{:?}` process adt", layout.ty);
                adt_def
            }

            ty::Closure(..) => {
                debug!("print-type-size t: `{:?}` record closure", layout.ty);
                record(DataTypeKind::Closure, false, None, vec![]);
                return;
            }

            _ => {
                debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
                return;
            }
        };

        let adt_kind = adt_def.adt_kind();
        let adt_packed = adt_def.repr.packed();

        // Builds the report for one variant: one `FieldInfo` per field plus
        // the variant's minimal size (the furthest field end, in bytes).
        let build_variant_info = |n: Option<ast::Name>,
                                  flds: &[ast::Name],
                                  layout: TyLayout<'tcx>| {
            let mut min_size = Size::ZERO;
            let field_info: Vec<_> = flds.iter().enumerate().map(|(i, &name)| {
                match layout.field(self, i) {
                    Err(err) => {
                        // A field of a type whose layout we just computed
                        // must itself have a layout.
                        bug!("no layout found for field {}: `{:?}`", name, err);
                    }
                    Ok(field_layout) => {
                        let offset = layout.fields.offset(i);
                        let field_end = offset + field_layout.size;
                        // Track the furthest field end seen so far; this is
                        // the variant's size ignoring trailing padding.
                        if min_size < field_end {
                            min_size = field_end;
                        }
                        session::FieldInfo {
                            name: name.to_string(),
                            offset: offset.bytes(),
                            size: field_layout.size.bytes(),
                            align: field_layout.align.abi.bytes(),
                        }
                    }
                }
            }).collect();

            session::VariantInfo {
                name: n.map(|n|n.to_string()),
                // Unsized variants can only report a lower bound on size.
                kind: if layout.is_unsized() {
                    session::SizeKind::Min
                } else {
                    session::SizeKind::Exact
                },
                align: layout.align.abi.bytes(),
                // With no (non-ZST) fields, fall back to the overall layout
                // size rather than reporting zero.
                size: if min_size.bytes() == 0 {
                    layout.size.bytes()
                } else {
                    min_size.bytes()
                },
                fields: field_info,
            }
        };

        match layout.variants {
            Variants::Single { index } => {
                debug!("print-type-size `{:#?}` variant {}",
                       layout, adt_def.variants[index].name);
                if !adt_def.variants.is_empty() {
                    // Struct, union, or univariant-equivalent enum: record
                    // the single present variant.
                    let variant_def = &adt_def.variants[index];
                    let fields: Vec<_> =
                        variant_def.fields.iter().map(|f| f.ident.name).collect();
                    record(adt_kind.into(),
                           adt_packed,
                           None,
                           vec![build_variant_info(Some(variant_def.name),
                                                   &fields,
                                                   layout)]);
                } else {
                    // (This case arises for *empty* enums; so give it
                    // zero variants.)
                    record(adt_kind.into(), adt_packed, None, vec![]);
                }
            }

            Variants::NicheFilling { .. } |
            Variants::Tagged { .. } => {
                debug!("print-type-size `{:#?}` adt general variants def {}",
                       layout.ty, adt_def.variants.len());
                // Multi-variant enum: report every variant, using the
                // variant-specific layout for each.
                let variant_infos: Vec<_> =
                    adt_def.variants.iter_enumerated().map(|(i, variant_def)| {
                        let fields: Vec<_> =
                            variant_def.fields.iter().map(|f| f.ident.name).collect();
                        build_variant_info(Some(variant_def.name),
                                           &fields,
                                           layout.for_variant(self, i))
                    })
                    .collect();
                // Tagged enums additionally report the discriminant size;
                // niche-filling enums store no separate discriminant.
                record(adt_kind.into(), adt_packed, match layout.variants {
                    Variants::Tagged { ref tag, .. } => Some(tag.value.size(self)),
                    _ => None
                }, variant_infos);
            }
        }
    }
1312 }
1313
/// Type size "skeleton", i.e. the only information determining a type's size.
/// While this is conservative, (aside from constant sizes, only pointers,
/// newtypes thereof and null pointer optimized enums are allowed), it is
/// enough to statically check common use cases of transmute.
#[derive(Copy, Clone, Debug)]
pub enum SizeSkeleton<'tcx> {
    /// Any statically computable Layout.
    /// The payload is the type's exact size in bytes.
    Known(Size),

    /// A potentially-fat pointer.
    /// Its size is not statically known: thin vs. fat depends on whether
    /// `tail` turns out to be `Sized` after substitution.
    Pointer {
        /// If true, this pointer is never null.
        non_zero: bool,
        /// The type which determines the unsized metadata, if any,
        /// of this pointer. Either a type parameter or a projection
        /// depending on one, with regions erased.
        tail: Ty<'tcx>
    }
}
1333
impl<'a, 'tcx> SizeSkeleton<'tcx> {
    /// Computes the size skeleton of `ty`: first by asking the layout code
    /// for a full static layout, and, when that errors, by recognizing
    /// pointers (and newtypes/two-variant enums wrapping them) whose size
    /// is known even though the pointee's layout is not.
    pub fn compute(ty: Ty<'tcx>,
                   tcx: TyCtxt<'a, 'tcx, 'tcx>,
                   param_env: ty::ParamEnv<'tcx>)
                   -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
        debug_assert!(!ty.has_infer_types());

        // First try computing a static layout.
        let err = match tcx.layout_of(param_env.and(ty)) {
            Ok(layout) => {
                return Ok(SizeSkeleton::Known(layout.size));
            }
            Err(err) => err
        };

        match ty.sty {
            // References and raw pointers have a known (thin or fat pointer)
            // size even when the pointee's layout could not be computed, as
            // long as the unsized tail is a type parameter or projection
            // (anything else should have yielded a layout above).
            ty::Ref(_, pointee, _) |
            ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                // Only safe pointers (`&T`) are known to be non-null.
                let non_zero = !ty.is_unsafe_ptr();
                let tail = tcx.struct_tail(pointee);
                match tail.sty {
                    ty::Param(_) | ty::Projection(_) => {
                        debug_assert!(tail.has_param_types() || tail.has_self_ty());
                        Ok(SizeSkeleton::Pointer {
                            non_zero,
                            tail: tcx.erase_regions(&tail)
                        })
                    }
                    _ => {
                        bug!("SizeSkeleton::compute({}): layout errored ({}), yet \
                              tail `{}` is not a type parameter or a projection",
                             ty, err, tail)
                    }
                }
            }

            ty::Adt(def, substs) => {
                // Only newtypes and enums w/ nullable pointer optimization.
                if def.is_union() || def.variants.is_empty() || def.variants.len() > 2 {
                    return Err(err);
                }

                // Get a zero-sized variant or a pointer newtype.
                // Returns `Ok(Some(ptr_skeleton))` if variant `i` is exactly
                // one pointer plus zero-sized fields, `Ok(None)` if it is
                // entirely zero-sized, and `Err` otherwise.
                let zero_or_ptr_variant = |i| {
                    let i = VariantIdx::new(i);
                    let fields = def.variants[i].fields.iter().map(|field| {
                        SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env)
                    });
                    let mut ptr = None;
                    for field in fields {
                        let field = field?;
                        match field {
                            SizeSkeleton::Known(size) => {
                                if size.bytes() > 0 {
                                    return Err(err);
                                }
                            }
                            SizeSkeleton::Pointer {..} => {
                                // At most one pointer field is allowed.
                                if ptr.is_some() {
                                    return Err(err);
                                }
                                ptr = Some(field);
                            }
                        }
                    }
                    Ok(ptr)
                };

                let v0 = zero_or_ptr_variant(0)?;
                // Newtype.
                if def.variants.len() == 1 {
                    if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
                        return Ok(SizeSkeleton::Pointer {
                            // A `rustc_layout_scalar_valid_range` attribute
                            // excluding 0 (e.g. on `NonNull`) also makes the
                            // newtype non-null.
                            non_zero: non_zero || match tcx.layout_scalar_valid_range(def.did) {
                                (Bound::Included(start), Bound::Unbounded) => start > 0,
                                (Bound::Included(start), Bound::Included(end)) =>
                                    0 < start && start < end,
                                _ => false,
                            },
                            tail,
                        });
                    } else {
                        return Err(err);
                    }
                }

                let v1 = zero_or_ptr_variant(1)?;
                // Nullable pointer enum optimization: one variant is a
                // non-null pointer, the other is zero-sized, so the whole
                // enum is pointer-sized (with null encoding the ZST variant).
                match (v0, v1) {
                    (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None) |
                    (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
                        Ok(SizeSkeleton::Pointer {
                            non_zero: false,
                            tail,
                        })
                    }
                    _ => Err(err)
                }
            }

            ty::Projection(_) | ty::Opaque(..) => {
                // Retry on the normalized form, if normalization changed it.
                let normalized = tcx.normalize_erasing_regions(param_env, ty);
                if ty == normalized {
                    Err(err)
                } else {
                    SizeSkeleton::compute(normalized, tcx, param_env)
                }
            }

            _ => Err(err)
        }
    }

    /// Returns true if the two skeletons are guaranteed to describe equal
    /// sizes: known sizes must match exactly; pointers must have the same
    /// tail type (and hence the same metadata, i.e. thin/fat-ness).
    pub fn same_size(self, other: SizeSkeleton<'_>) -> bool {
        match (self, other) {
            (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
            (SizeSkeleton::Pointer { tail: a, .. },
             SizeSkeleton::Pointer { tail: b, .. }) => a == b,
            _ => false
        }
    }
}
1456
/// A context that can provide a `TyCtxt` (and, via the supertrait,
/// the target's data layout).
pub trait HasTyCtxt<'tcx>: HasDataLayout {
    fn tcx<'a>(&'a self) -> TyCtxt<'a, 'tcx, 'tcx>;
}
1460
impl<'a, 'gcx, 'tcx> HasDataLayout for TyCtxt<'a, 'gcx, 'tcx> {
    // `TyCtxt` carries the target data layout directly.
    fn data_layout(&self) -> &TargetDataLayout {
        &self.data_layout
    }
}
1466
impl<'a, 'gcx, 'tcx> HasTyCtxt<'gcx> for TyCtxt<'a, 'gcx, 'tcx> {
    // Hand out the global (fully lifetime-erased) context.
    fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'gcx> {
        self.global_tcx()
    }
}
1472
impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
    // Delegate to the wrapped context.
    fn data_layout(&self) -> &TargetDataLayout {
        self.tcx.data_layout()
    }
}
1478
impl<'gcx, 'tcx, T: HasTyCtxt<'gcx>> HasTyCtxt<'gcx> for LayoutCx<'tcx, T> {
    // Delegate to the wrapped context.
    fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'gcx> {
        self.tcx.tcx()
    }
}
1484
/// Abstracts over a plain `T` and a `Result<T, E>` so that generic layout
/// code can be written once for both infallible and fallible
/// `LayoutOf::TyLayout` types.
pub trait MaybeResult<T> {
    // Wrap a successfully computed value.
    fn from_ok(x: T) -> Self;
    // Map over the success value; for `Result`, errors pass through.
    fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self;
}
1489
// The infallible case: a bare `T` is always the "ok" value.
impl<T> MaybeResult<T> for T {
    fn from_ok(x: T) -> Self {
        x
    }
    fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self {
        f(self)
    }
}
1498
1499 impl<T, E> MaybeResult<T> for Result<T, E> {
1500     fn from_ok(x: T) -> Self {
1501         Ok(x)
1502     }
1503     fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self {
1504         self.map(f)
1505     }
1506 }
1507
1508 pub type TyLayout<'tcx> = ::rustc_target::abi::TyLayout<'tcx, Ty<'tcx>>;
1509
1510 impl<'a, 'tcx> LayoutOf for LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
1511     type Ty = Ty<'tcx>;
1512     type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;
1513
1514     /// Computes the layout of a type. Note that this implicitly
1515     /// executes in "reveal all" mode.
1516     fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout {
1517         let param_env = self.param_env.with_reveal_all();
1518         let ty = self.tcx.normalize_erasing_regions(param_env, ty);
1519         let details = self.tcx.layout_raw(param_env.and(ty))?;
1520         let layout = TyLayout {
1521             ty,
1522             details
1523         };
1524
1525         // NB: This recording is normally disabled; when enabled, it
1526         // can however trigger recursive invocations of `layout_of`.
1527         // Therefore, we execute it *after* the main query has
1528         // completed, to avoid problems around recursive structures
1529         // and the like. (Admittedly, I wasn't able to reproduce a problem
1530         // here, but it seems like the right thing to do. -nmatsakis)
1531         self.record_layout_for_printing(layout);
1532
1533         Ok(layout)
1534     }
1535 }
1536
1537 impl<'a, 'tcx> LayoutOf for LayoutCx<'tcx, ty::query::TyCtxtAt<'a, 'tcx, 'tcx>> {
1538     type Ty = Ty<'tcx>;
1539     type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;
1540
1541     /// Computes the layout of a type. Note that this implicitly
1542     /// executes in "reveal all" mode.
1543     fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout {
1544         let param_env = self.param_env.with_reveal_all();
1545         let ty = self.tcx.normalize_erasing_regions(param_env, ty);
1546         let details = self.tcx.layout_raw(param_env.and(ty))?;
1547         let layout = TyLayout {
1548             ty,
1549             details
1550         };
1551
1552         // NB: This recording is normally disabled; when enabled, it
1553         // can however trigger recursive invocations of `layout_of`.
1554         // Therefore, we execute it *after* the main query has
1555         // completed, to avoid problems around recursive structures
1556         // and the like. (Admittedly, I wasn't able to reproduce a problem
1557         // here, but it seems like the right thing to do. -nmatsakis)
1558         let cx = LayoutCx {
1559             tcx: *self.tcx,
1560             param_env: self.param_env
1561         };
1562         cx.record_layout_for_printing(layout);
1563
1564         Ok(layout)
1565     }
1566 }
1567
1568 // Helper (inherent) `layout_of` methods to avoid pushing `LayoutCx` to users.
1569 impl TyCtxt<'a, 'tcx, '_> {
1570     /// Computes the layout of a type. Note that this implicitly
1571     /// executes in "reveal all" mode.
1572     #[inline]
1573     pub fn layout_of(self, param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
1574                      -> Result<TyLayout<'tcx>, LayoutError<'tcx>> {
1575         let cx = LayoutCx {
1576             tcx: self.global_tcx(),
1577             param_env: param_env_and_ty.param_env
1578         };
1579         cx.layout_of(param_env_and_ty.value)
1580     }
1581 }
1582
1583 impl ty::query::TyCtxtAt<'a, 'tcx, '_> {
1584     /// Computes the layout of a type. Note that this implicitly
1585     /// executes in "reveal all" mode.
1586     #[inline]
1587     pub fn layout_of(self, param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
1588                      -> Result<TyLayout<'tcx>, LayoutError<'tcx>> {
1589         let cx = LayoutCx {
1590             tcx: self.global_tcx().at(self.span),
1591             param_env: param_env_and_ty.param_env
1592         };
1593         cx.layout_of(param_env_and_ty.value)
1594     }
1595 }
1596
impl<'a, 'tcx, C> TyLayoutMethods<'tcx, C> for Ty<'tcx>
    where C: LayoutOf<Ty = Ty<'tcx>> + HasTyCtxt<'tcx>,
          C::TyLayout: MaybeResult<TyLayout<'tcx>>
{
    /// Projects `this` to the layout of one of its variants
    /// (e.g. a specific enum variant's field layout).
    fn for_variant(this: TyLayout<'tcx>, cx: &C, variant_index: VariantIdx) -> TyLayout<'tcx> {
        let details = match this.variants {
            // Already the requested variant; reuse the details as-is.
            Variants::Single { index } if index == variant_index => this.details,

            Variants::Single { index } => {
                // Deny calling for_variant more than once for non-Single enums.
                cx.layout_of(this.ty).map_same(|layout| {
                    assert_eq!(layout.variants, Variants::Single { index });
                    layout
                });

                // NOTE(review): this arm appears to handle asking for a
                // variant other than the one the layout collapsed to; it
                // synthesizes an uninhabited, zero-sized stand-in with the
                // right field count — confirm against callers.
                let fields = match this.ty.sty {
                    ty::Adt(def, _) => def.variants[variant_index].fields.len(),
                    _ => bug!()
                };
                let tcx = cx.tcx();
                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: variant_index },
                    fields: FieldPlacement::Union(fields),
                    abi: Abi::Uninhabited,
                    align: tcx.data_layout.i8_align,
                    size: Size::ZERO
                })
            }

            // Multi-variant layouts store per-variant details directly.
            Variants::NicheFilling { ref variants, .. } |
            Variants::Tagged { ref variants, .. } => {
                &variants[variant_index]
            }
        };

        // Whatever path was taken, the result must describe exactly the
        // requested variant.
        assert_eq!(details.variants, Variants::Single { index: variant_index });

        TyLayout {
            ty: this.ty,
            details
        }
    }

    /// Computes the layout of field `i` of `this`, by mapping the field
    /// index to the field's type and recursing into `cx.layout_of`.
    fn field(this: TyLayout<'tcx>, cx: &C, i: usize) -> C::TyLayout {
        let tcx = cx.tcx();
        cx.layout_of(match this.ty.sty {
            // Primitive-like types have no fields to project to.
            ty::Bool |
            ty::Char |
            ty::Int(_) |
            ty::Uint(_) |
            ty::Float(_) |
            ty::FnPtr(_) |
            ty::Never |
            ty::FnDef(..) |
            ty::GeneratorWitness(..) |
            ty::Foreign(..) |
            ty::Dynamic(..) => {
                bug!("TyLayout::field_type({:?}): not applicable", this)
            }

            // Potentially-fat pointers.
            ty::Ref(_, pointee, _) |
            ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                assert!(i < this.fields.count());

                // Reuse the fat *T type as its own thin pointer data field.
                // This provides information about e.g. DST struct pointees
                // (which may have no non-DST form), and will work as long
                // as the `Abi` or `FieldPlacement` is checked by users.
                if i == 0 {
                    let nil = tcx.mk_unit();
                    let ptr_ty = if this.ty.is_unsafe_ptr() {
                        tcx.mk_mut_ptr(nil)
                    } else {
                        tcx.mk_mut_ref(tcx.types.re_static, nil)
                    };
                    return cx.layout_of(ptr_ty).map_same(|mut ptr_layout| {
                        ptr_layout.ty = this.ty;
                        ptr_layout
                    });
                }

                // Field 1 is the pointer metadata: a length for slices/str,
                // a vtable pointer for trait objects.
                match tcx.struct_tail(pointee).sty {
                    ty::Slice(_) |
                    ty::Str => tcx.types.usize,
                    ty::Dynamic(_, _) => {
                        tcx.mk_imm_ref(
                            tcx.types.re_static,
                            tcx.mk_array(tcx.types.usize, 3),
                        )
                        /* FIXME use actual fn pointers
                        Warning: naively computing the number of entries in the
                        vtable by counting the methods on the trait + methods on
                        all parent traits does not work, because some methods can
                        be not object safe and thus excluded from the vtable.
                        Increase this counter if you tried to implement this but
                        failed to do it without duplicating a lot of code from
                        other places in the compiler: 2
                        tcx.mk_tup(&[
                            tcx.mk_array(tcx.types.usize, 3),
                            tcx.mk_array(Option<fn()>),
                        ])
                        */
                    }
                    _ => bug!("TyLayout::field_type({:?}): not applicable", this)
                }
            }

            // Arrays and slices.
            ty::Array(element, _) |
            ty::Slice(element) => element,
            ty::Str => tcx.types.u8,

            // Tuples, generators and closures.
            ty::Closure(def_id, ref substs) => {
                substs.upvar_tys(def_id, tcx).nth(i).unwrap()
            }

            ty::Generator(def_id, ref substs, _) => {
                substs.field_tys(def_id, tcx).nth(i).unwrap()
            }

            ty::Tuple(tys) => tys[i],

            // SIMD vector types.
            ty::Adt(def, ..) if def.repr.simd() => {
                this.ty.simd_type(tcx)
            }

            // ADTs.
            ty::Adt(def, substs) => {
                match this.variants {
                    Variants::Single { index } => {
                        def.variants[index].fields[i].ty(tcx, substs)
                    }

                    // Discriminant field for enums (where applicable).
                    Variants::Tagged { tag: ref discr, .. } |
                    Variants::NicheFilling { niche: ref discr, .. } => {
                        assert_eq!(i, 0);
                        // Synthesize a scalar layout for the discriminant
                        // rather than recursing through `layout_of`.
                        let layout = LayoutDetails::scalar(cx, discr.clone());
                        return MaybeResult::from_ok(TyLayout {
                            details: tcx.intern_layout(layout),
                            ty: discr.value.to_ty(tcx)
                        });
                    }
                }
            }

            // Types that should have been normalized or reported earlier.
            ty::Projection(_) | ty::UnnormalizedProjection(..) | ty::Bound(..) |
            ty::Placeholder(..) | ty::Opaque(..) | ty::Param(_) | ty::Infer(_) |
            ty::Error => {
                bug!("TyLayout::field_type: unexpected type `{}`", this.ty)
            }
        })
    }
}
1754
/// An invalid-value "hole" in a scalar found inside a layout, usable for
/// encoding enum discriminants without extra space.
struct Niche {
    // Byte offset of the scalar within the enclosing layout.
    offset: Size,
    // The scalar (primitive + valid range) containing the niche.
    scalar: Scalar,
    // Number of invalid values available for reservation.
    available: u128,
}
1760
impl Niche {
    /// Tries to reserve `count` values out of this niche, returning the
    /// first reserved value together with a copy of the scalar whose valid
    /// range is widened to include the reservation. Returns `None` when
    /// fewer than `count` invalid values are available.
    fn reserve<'a, 'tcx>(
        &self,
        cx: &LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>>,
        count: u128,
    ) -> Option<(u128, Scalar)> {
        if count > self.available {
            return None;
        }
        let Scalar { value, valid_range: ref v } = self.scalar;
        let bits = value.size(cx).bits();
        assert!(bits <= 128);
        // Mask for wrapping arithmetic within the scalar's bit width.
        let max_value = !0u128 >> (128 - bits);
        // Reserved values begin just past the end of the current valid
        // range and wrap around within the scalar's width.
        let start = v.end().wrapping_add(1) & max_value;
        let end = v.end().wrapping_add(count) & max_value;
        Some((start, Scalar { value, valid_range: *v.start()..=end }))
    }
}
1779
impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
    /// Find the offset of a niche leaf field, starting from
    /// the given type and recursing through aggregates.
    /// Returns the niche with the most available invalid values, if any.
    // FIXME(eddyb) traverse already optimized enums.
    fn find_niche(&self, layout: TyLayout<'tcx>) -> Result<Option<Niche>, LayoutError<'tcx>> {
        // Turn a scalar at `offset` into a `Niche`, if its valid range
        // leaves at least one value unused.
        let scalar_niche = |scalar: &Scalar, offset| {
            let Scalar { value, valid_range: ref v } = *scalar;

            let bits = value.size(self).bits();
            assert!(bits <= 128);
            let max_value = !0u128 >> (128 - bits);

            // Find out how many values are outside the valid range.
            // The second arm handles wrapping ranges (end < start).
            let available = if v.start() <= v.end() {
                v.start() + (max_value - v.end())
            } else {
                v.start() - v.end() - 1
            };

            // Give up if there is no niche value available.
            if available == 0 {
                return None;
            }

            Some(Niche { offset, scalar: scalar.clone(), available })
        };

        // Locals variables which live across yields are stored
        // in the generator type as fields. These may be uninitialized
        // so we don't look for niches there.
        if let ty::Generator(..) = layout.ty.sty {
            return Ok(None);
        }

        // Scalar-ish ABIs can be inspected directly.
        match layout.abi {
            Abi::Scalar(ref scalar) => {
                return Ok(scalar_niche(scalar, Size::ZERO));
            }
            Abi::ScalarPair(ref a, ref b) => {
                // HACK(nox): We iter on `b` and then `a` because `max_by_key`
                // returns the last maximum.
                let niche = iter::once(
                    (b, a.value.size(self).align_to(b.value.align(self).abi))
                )
                    .chain(iter::once((a, Size::ZERO)))
                    .filter_map(|(scalar, offset)| scalar_niche(scalar, offset))
                    .max_by_key(|niche| niche.available);
                return Ok(niche);
            }
            Abi::Vector { ref element, .. } => {
                return Ok(scalar_niche(element, Size::ZERO));
            }
            _ => {}
        }

        // Perhaps one of the fields is non-zero, let's recurse and find out.
        if let FieldPlacement::Union(_) = layout.fields {
            // Only Rust enums have safe-to-inspect fields
            // (a discriminant), other unions are unsafe.
            if let Variants::Single { .. } = layout.variants {
                return Ok(None);
            }
        }
        if let FieldPlacement::Array { .. } = layout.fields {
            // All array elements share one layout, so inspecting the first
            // element suffices.
            if layout.fields.count() > 0 {
                return self.find_niche(layout.field(self, 0)?);
            } else {
                return Ok(None);
            }
        }
        // Recurse into every field and keep the largest niche found,
        // translating its offset to be relative to `layout`.
        let mut niche = None;
        let mut available = 0;
        for i in 0..layout.fields.count() {
            if let Some(mut c) = self.find_niche(layout.field(self, i)?)? {
                if c.available > available {
                    available = c.available;
                    c.offset += layout.fields.offset(i);
                    niche = Some(c);
                }
            }
        }
        Ok(niche)
    }
}
1864
impl<'a> HashStable<StableHashingContext<'a>> for Variants {
    // Hash the enum discriminant first, then the payload fields of the
    // active variant, so distinct variants never hash alike.
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        use ty::layout::Variants::*;
        mem::discriminant(self).hash_stable(hcx, hasher);

        match *self {
            Single { index } => {
                index.hash_stable(hcx, hasher);
            }
            Tagged {
                ref tag,
                ref variants,
            } => {
                tag.hash_stable(hcx, hasher);
                variants.hash_stable(hcx, hasher);
            }
            NicheFilling {
                dataful_variant,
                ref niche_variants,
                ref niche,
                niche_start,
                ref variants,
            } => {
                dataful_variant.hash_stable(hcx, hasher);
                // Hash the inclusive range as its two endpoints.
                niche_variants.start().hash_stable(hcx, hasher);
                niche_variants.end().hash_stable(hcx, hasher);
                niche.hash_stable(hcx, hasher);
                niche_start.hash_stable(hcx, hasher);
                variants.hash_stable(hcx, hasher);
            }
        }
    }
}
1900
impl<'a> HashStable<StableHashingContext<'a>> for FieldPlacement {
    // Discriminant first, then the per-variant payload, mirroring the
    // pattern used by the other layout `HashStable` impls in this file.
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        use ty::layout::FieldPlacement::*;
        mem::discriminant(self).hash_stable(hcx, hasher);

        match *self {
            Union(count) => {
                count.hash_stable(hcx, hasher);
            }
            Array { count, stride } => {
                count.hash_stable(hcx, hasher);
                stride.hash_stable(hcx, hasher);
            }
            Arbitrary { ref offsets, ref memory_index } => {
                offsets.hash_stable(hcx, hasher);
                memory_index.hash_stable(hcx, hasher);
            }
        }
    }
}
1923
impl<'a> HashStable<StableHashingContext<'a>> for VariantIdx {
    // A `VariantIdx` is just a newtyped index; hash its raw `u32`.
    fn hash_stable<W: StableHasherResult>(
        &self,
        hcx: &mut StableHashingContext<'a>,
        hasher: &mut StableHasher<W>,
    ) {
        self.as_u32().hash_stable(hcx, hasher)
    }
}
1933
impl<'a> HashStable<StableHashingContext<'a>> for Abi {
    // Discriminant first, then each variant's payload fields in order.
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        use ty::layout::Abi::*;
        mem::discriminant(self).hash_stable(hcx, hasher);

        match *self {
            // `Uninhabited` carries no payload.
            Uninhabited => {}
            Scalar(ref value) => {
                value.hash_stable(hcx, hasher);
            }
            ScalarPair(ref a, ref b) => {
                a.hash_stable(hcx, hasher);
                b.hash_stable(hcx, hasher);
            }
            Vector { ref element, count } => {
                element.hash_stable(hcx, hasher);
                count.hash_stable(hcx, hasher);
            }
            Aggregate { sized } => {
                sized.hash_stable(hcx, hasher);
            }
        }
    }
}
1960
impl<'a> HashStable<StableHashingContext<'a>> for Scalar {
    // Hash the primitive value plus both endpoints of the valid range.
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        let Scalar { value, ref valid_range } = *self;
        value.hash_stable(hcx, hasher);
        valid_range.start().hash_stable(hcx, hasher);
        valid_range.end().hash_stable(hcx, hasher);
    }
}
1971
// Structural `HashStable` impls, generated by macro: each listed field
// (or enum variant payload) is hashed in the order given here.
impl_stable_hash_for!(struct ::ty::layout::LayoutDetails {
    variants,
    fields,
    abi,
    size,
    align
});

impl_stable_hash_for!(enum ::ty::layout::Integer {
    I8,
    I16,
    I32,
    I64,
    I128
});

impl_stable_hash_for!(enum ::ty::layout::Primitive {
    Int(integer, signed),
    Float(fty),
    Pointer
});

impl_stable_hash_for!(struct ::ty::layout::AbiAndPrefAlign {
    abi,
    pref
});
1998
impl<'gcx> HashStable<StableHashingContext<'gcx>> for Align {
    // Hash the byte count rather than the internal representation.
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'gcx>,
                                          hasher: &mut StableHasher<W>) {
        self.bytes().hash_stable(hcx, hasher);
    }
}
2006
impl<'gcx> HashStable<StableHashingContext<'gcx>> for Size {
    // Hash the byte count rather than the internal representation.
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'gcx>,
                                          hasher: &mut StableHasher<W>) {
        self.bytes().hash_stable(hcx, hasher);
    }
}
2014
impl<'a, 'gcx> HashStable<StableHashingContext<'a>> for LayoutError<'gcx>
{
    // Discriminant plus the offending type; both variants carry a `Ty`.
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        use ty::layout::LayoutError::*;
        mem::discriminant(self).hash_stable(hcx, hasher);

        match *self {
            Unknown(t) |
            SizeOverflow(t) => t.hash_stable(hcx, hasher)
        }
    }
}