]> git.lizzy.rs Git - rust.git/blob - src/librustc/ty/layout.rs
rustc_target: avoid using AbiAndPrefAlign where possible.
[rust.git] / src / librustc / ty / layout.rs
1 // Copyright 2016 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
4 //
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
10
11 use session::{self, DataTypeKind};
12 use ty::{self, Ty, TyCtxt, TypeFoldable, ReprOptions};
13
14 use syntax::ast::{self, IntTy, UintTy};
15 use syntax::attr;
16 use syntax_pos::DUMMY_SP;
17
18 use std::cmp;
19 use std::fmt;
20 use std::i128;
21 use std::iter;
22 use std::mem;
23 use std::ops::Bound;
24
25 use ich::StableHashingContext;
26 use rustc_data_structures::indexed_vec::{IndexVec, Idx};
27 use rustc_data_structures::stable_hasher::{HashStable, StableHasher,
28                                            StableHasherResult};
29
30 pub use rustc_target::abi::*;
31
/// Extension methods on `Integer` (re-exported from `rustc_target` above)
/// that need access to the type system (`TyCtxt`, `Ty`, `ReprOptions`),
/// which `rustc_target` itself does not have.
pub trait IntegerExt {
    /// Lowers this integer width, plus a signedness, to the matching primitive `Ty`.
    fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, signed: bool) -> Ty<'tcx>;
    /// Maps an `attr::IntType` (from a `#[repr(...)]` attribute) to an `Integer`.
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
    /// Chooses the discriminant type (and its signedness) for an enum whose
    /// discriminant values span `min..=max`, honoring any `#[repr]` hint.
    fn repr_discr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                            ty: Ty<'tcx>,
                            repr: &ReprOptions,
                            min: i128,
                            max: i128)
                            -> (Integer, bool);
}
42
43 impl IntegerExt for Integer {
44     fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, signed: bool) -> Ty<'tcx> {
45         match (*self, signed) {
46             (I8, false) => tcx.types.u8,
47             (I16, false) => tcx.types.u16,
48             (I32, false) => tcx.types.u32,
49             (I64, false) => tcx.types.u64,
50             (I128, false) => tcx.types.u128,
51             (I8, true) => tcx.types.i8,
52             (I16, true) => tcx.types.i16,
53             (I32, true) => tcx.types.i32,
54             (I64, true) => tcx.types.i64,
55             (I128, true) => tcx.types.i128,
56         }
57     }
58
59     /// Get the Integer type from an attr::IntType.
60     fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
61         let dl = cx.data_layout();
62
63         match ity {
64             attr::SignedInt(IntTy::I8) | attr::UnsignedInt(UintTy::U8) => I8,
65             attr::SignedInt(IntTy::I16) | attr::UnsignedInt(UintTy::U16) => I16,
66             attr::SignedInt(IntTy::I32) | attr::UnsignedInt(UintTy::U32) => I32,
67             attr::SignedInt(IntTy::I64) | attr::UnsignedInt(UintTy::U64) => I64,
68             attr::SignedInt(IntTy::I128) | attr::UnsignedInt(UintTy::U128) => I128,
69             attr::SignedInt(IntTy::Isize) | attr::UnsignedInt(UintTy::Usize) => {
70                 dl.ptr_sized_integer()
71             }
72         }
73     }
74
75     /// Find the appropriate Integer type and signedness for the given
76     /// signed discriminant range and #[repr] attribute.
77     /// N.B.: u128 values above i128::MAX will be treated as signed, but
78     /// that shouldn't affect anything, other than maybe debuginfo.
79     fn repr_discr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
80                             ty: Ty<'tcx>,
81                             repr: &ReprOptions,
82                             min: i128,
83                             max: i128)
84                             -> (Integer, bool) {
85         // Theoretically, negative values could be larger in unsigned representation
86         // than the unsigned representation of the signed minimum. However, if there
87         // are any negative values, the only valid unsigned representation is u128
88         // which can fit all i128 values, so the result remains unaffected.
89         let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
90         let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));
91
92         let mut min_from_extern = None;
93         let min_default = I8;
94
95         if let Some(ity) = repr.int {
96             let discr = Integer::from_attr(&tcx, ity);
97             let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
98             if discr < fit {
99                 bug!("Integer::repr_discr: `#[repr]` hint too small for \
100                       discriminant range of enum `{}", ty)
101             }
102             return (discr, ity.is_signed());
103         }
104
105         if repr.c() {
106             match &tcx.sess.target.target.arch[..] {
107                 // WARNING: the ARM EABI has two variants; the one corresponding
108                 // to `at_least == I32` appears to be used on Linux and NetBSD,
109                 // but some systems may use the variant corresponding to no
110                 // lower bound. However, we don't run on those yet...?
111                 "arm" => min_from_extern = Some(I32),
112                 _ => min_from_extern = Some(I32),
113             }
114         }
115
116         let at_least = min_from_extern.unwrap_or(min_default);
117
118         // If there are no negative values, we can use the unsigned fit.
119         if min >= 0 {
120             (cmp::max(unsigned_fit, at_least), false)
121         } else {
122             (cmp::max(signed_fit, at_least), true)
123         }
124     }
125 }
126
/// Extension methods on `Primitive` (re-exported from `rustc_target` above)
/// that need access to the type system.
pub trait PrimitiveExt {
    /// Lowers this primitive to the corresponding Rust `Ty`.
    fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Ty<'tcx>;
}
130
131 impl PrimitiveExt for Primitive {
132     fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Ty<'tcx> {
133         match *self {
134             Int(i, signed) => i.to_ty(tcx, signed),
135             Float(FloatTy::F32) => tcx.types.f32,
136             Float(FloatTy::F64) => tcx.types.f64,
137             Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
138         }
139     }
140 }
141
// NOTE(review): these constants appear to be used as field indices into
// fat-pointer (scalar-pair) layouts elsewhere in the compiler — confirm
// against callers; only the declarations are visible here.

/// The first half of a fat pointer.
///
/// - For a trait object, this is the address of the box.
/// - For a slice, this is the base address.
pub const FAT_PTR_ADDR: usize = 0;

/// The second half of a fat pointer.
///
/// - For a trait object, this is the address of the vtable.
/// - For a slice, this is the length.
pub const FAT_PTR_EXTRA: usize = 1;
153
/// The reasons a type's layout could not be computed.
#[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable)]
pub enum LayoutError<'tcx> {
    /// The layout is not known, e.g. an array whose length could not be
    /// normalized to a constant, or a foreign/opaque unsized tail.
    Unknown(Ty<'tcx>),
    /// A size computation overflowed (the type is too big for the
    /// current architecture's address space).
    SizeOverflow(Ty<'tcx>)
}
159
160 impl<'tcx> fmt::Display for LayoutError<'tcx> {
161     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
162         match *self {
163             LayoutError::Unknown(ty) => {
164                 write!(f, "the type `{:?}` has an unknown layout", ty)
165             }
166             LayoutError::SizeOverflow(ty) => {
167                 write!(f, "the type `{:?}` is too big for the current architecture", ty)
168             }
169         }
170     }
171 }
172
/// Provider for the `layout_raw` query: computes the `LayoutDetails`
/// for `ty` under `param_env`, guarding against unbounded recursion
/// through the thread-local `layout_depth` counter.
fn layout_raw<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                        query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
                        -> Result<&'tcx LayoutDetails, LayoutError<'tcx>>
{
    ty::tls::with_related_context(tcx, move |icx| {
        let rec_limit = *tcx.sess.recursion_limit.get();
        let (param_env, ty) = query.into_parts();

        // Computing a layout can recursively require the layouts of the
        // type's contents; once nesting exceeds the session's recursion
        // limit, abort with a fatal error (not a recoverable LayoutError).
        if icx.layout_depth > rec_limit {
            tcx.sess.fatal(
                &format!("overflow representing the type `{}`", ty));
        }

        // Update the ImplicitCtxt to increase the layout_depth
        let icx = ty::tls::ImplicitCtxt {
            layout_depth: icx.layout_depth + 1,
            ..icx.clone()
        };

        // Run the actual computation inside the updated TLS context so
        // that nested layout queries observe the incremented depth.
        ty::tls::enter_context(&icx, |_| {
            let cx = LayoutCx { tcx, param_env };
            cx.layout_raw_uncached(ty)
        })
    })
}
198
199 pub fn provide(providers: &mut ty::query::Providers<'_>) {
200     *providers = ty::query::Providers {
201         layout_raw,
202         ..*providers
203     };
204 }
205
/// Bundles a context (`TyCtxt`, or another layout-capable context type
/// via the generic parameter `C`) with the `ParamEnv` under which
/// layouts are computed.
pub struct LayoutCx<'tcx, C> {
    pub tcx: C,
    pub param_env: ty::ParamEnv<'tcx>
}
210
211 impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
212     fn layout_raw_uncached(&self, ty: Ty<'tcx>)
213                            -> Result<&'tcx LayoutDetails, LayoutError<'tcx>> {
214         let tcx = self.tcx;
215         let param_env = self.param_env;
216         let dl = self.data_layout();
217         let scalar_unit = |value: Primitive| {
218             let bits = value.size(dl).bits();
219             assert!(bits <= 128);
220             Scalar {
221                 value,
222                 valid_range: 0..=(!0 >> (128 - bits))
223             }
224         };
225         let scalar = |value: Primitive| {
226             tcx.intern_layout(LayoutDetails::scalar(self, scalar_unit(value)))
227         };
228         let scalar_pair = |a: Scalar, b: Scalar| {
229             let b_align = b.value.align(dl);
230             let align = a.value.align(dl).max(b_align).max(dl.aggregate_align);
231             let b_offset = a.value.size(dl).align_to(b_align.abi);
232             let size = (b_offset + b.value.size(dl)).align_to(align.abi);
233             LayoutDetails {
234                 variants: Variants::Single { index: VariantIdx::new(0) },
235                 fields: FieldPlacement::Arbitrary {
236                     offsets: vec![Size::ZERO, b_offset],
237                     memory_index: vec![0, 1]
238                 },
239                 abi: Abi::ScalarPair(a, b),
240                 align,
241                 size
242             }
243         };
244
245         #[derive(Copy, Clone, Debug)]
246         enum StructKind {
247             /// A tuple, closure, or univariant which cannot be coerced to unsized.
248             AlwaysSized,
249             /// A univariant, the last field of which may be coerced to unsized.
250             MaybeUnsized,
251             /// A univariant, but with a prefix of an arbitrary size & alignment (e.g. enum tag).
252             Prefixed(Size, Align),
253         }
254
255         let univariant_uninterned = |fields: &[TyLayout<'_>], repr: &ReprOptions, kind| {
256             let packed = repr.packed();
257             if packed && repr.align > 0 {
258                 bug!("struct cannot be packed and aligned");
259             }
260
261             let pack = Align::from_bytes(repr.pack as u64).unwrap();
262
263             let mut align = if packed {
264                 dl.i8_align
265             } else {
266                 dl.aggregate_align
267             };
268
269             let mut sized = true;
270             let mut offsets = vec![Size::ZERO; fields.len()];
271             let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();
272
273             let mut optimize = !repr.inhibit_struct_field_reordering_opt();
274             if let StructKind::Prefixed(_, align) = kind {
275                 optimize &= align.bytes() == 1;
276             }
277
278             if optimize {
279                 let end = if let StructKind::MaybeUnsized = kind {
280                     fields.len() - 1
281                 } else {
282                     fields.len()
283                 };
284                 let optimizing = &mut inverse_memory_index[..end];
285                 let field_align = |f: &TyLayout<'_>| {
286                     if packed { f.align.abi.min(pack) } else { f.align.abi }
287                 };
288                 match kind {
289                     StructKind::AlwaysSized |
290                     StructKind::MaybeUnsized => {
291                         optimizing.sort_by_key(|&x| {
292                             // Place ZSTs first to avoid "interesting offsets",
293                             // especially with only one or two non-ZST fields.
294                             let f = &fields[x as usize];
295                             (!f.is_zst(), cmp::Reverse(field_align(f)))
296                         });
297                     }
298                     StructKind::Prefixed(..) => {
299                         optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
300                     }
301                 }
302             }
303
304             // inverse_memory_index holds field indices by increasing memory offset.
305             // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
306             // We now write field offsets to the corresponding offset slot;
307             // field 5 with offset 0 puts 0 in offsets[5].
308             // At the bottom of this function, we use inverse_memory_index to produce memory_index.
309
310             let mut offset = Size::ZERO;
311
312             if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
313                 let prefix_align = if packed {
314                     prefix_align.min(pack)
315                 } else {
316                     prefix_align
317                 };
318                 align = align.max(AbiAndPrefAlign::new(prefix_align));
319                 offset = prefix_size.align_to(prefix_align);
320             }
321
322             for &i in &inverse_memory_index {
323                 let field = fields[i as usize];
324                 if !sized {
325                     bug!("univariant: field #{} of `{}` comes after unsized field",
326                          offsets.len(), ty);
327                 }
328
329                 if field.is_unsized() {
330                     sized = false;
331                 }
332
333                 // Invariant: offset < dl.obj_size_bound() <= 1<<61
334                 let field_align = if packed {
335                     field.align.min(AbiAndPrefAlign::new(pack))
336                 } else {
337                     field.align
338                 };
339                 offset = offset.align_to(field_align.abi);
340                 align = align.max(field_align);
341
342                 debug!("univariant offset: {:?} field: {:#?}", offset, field);
343                 offsets[i as usize] = offset;
344
345                 offset = offset.checked_add(field.size, dl)
346                     .ok_or(LayoutError::SizeOverflow(ty))?;
347             }
348
349             if repr.align > 0 {
350                 let repr_align = repr.align as u64;
351                 align = align.max(AbiAndPrefAlign::new(Align::from_bytes(repr_align).unwrap()));
352                 debug!("univariant repr_align: {:?}", repr_align);
353             }
354
355             debug!("univariant min_size: {:?}", offset);
356             let min_size = offset;
357
358             // As stated above, inverse_memory_index holds field indices by increasing offset.
359             // This makes it an already-sorted view of the offsets vec.
360             // To invert it, consider:
361             // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
362             // Field 5 would be the first element, so memory_index is i:
363             // Note: if we didn't optimize, it's already right.
364
365             let mut memory_index;
366             if optimize {
367                 memory_index = vec![0; inverse_memory_index.len()];
368
369                 for i in 0..inverse_memory_index.len() {
370                     memory_index[inverse_memory_index[i] as usize]  = i as u32;
371                 }
372             } else {
373                 memory_index = inverse_memory_index;
374             }
375
376             let size = min_size.align_to(align.abi);
377             let mut abi = Abi::Aggregate { sized };
378
379             // Unpack newtype ABIs and find scalar pairs.
380             if sized && size.bytes() > 0 {
381                 // All other fields must be ZSTs, and we need them to all start at 0.
382                 let mut zst_offsets =
383                     offsets.iter().enumerate().filter(|&(i, _)| fields[i].is_zst());
384                 if zst_offsets.all(|(_, o)| o.bytes() == 0) {
385                     let mut non_zst_fields =
386                         fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());
387
388                     match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
389                         // We have exactly one non-ZST field.
390                         (Some((i, field)), None, None) => {
391                             // Field fills the struct and it has a scalar or scalar pair ABI.
392                             if offsets[i].bytes() == 0 &&
393                                align.abi == field.align.abi &&
394                                size == field.size {
395                                 match field.abi {
396                                     // For plain scalars, or vectors of them, we can't unpack
397                                     // newtypes for `#[repr(C)]`, as that affects C ABIs.
398                                     Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
399                                         abi = field.abi.clone();
400                                     }
401                                     // But scalar pairs are Rust-specific and get
402                                     // treated as aggregates by C ABIs anyway.
403                                     Abi::ScalarPair(..) => {
404                                         abi = field.abi.clone();
405                                     }
406                                     _ => {}
407                                 }
408                             }
409                         }
410
411                         // Two non-ZST fields, and they're both scalars.
412                         (Some((i, &TyLayout {
413                             details: &LayoutDetails { abi: Abi::Scalar(ref a), .. }, ..
414                         })), Some((j, &TyLayout {
415                             details: &LayoutDetails { abi: Abi::Scalar(ref b), .. }, ..
416                         })), None) => {
417                             // Order by the memory placement, not source order.
418                             let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
419                                 ((i, a), (j, b))
420                             } else {
421                                 ((j, b), (i, a))
422                             };
423                             let pair = scalar_pair(a.clone(), b.clone());
424                             let pair_offsets = match pair.fields {
425                                 FieldPlacement::Arbitrary {
426                                     ref offsets,
427                                     ref memory_index
428                                 } => {
429                                     assert_eq!(memory_index, &[0, 1]);
430                                     offsets
431                                 }
432                                 _ => bug!()
433                             };
434                             if offsets[i] == pair_offsets[0] &&
435                                offsets[j] == pair_offsets[1] &&
436                                align == pair.align &&
437                                size == pair.size {
438                                 // We can use `ScalarPair` only when it matches our
439                                 // already computed layout (including `#[repr(C)]`).
440                                 abi = pair.abi;
441                             }
442                         }
443
444                         _ => {}
445                     }
446                 }
447             }
448
449             if sized && fields.iter().any(|f| f.abi.is_uninhabited()) {
450                 abi = Abi::Uninhabited;
451             }
452
453             Ok(LayoutDetails {
454                 variants: Variants::Single { index: VariantIdx::new(0) },
455                 fields: FieldPlacement::Arbitrary {
456                     offsets,
457                     memory_index
458                 },
459                 abi,
460                 align,
461                 size
462             })
463         };
464         let univariant = |fields: &[TyLayout<'_>], repr: &ReprOptions, kind| {
465             Ok(tcx.intern_layout(univariant_uninterned(fields, repr, kind)?))
466         };
467         debug_assert!(!ty.has_infer_types());
468
469         Ok(match ty.sty {
470             // Basic scalars.
471             ty::Bool => {
472                 tcx.intern_layout(LayoutDetails::scalar(self, Scalar {
473                     value: Int(I8, false),
474                     valid_range: 0..=1
475                 }))
476             }
477             ty::Char => {
478                 tcx.intern_layout(LayoutDetails::scalar(self, Scalar {
479                     value: Int(I32, false),
480                     valid_range: 0..=0x10FFFF
481                 }))
482             }
483             ty::Int(ity) => {
484                 scalar(Int(Integer::from_attr(dl, attr::SignedInt(ity)), true))
485             }
486             ty::Uint(ity) => {
487                 scalar(Int(Integer::from_attr(dl, attr::UnsignedInt(ity)), false))
488             }
489             ty::Float(fty) => scalar(Float(fty)),
490             ty::FnPtr(_) => {
491                 let mut ptr = scalar_unit(Pointer);
492                 ptr.valid_range = 1..=*ptr.valid_range.end();
493                 tcx.intern_layout(LayoutDetails::scalar(self, ptr))
494             }
495
496             // The never type.
497             ty::Never => {
498                 tcx.intern_layout(LayoutDetails {
499                     variants: Variants::Single { index: VariantIdx::new(0) },
500                     fields: FieldPlacement::Union(0),
501                     abi: Abi::Uninhabited,
502                     align: dl.i8_align,
503                     size: Size::ZERO
504                 })
505             }
506
507             // Potentially-fat pointers.
508             ty::Ref(_, pointee, _) |
509             ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
510                 let mut data_ptr = scalar_unit(Pointer);
511                 if !ty.is_unsafe_ptr() {
512                     data_ptr.valid_range = 1..=*data_ptr.valid_range.end();
513                 }
514
515                 let pointee = tcx.normalize_erasing_regions(param_env, pointee);
516                 if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
517                     return Ok(tcx.intern_layout(LayoutDetails::scalar(self, data_ptr)));
518                 }
519
520                 let unsized_part = tcx.struct_tail(pointee);
521                 let metadata = match unsized_part.sty {
522                     ty::Foreign(..) => {
523                         return Ok(tcx.intern_layout(LayoutDetails::scalar(self, data_ptr)));
524                     }
525                     ty::Slice(_) | ty::Str => {
526                         scalar_unit(Int(dl.ptr_sized_integer(), false))
527                     }
528                     ty::Dynamic(..) => {
529                         let mut vtable = scalar_unit(Pointer);
530                         vtable.valid_range = 1..=*vtable.valid_range.end();
531                         vtable
532                     }
533                     _ => return Err(LayoutError::Unknown(unsized_part))
534                 };
535
536                 // Effectively a (ptr, meta) tuple.
537                 tcx.intern_layout(scalar_pair(data_ptr, metadata))
538             }
539
540             // Arrays and slices.
541             ty::Array(element, mut count) => {
542                 if count.has_projections() {
543                     count = tcx.normalize_erasing_regions(param_env, count);
544                     if count.has_projections() {
545                         return Err(LayoutError::Unknown(ty));
546                     }
547                 }
548
549                 let element = self.layout_of(element)?;
550                 let count = count.unwrap_usize(tcx);
551                 let size = element.size.checked_mul(count, dl)
552                     .ok_or(LayoutError::SizeOverflow(ty))?;
553
554                 tcx.intern_layout(LayoutDetails {
555                     variants: Variants::Single { index: VariantIdx::new(0) },
556                     fields: FieldPlacement::Array {
557                         stride: element.size,
558                         count
559                     },
560                     abi: Abi::Aggregate { sized: true },
561                     align: element.align,
562                     size
563                 })
564             }
565             ty::Slice(element) => {
566                 let element = self.layout_of(element)?;
567                 tcx.intern_layout(LayoutDetails {
568                     variants: Variants::Single { index: VariantIdx::new(0) },
569                     fields: FieldPlacement::Array {
570                         stride: element.size,
571                         count: 0
572                     },
573                     abi: Abi::Aggregate { sized: false },
574                     align: element.align,
575                     size: Size::ZERO
576                 })
577             }
578             ty::Str => {
579                 tcx.intern_layout(LayoutDetails {
580                     variants: Variants::Single { index: VariantIdx::new(0) },
581                     fields: FieldPlacement::Array {
582                         stride: Size::from_bytes(1),
583                         count: 0
584                     },
585                     abi: Abi::Aggregate { sized: false },
586                     align: dl.i8_align,
587                     size: Size::ZERO
588                 })
589             }
590
591             // Odd unit types.
592             ty::FnDef(..) => {
593                 univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?
594             }
595             ty::Dynamic(..) | ty::Foreign(..) => {
596                 let mut unit = univariant_uninterned(&[], &ReprOptions::default(),
597                   StructKind::AlwaysSized)?;
598                 match unit.abi {
599                     Abi::Aggregate { ref mut sized } => *sized = false,
600                     _ => bug!()
601                 }
602                 tcx.intern_layout(unit)
603             }
604
605             // Tuples, generators and closures.
606             ty::Generator(def_id, ref substs, _) => {
607                 let tys = substs.field_tys(def_id, tcx);
608                 univariant(&tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
609                     &ReprOptions::default(),
610                     StructKind::AlwaysSized)?
611             }
612
613             ty::Closure(def_id, ref substs) => {
614                 let tys = substs.upvar_tys(def_id, tcx);
615                 univariant(&tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
616                     &ReprOptions::default(),
617                     StructKind::AlwaysSized)?
618             }
619
620             ty::Tuple(tys) => {
621                 let kind = if tys.len() == 0 {
622                     StructKind::AlwaysSized
623                 } else {
624                     StructKind::MaybeUnsized
625                 };
626
627                 univariant(&tys.iter().map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
628                            &ReprOptions::default(), kind)?
629             }
630
631             // SIMD vector types.
632             ty::Adt(def, ..) if def.repr.simd() => {
633                 let element = self.layout_of(ty.simd_type(tcx))?;
634                 let count = ty.simd_size(tcx) as u64;
635                 assert!(count > 0);
636                 let scalar = match element.abi {
637                     Abi::Scalar(ref scalar) => scalar.clone(),
638                     _ => {
639                         tcx.sess.fatal(&format!("monomorphising SIMD type `{}` with \
640                                                  a non-machine element type `{}`",
641                                                 ty, element.ty));
642                     }
643                 };
644                 let size = element.size.checked_mul(count, dl)
645                     .ok_or(LayoutError::SizeOverflow(ty))?;
646                 let align = dl.vector_align(size);
647                 let size = size.align_to(align.abi);
648
649                 tcx.intern_layout(LayoutDetails {
650                     variants: Variants::Single { index: VariantIdx::new(0) },
651                     fields: FieldPlacement::Array {
652                         stride: element.size,
653                         count
654                     },
655                     abi: Abi::Vector {
656                         element: scalar,
657                         count
658                     },
659                     size,
660                     align,
661                 })
662             }
663
664             // ADTs.
665             ty::Adt(def, substs) => {
666                 // Cache the field layouts.
667                 let variants = def.variants.iter().map(|v| {
668                     v.fields.iter().map(|field| {
669                         self.layout_of(field.ty(tcx, substs))
670                     }).collect::<Result<Vec<_>, _>>()
671                 }).collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
672
673                 if def.is_union() {
674                     let packed = def.repr.packed();
675                     if packed && def.repr.align > 0 {
676                         bug!("Union cannot be packed and aligned");
677                     }
678
679                     let pack = Align::from_bytes(def.repr.pack as u64).unwrap();
680
681                     let mut align = if packed {
682                         dl.i8_align
683                     } else {
684                         dl.aggregate_align
685                     };
686
687                     if def.repr.align > 0 {
688                         let repr_align = def.repr.align as u64;
689                         align = align.max(
690                             AbiAndPrefAlign::new(Align::from_bytes(repr_align).unwrap()));
691                     }
692
693                     let optimize = !def.repr.inhibit_union_abi_opt();
694                     let mut size = Size::ZERO;
695                     let mut abi = Abi::Aggregate { sized: true };
696                     let index = VariantIdx::new(0);
697                     for field in &variants[index] {
698                         assert!(!field.is_unsized());
699
700                         let field_align = if packed {
701                             field.align.min(AbiAndPrefAlign::new(pack))
702                         } else {
703                             field.align
704                         };
705                         align = align.max(field_align);
706
707                         // If all non-ZST fields have the same ABI, forward this ABI
708                         if optimize && !field.is_zst() {
709                             // Normalize scalar_unit to the maximal valid range
710                             let field_abi = match &field.abi {
711                                 Abi::Scalar(x) => Abi::Scalar(scalar_unit(x.value)),
712                                 Abi::ScalarPair(x, y) => {
713                                     Abi::ScalarPair(
714                                         scalar_unit(x.value),
715                                         scalar_unit(y.value),
716                                     )
717                                 }
718                                 Abi::Vector { element: x, count } => {
719                                     Abi::Vector {
720                                         element: scalar_unit(x.value),
721                                         count: *count,
722                                     }
723                                 }
724                                 Abi::Uninhabited |
725                                 Abi::Aggregate { .. }  => Abi::Aggregate { sized: true },
726                             };
727
728                             if size == Size::ZERO {
729                                 // first non ZST: initialize 'abi'
730                                 abi = field_abi;
731                             } else if abi != field_abi  {
732                                 // different fields have different ABI: reset to Aggregate
733                                 abi = Abi::Aggregate { sized: true };
734                             }
735                         }
736
737                         size = cmp::max(size, field.size);
738                     }
739
740                     return Ok(tcx.intern_layout(LayoutDetails {
741                         variants: Variants::Single { index },
742                         fields: FieldPlacement::Union(variants[index].len()),
743                         abi,
744                         align,
745                         size: size.align_to(align.abi)
746                     }));
747                 }
748
749                 // A variant is absent if it's uninhabited and only has ZST fields.
750                 // Present uninhabited variants only require space for their fields,
751                 // but *not* an encoding of the discriminant (e.g. a tag value).
752                 // See issue #49298 for more details on the need to leave space
753                 // for non-ZST uninhabited data (mostly partial initialization).
754                 let absent = |fields: &[TyLayout<'_>]| {
755                     let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
756                     let is_zst = fields.iter().all(|f| f.is_zst());
757                     uninhabited && is_zst
758                 };
759                 let (present_first, present_second) = {
760                     let mut present_variants = variants.iter_enumerated().filter_map(|(i, v)| {
761                         if absent(v) {
762                             None
763                         } else {
764                             Some(i)
765                         }
766                     });
767                     (present_variants.next(), present_variants.next())
768                 };
769                 if present_first.is_none() {
770                     // Uninhabited because it has no variants, or only absent ones.
771                     return tcx.layout_raw(param_env.and(tcx.types.never));
772                 }
773
774                 let is_struct = !def.is_enum() ||
775                     // Only one variant is present.
776                     (present_second.is_none() &&
777                     // Representation optimizations are allowed.
778                     !def.repr.inhibit_enum_layout_opt());
779                 if is_struct {
780                     // Struct, or univariant enum equivalent to a struct.
781                     // (Typechecking will reject discriminant-sizing attrs.)
782
783                     let v = present_first.unwrap();
784                     let kind = if def.is_enum() || variants[v].len() == 0 {
785                         StructKind::AlwaysSized
786                     } else {
787                         let param_env = tcx.param_env(def.did);
788                         let last_field = def.variants[v].fields.last().unwrap();
789                         let always_sized = tcx.type_of(last_field.did)
790                                               .is_sized(tcx.at(DUMMY_SP), param_env);
791                         if !always_sized { StructKind::MaybeUnsized }
792                         else { StructKind::AlwaysSized }
793                     };
794
795                     let mut st = univariant_uninterned(&variants[v], &def.repr, kind)?;
796                     st.variants = Variants::Single { index: v };
797                     let (start, end) = self.tcx.layout_scalar_valid_range(def.did);
798                     match st.abi {
799                         Abi::Scalar(ref mut scalar) |
800                         Abi::ScalarPair(ref mut scalar, _) => {
801                             // the asserts ensure that we are not using the
802                             // `#[rustc_layout_scalar_valid_range(n)]`
803                             // attribute to widen the range of anything as that would probably
804                             // result in UB somewhere
805                             if let Bound::Included(start) = start {
806                                 assert!(*scalar.valid_range.start() <= start);
807                                 scalar.valid_range = start..=*scalar.valid_range.end();
808                             }
809                             if let Bound::Included(end) = end {
810                                 assert!(*scalar.valid_range.end() >= end);
811                                 scalar.valid_range = *scalar.valid_range.start()..=end;
812                             }
813                         }
814                         _ => assert!(
815                             start == Bound::Unbounded && end == Bound::Unbounded,
816                             "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
817                             def,
818                             st,
819                         ),
820                     }
821                     return Ok(tcx.intern_layout(st));
822                 }
823
824                 // The current code for niche-filling relies on variant indices
825                 // instead of actual discriminants, so dataful enums with
826                 // explicit discriminants (RFC #2363) would misbehave.
827                 let no_explicit_discriminants = def.variants.iter_enumerated()
828                     .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i.as_u32()));
829
830                 // Niche-filling enum optimization.
831                 if !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants {
832                     let mut dataful_variant = None;
833                     let mut niche_variants = VariantIdx::MAX..=VariantIdx::new(0);
834
835                     // Find one non-ZST variant.
836                     'variants: for (v, fields) in variants.iter_enumerated() {
837                         if absent(fields) {
838                             continue 'variants;
839                         }
840                         for f in fields {
841                             if !f.is_zst() {
842                                 if dataful_variant.is_none() {
843                                     dataful_variant = Some(v);
844                                     continue 'variants;
845                                 } else {
846                                     dataful_variant = None;
847                                     break 'variants;
848                                 }
849                             }
850                         }
851                         niche_variants = *niche_variants.start().min(&v)..=v;
852                     }
853
854                     if niche_variants.start() > niche_variants.end() {
855                         dataful_variant = None;
856                     }
857
858                     if let Some(i) = dataful_variant {
859                         let count = (
860                             niche_variants.end().as_u32() - niche_variants.start().as_u32() + 1
861                         ) as u128;
862                         for (field_index, &field) in variants[i].iter().enumerate() {
863                             let niche = match self.find_niche(field)? {
864                                 Some(niche) => niche,
865                                 _ => continue,
866                             };
867                             let (niche_start, niche_scalar) = match niche.reserve(self, count) {
868                                 Some(pair) => pair,
869                                 None => continue,
870                             };
871
872                             let mut align = dl.aggregate_align;
873                             let st = variants.iter_enumerated().map(|(j, v)| {
874                                 let mut st = univariant_uninterned(v,
875                                     &def.repr, StructKind::AlwaysSized)?;
876                                 st.variants = Variants::Single { index: j };
877
878                                 align = align.max(st.align);
879
880                                 Ok(st)
881                             }).collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
882
883                             let offset = st[i].fields.offset(field_index) + niche.offset;
884                             let size = st[i].size;
885
886                             let mut abi = match st[i].abi {
887                                 Abi::Scalar(_) => Abi::Scalar(niche_scalar.clone()),
888                                 Abi::ScalarPair(ref first, ref second) => {
889                                     // We need to use scalar_unit to reset the
890                                     // valid range to the maximal one for that
891                                     // primitive, because only the niche is
892                                     // guaranteed to be initialised, not the
893                                     // other primitive.
894                                     if offset.bytes() == 0 {
895                                         Abi::ScalarPair(
896                                             niche_scalar.clone(),
897                                             scalar_unit(second.value),
898                                         )
899                                     } else {
900                                         Abi::ScalarPair(
901                                             scalar_unit(first.value),
902                                             niche_scalar.clone(),
903                                         )
904                                     }
905                                 }
906                                 _ => Abi::Aggregate { sized: true },
907                             };
908
909                             if st.iter().all(|v| v.abi.is_uninhabited()) {
910                                 abi = Abi::Uninhabited;
911                             }
912
913                             return Ok(tcx.intern_layout(LayoutDetails {
914                                 variants: Variants::NicheFilling {
915                                     dataful_variant: i,
916                                     niche_variants,
917                                     niche: niche_scalar,
918                                     niche_start,
919                                     variants: st,
920                                 },
921                                 fields: FieldPlacement::Arbitrary {
922                                     offsets: vec![offset],
923                                     memory_index: vec![0]
924                                 },
925                                 abi,
926                                 size,
927                                 align,
928                             }));
929                         }
930                     }
931                 }
932
933                 let (mut min, mut max) = (i128::max_value(), i128::min_value());
934                 let discr_type = def.repr.discr_type();
935                 let bits = Integer::from_attr(self, discr_type).size().bits();
936                 for (i, discr) in def.discriminants(tcx) {
937                     if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
938                         continue;
939                     }
940                     let mut x = discr.val as i128;
941                     if discr_type.is_signed() {
942                         // sign extend the raw representation to be an i128
943                         x = (x << (128 - bits)) >> (128 - bits);
944                     }
945                     if x < min { min = x; }
946                     if x > max { max = x; }
947                 }
948                 // We might have no inhabited variants, so pretend there's at least one.
949                 if (min, max) == (i128::max_value(), i128::min_value()) {
950                     min = 0;
951                     max = 0;
952                 }
953                 assert!(min <= max, "discriminant range is {}...{}", min, max);
954                 let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max);
955
956                 let mut align = dl.aggregate_align;
957                 let mut size = Size::ZERO;
958
959                 // We're interested in the smallest alignment, so start large.
960                 let mut start_align = Align::from_bytes(256).unwrap();
961                 assert_eq!(Integer::for_align(dl, start_align), None);
962
963                 // repr(C) on an enum tells us to make a (tag, union) layout,
964                 // so we need to grow the prefix alignment to be at least
965                 // the alignment of the union. (This value is used both for
966                 // determining the alignment of the overall enum, and the
967                 // determining the alignment of the payload after the tag.)
968                 let mut prefix_align = min_ity.align(dl).abi;
969                 if def.repr.c() {
970                     for fields in &variants {
971                         for field in fields {
972                             prefix_align = prefix_align.max(field.align.abi);
973                         }
974                     }
975                 }
976
977                 // Create the set of structs that represent each variant.
978                 let mut layout_variants = variants.iter_enumerated().map(|(i, field_layouts)| {
979                     let mut st = univariant_uninterned(&field_layouts,
980                         &def.repr, StructKind::Prefixed(min_ity.size(), prefix_align))?;
981                     st.variants = Variants::Single { index: i };
982                     // Find the first field we can't move later
983                     // to make room for a larger discriminant.
984                     for field in st.fields.index_by_increasing_offset().map(|j| field_layouts[j]) {
985                         if !field.is_zst() || field.align.abi.bytes() != 1 {
986                             start_align = start_align.min(field.align.abi);
987                             break;
988                         }
989                     }
990                     size = cmp::max(size, st.size);
991                     align = align.max(st.align);
992                     Ok(st)
993                 }).collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
994
995                 // Align the maximum variant size to the largest alignment.
996                 size = size.align_to(align.abi);
997
998                 if size.bytes() >= dl.obj_size_bound() {
999                     return Err(LayoutError::SizeOverflow(ty));
1000                 }
1001
1002                 let typeck_ity = Integer::from_attr(dl, def.repr.discr_type());
1003                 if typeck_ity < min_ity {
1004                     // It is a bug if Layout decided on a greater discriminant size than typeck for
1005                     // some reason at this point (based on values discriminant can take on). Mostly
1006                     // because this discriminant will be loaded, and then stored into variable of
1007                     // type calculated by typeck. Consider such case (a bug): typeck decided on
1008                     // byte-sized discriminant, but layout thinks we need a 16-bit to store all
1009                     // discriminant values. That would be a bug, because then, in codegen, in order
1010                     // to store this 16-bit discriminant into 8-bit sized temporary some of the
1011                     // space necessary to represent would have to be discarded (or layout is wrong
1012                     // on thinking it needs 16 bits)
1013                     bug!("layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
1014                          min_ity, typeck_ity);
1015                     // However, it is fine to make discr type however large (as an optimisation)
                    // after this point -- we'll just truncate the value we load in codegen.
1017                 }
1018
1019                 // Check to see if we should use a different type for the
1020                 // discriminant. We can safely use a type with the same size
1021                 // as the alignment of the first field of each variant.
1022                 // We increase the size of the discriminant to avoid LLVM copying
1023                 // padding when it doesn't need to. This normally causes unaligned
1024                 // load/stores and excessive memcpy/memset operations. By using a
1025                 // bigger integer size, LLVM can be sure about its contents and
1026                 // won't be so conservative.
1027
1028                 // Use the initial field alignment
1029                 let mut ity = if def.repr.c() || def.repr.int.is_some() {
1030                     min_ity
1031                 } else {
1032                     Integer::for_align(dl, start_align).unwrap_or(min_ity)
1033                 };
1034
1035                 // If the alignment is not larger than the chosen discriminant size,
1036                 // don't use the alignment as the final size.
1037                 if ity <= min_ity {
1038                     ity = min_ity;
1039                 } else {
1040                     // Patch up the variants' first few fields.
1041                     let old_ity_size = min_ity.size();
1042                     let new_ity_size = ity.size();
1043                     for variant in &mut layout_variants {
1044                         match variant.fields {
1045                             FieldPlacement::Arbitrary { ref mut offsets, .. } => {
1046                                 for i in offsets {
1047                                     if *i <= old_ity_size {
1048                                         assert_eq!(*i, old_ity_size);
1049                                         *i = new_ity_size;
1050                                     }
1051                                 }
1052                                 // We might be making the struct larger.
1053                                 if variant.size <= old_ity_size {
1054                                     variant.size = new_ity_size;
1055                                 }
1056                             }
1057                             _ => bug!()
1058                         }
1059                     }
1060                 }
1061
1062                 let tag_mask = !0u128 >> (128 - ity.size().bits());
1063                 let tag = Scalar {
1064                     value: Int(ity, signed),
1065                     valid_range: (min as u128 & tag_mask)..=(max as u128 & tag_mask),
1066                 };
1067                 let mut abi = Abi::Aggregate { sized: true };
1068                 if tag.value.size(dl) == size {
1069                     abi = Abi::Scalar(tag.clone());
1070                 } else {
1071                     // Try to use a ScalarPair for all tagged enums.
1072                     let mut common_prim = None;
1073                     for (field_layouts, layout_variant) in variants.iter().zip(&layout_variants) {
1074                         let offsets = match layout_variant.fields {
1075                             FieldPlacement::Arbitrary { ref offsets, .. } => offsets,
1076                             _ => bug!(),
1077                         };
1078                         let mut fields = field_layouts
1079                             .iter()
1080                             .zip(offsets)
1081                             .filter(|p| !p.0.is_zst());
1082                         let (field, offset) = match (fields.next(), fields.next()) {
1083                             (None, None) => continue,
1084                             (Some(pair), None) => pair,
1085                             _ => {
1086                                 common_prim = None;
1087                                 break;
1088                             }
1089                         };
1090                         let prim = match field.details.abi {
1091                             Abi::Scalar(ref scalar) => scalar.value,
1092                             _ => {
1093                                 common_prim = None;
1094                                 break;
1095                             }
1096                         };
1097                         if let Some(pair) = common_prim {
1098                             // This is pretty conservative. We could go fancier
1099                             // by conflating things like i32 and u32, or even
1100                             // realising that (u8, u8) could just cohabit with
1101                             // u16 or even u32.
1102                             if pair != (prim, offset) {
1103                                 common_prim = None;
1104                                 break;
1105                             }
1106                         } else {
1107                             common_prim = Some((prim, offset));
1108                         }
1109                     }
1110                     if let Some((prim, offset)) = common_prim {
1111                         let pair = scalar_pair(tag.clone(), scalar_unit(prim));
1112                         let pair_offsets = match pair.fields {
1113                             FieldPlacement::Arbitrary {
1114                                 ref offsets,
1115                                 ref memory_index
1116                             } => {
1117                                 assert_eq!(memory_index, &[0, 1]);
1118                                 offsets
1119                             }
1120                             _ => bug!()
1121                         };
1122                         if pair_offsets[0] == Size::ZERO &&
1123                             pair_offsets[1] == *offset &&
1124                             align == pair.align &&
1125                             size == pair.size {
1126                             // We can use `ScalarPair` only when it matches our
1127                             // already computed layout (including `#[repr(C)]`).
1128                             abi = pair.abi;
1129                         }
1130                     }
1131                 }
1132
1133                 if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
1134                     abi = Abi::Uninhabited;
1135                 }
1136
1137                 tcx.intern_layout(LayoutDetails {
1138                     variants: Variants::Tagged {
1139                         tag,
1140                         variants: layout_variants,
1141                     },
1142                     fields: FieldPlacement::Arbitrary {
1143                         offsets: vec![Size::ZERO],
1144                         memory_index: vec![0]
1145                     },
1146                     abi,
1147                     align,
1148                     size
1149                 })
1150             }
1151
1152             // Types with no meaningful known layout.
1153             ty::Projection(_) | ty::Opaque(..) => {
1154                 let normalized = tcx.normalize_erasing_regions(param_env, ty);
1155                 if ty == normalized {
1156                     return Err(LayoutError::Unknown(ty));
1157                 }
1158                 tcx.layout_raw(param_env.and(normalized))?
1159             }
1160
1161             ty::Bound(..) |
1162             ty::UnnormalizedProjection(..) |
1163             ty::GeneratorWitness(..) |
1164             ty::Infer(_) => {
1165                 bug!("LayoutDetails::compute: unexpected type `{}`", ty)
1166             }
1167
1168             ty::Param(_) | ty::Error => {
1169                 return Err(LayoutError::Unknown(ty));
1170             }
1171         })
1172     }
1173
1174     /// This is invoked by the `layout_raw` query to record the final
1175     /// layout of each type.
1176     #[inline]
1177     fn record_layout_for_printing(&self, layout: TyLayout<'tcx>) {
1178         // If we are running with `-Zprint-type-sizes`, record layouts for
1179         // dumping later. Ignore layouts that are done with non-empty
1180         // environments or non-monomorphic layouts, as the user only wants
1181         // to see the stuff resulting from the final codegen session.
1182         if
1183             !self.tcx.sess.opts.debugging_opts.print_type_sizes ||
1184             layout.ty.has_param_types() ||
1185             layout.ty.has_self_ty() ||
1186             !self.param_env.caller_bounds.is_empty()
1187         {
1188             return;
1189         }
1190
1191         self.record_layout_for_printing_outlined(layout)
1192     }
1193
    /// Records the size/alignment/field breakdown of `layout` into the
    /// session's `code_stats`, for later reporting by `-Zprint-type-sizes`.
    /// Only nominal ADTs and closures are recorded; everything else is skipped.
    fn record_layout_for_printing_outlined(&self, layout: TyLayout<'tcx>) {
        // (delay format until we actually need it)
        let record = |kind, packed, opt_discr_size, variants| {
            let type_desc = format!("{:?}", layout.ty);
            self.tcx.sess.code_stats.borrow_mut().record_type_size(kind,
                                                                   type_desc,
                                                                   layout.align.abi,
                                                                   layout.size,
                                                                   packed,
                                                                   opt_discr_size,
                                                                   variants);
        };

        // Determine what kind of nominal type we're looking at; bail out
        // (after recording, for closures) on anything non-nominal.
        let adt_def = match layout.ty.sty {
            ty::Adt(ref adt_def, _) => {
                debug!("print-type-size t: `{:?}` process adt", layout.ty);
                adt_def
            }

            ty::Closure(..) => {
                // Closures have no named fields; record them with an empty
                // variant list.
                debug!("print-type-size t: `{:?}` record closure", layout.ty);
                record(DataTypeKind::Closure, false, None, vec![]);
                return;
            }

            _ => {
                debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
                return;
            }
        };

        let adt_kind = adt_def.adt_kind();
        let adt_packed = adt_def.repr.packed();

        // Builds the per-variant report: one `FieldInfo` per field (name,
        // offset, size, align), plus the variant's overall size and
        // alignment. `min_size` tracks the end of the furthest-extending
        // field, which serves as the variant's size when nonzero.
        let build_variant_info = |n: Option<ast::Name>,
                                  flds: &[ast::Name],
                                  layout: TyLayout<'tcx>| {
            let mut min_size = Size::ZERO;
            let field_info: Vec<_> = flds.iter().enumerate().map(|(i, &name)| {
                match layout.field(self, i) {
                    Err(err) => {
                        bug!("no layout found for field {}: `{:?}`", name, err);
                    }
                    Ok(field_layout) => {
                        let offset = layout.fields.offset(i);
                        let field_end = offset + field_layout.size;
                        if min_size < field_end {
                            min_size = field_end;
                        }
                        session::FieldInfo {
                            name: name.to_string(),
                            offset: offset.bytes(),
                            size: field_layout.size.bytes(),
                            align: field_layout.align.abi.bytes(),
                        }
                    }
                }
            }).collect();

            session::VariantInfo {
                name: n.map(|n|n.to_string()),
                // An unsized variant's recorded size is only a lower bound.
                kind: if layout.is_unsized() {
                    session::SizeKind::Min
                } else {
                    session::SizeKind::Exact
                },
                align: layout.align.abi.bytes(),
                // With no fields extending past offset 0, fall back to the
                // layout's own size (e.g. a fieldless variant).
                size: if min_size.bytes() == 0 {
                    layout.size.bytes()
                } else {
                    min_size.bytes()
                },
                fields: field_info,
            }
        };

        match layout.variants {
            // Structs and univariant enums: report the single variant.
            Variants::Single { index } => {
                debug!("print-type-size `{:#?}` variant {}",
                       layout, adt_def.variants[index].name);
                if !adt_def.variants.is_empty() {
                    let variant_def = &adt_def.variants[index];
                    let fields: Vec<_> =
                        variant_def.fields.iter().map(|f| f.ident.name).collect();
                    record(adt_kind.into(),
                           adt_packed,
                           None,
                           vec![build_variant_info(Some(variant_def.name),
                                                   &fields,
                                                   layout)]);
                } else {
                    // (This case arises for *empty* enums; so give it
                    // zero variants.)
                    record(adt_kind.into(), adt_packed, None, vec![]);
                }
            }

            // Multi-variant enums: report every variant, and for tagged
            // enums also the size of the discriminant tag.
            Variants::NicheFilling { .. } |
            Variants::Tagged { .. } => {
                debug!("print-type-size `{:#?}` adt general variants def {}",
                       layout.ty, adt_def.variants.len());
                let variant_infos: Vec<_> =
                    adt_def.variants.iter_enumerated().map(|(i, variant_def)| {
                        let fields: Vec<_> =
                            variant_def.fields.iter().map(|f| f.ident.name).collect();
                        build_variant_info(Some(variant_def.name),
                                           &fields,
                                           // Use the variant's own layout so
                                           // field offsets are per-variant.
                                           layout.for_variant(self, i))
                    })
                    .collect();
                record(adt_kind.into(), adt_packed, match layout.variants {
                    Variants::Tagged { ref tag, .. } => Some(tag.value.size(self)),
                    // Niche-filling enums have no separate tag to report.
                    _ => None
                }, variant_infos);
            }
        }
    }
1311 }
1312
/// Type size "skeleton", i.e. the only information determining a type's size.
/// This is conservative: aside from types with a statically known size, only
/// pointers, newtypes thereof, and null-pointer-optimized enums are
/// representable. That is nevertheless enough to statically check common
/// use cases of `transmute`, even when the involved types are generic.
#[derive(Copy, Clone, Debug)]
pub enum SizeSkeleton<'tcx> {
    /// Any statically computable Layout.
    Known(Size),

    /// A potentially-fat pointer.
    Pointer {
        /// If true, this pointer is never null.
        non_zero: bool,
        /// The type which determines the unsized metadata, if any,
        /// of this pointer. Either a type parameter or a projection
        /// depending on one, with regions erased.
        tail: Ty<'tcx>
    }
}
1332
impl<'a, 'tcx> SizeSkeleton<'tcx> {
    /// Computes a size skeleton for `ty`.
    ///
    /// First attempts a full `layout_of` query; if that succeeds the size is
    /// statically known. Otherwise, the original layout error is kept around
    /// and only returned if `ty` is not one of the specially handled shapes
    /// (pointers, pointer newtypes, nullable-pointer-style enums, or
    /// projections/opaques that normalize to something else).
    pub fn compute(ty: Ty<'tcx>,
                   tcx: TyCtxt<'a, 'tcx, 'tcx>,
                   param_env: ty::ParamEnv<'tcx>)
                   -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
        debug_assert!(!ty.has_infer_types());

        // First try computing a static layout.
        let err = match tcx.layout_of(param_env.and(ty)) {
            Ok(layout) => {
                return Ok(SizeSkeleton::Known(layout.size));
            }
            Err(err) => err
        };

        match ty.sty {
            // References and raw pointers: always pointer-shaped, even when
            // the pointee's layout could not be computed.
            ty::Ref(_, pointee, _) |
            ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                let non_zero = !ty.is_unsafe_ptr();
                let tail = tcx.struct_tail(pointee);
                match tail.sty {
                    ty::Param(_) | ty::Projection(_) => {
                        // Layout can only have failed because the tail is
                        // still generic; record it (with regions erased) as
                        // the determinant of this pointer's metadata.
                        debug_assert!(tail.has_param_types() || tail.has_self_ty());
                        Ok(SizeSkeleton::Pointer {
                            non_zero,
                            tail: tcx.erase_regions(&tail)
                        })
                    }
                    _ => {
                        bug!("SizeSkeleton::compute({}): layout errored ({}), yet \
                              tail `{}` is not a type parameter or a projection",
                             ty, err, tail)
                    }
                }
            }

            ty::Adt(def, substs) => {
                // Only newtypes and enums w/ nullable pointer optimization.
                if def.is_union() || def.variants.is_empty() || def.variants.len() > 2 {
                    return Err(err);
                }

                // Get a zero-sized variant or a pointer newtype.
                // Returns `Ok(Some(pointer skeleton))` if the variant consists
                // of exactly one pointer field plus zero-sized fields,
                // `Ok(None)` if all fields are zero-sized, and the original
                // layout error otherwise.
                let zero_or_ptr_variant = |i| {
                    let i = VariantIdx::new(i);
                    let fields = def.variants[i].fields.iter().map(|field| {
                        SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env)
                    });
                    let mut ptr = None;
                    for field in fields {
                        let field = field?;
                        match field {
                            SizeSkeleton::Known(size) => {
                                if size.bytes() > 0 {
                                    return Err(err);
                                }
                            }
                            SizeSkeleton::Pointer {..} => {
                                if ptr.is_some() {
                                    // More than one pointer field: give up.
                                    return Err(err);
                                }
                                ptr = Some(field);
                            }
                        }
                    }
                    Ok(ptr)
                };

                let v0 = zero_or_ptr_variant(0)?;
                // Newtype.
                if def.variants.len() == 1 {
                    if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
                        return Ok(SizeSkeleton::Pointer {
                            // A `rustc_layout_scalar_valid_range` attribute
                            // excluding 0 also makes the pointer non-null.
                            non_zero: non_zero || match tcx.layout_scalar_valid_range(def.did) {
                                (Bound::Included(start), Bound::Unbounded) => start > 0,
                                (Bound::Included(start), Bound::Included(end)) =>
                                    0 < start && start < end,
                                _ => false,
                            },
                            tail,
                        });
                    } else {
                        return Err(err);
                    }
                }

                let v1 = zero_or_ptr_variant(1)?;
                // Nullable pointer enum optimization: one variant is a
                // non-zero pointer, the other is entirely zero-sized, so the
                // whole enum is pointer-sized (with 0 encoding the latter).
                match (v0, v1) {
                    (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None) |
                    (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
                        Ok(SizeSkeleton::Pointer {
                            non_zero: false,
                            tail,
                        })
                    }
                    _ => Err(err)
                }
            }

            ty::Projection(_) | ty::Opaque(..) => {
                // Retry on the normalized form, but only if normalization
                // made progress (otherwise we would loop forever).
                let normalized = tcx.normalize_erasing_regions(param_env, ty);
                if ty == normalized {
                    Err(err)
                } else {
                    SizeSkeleton::compute(normalized, tcx, param_env)
                }
            }

            _ => Err(err)
        }
    }

    /// Whether two skeletons are provably the same size: either both are a
    /// statically known equal size, or both are pointers with the same tail.
    pub fn same_size(self, other: SizeSkeleton<'_>) -> bool {
        match (self, other) {
            (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
            (SizeSkeleton::Pointer { tail: a, .. },
             SizeSkeleton::Pointer { tail: b, .. }) => a == b,
            _ => false
        }
    }
}
1455
/// Contexts that can hand out a (global) `TyCtxt`, in addition to the
/// target data layout required by `HasDataLayout`.
pub trait HasTyCtxt<'tcx>: HasDataLayout {
    fn tcx<'a>(&'a self) -> TyCtxt<'a, 'tcx, 'tcx>;
}
1459
impl<'a, 'gcx, 'tcx> HasDataLayout for TyCtxt<'a, 'gcx, 'tcx> {
    // The target data layout is stored directly on the `TyCtxt`.
    fn data_layout(&self) -> &TargetDataLayout {
        &self.data_layout
    }
}
1465
impl<'a, 'gcx, 'tcx> HasTyCtxt<'gcx> for TyCtxt<'a, 'gcx, 'tcx> {
    // Always hand out the global (`'gcx`) view of this context.
    fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'gcx> {
        self.global_tcx()
    }
}
1471
impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
    // Delegate to the wrapped context.
    fn data_layout(&self) -> &TargetDataLayout {
        self.tcx.data_layout()
    }
}
1477
impl<'gcx, 'tcx, T: HasTyCtxt<'gcx>> HasTyCtxt<'gcx> for LayoutCx<'tcx, T> {
    // Delegate to the wrapped context.
    fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'gcx> {
        self.tcx.tcx()
    }
}
1483
/// Abstracts over a plain `T` and `Result<T, E>`, so code can be generic
/// over whether its layout computation is fallible.
pub trait MaybeResult<T> {
    /// Wrap a successfully computed value.
    fn from_ok(x: T) -> Self;
    /// Transform the success value; a `Result` passes errors through.
    fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self;
}
1488
// The infallible case: a plain value is already "ok".
impl<T> MaybeResult<T> for T {
    fn from_ok(x: T) -> Self {
        x
    }
    fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self {
        f(self)
    }
}
1497
1498 impl<T, E> MaybeResult<T> for Result<T, E> {
1499     fn from_ok(x: T) -> Self {
1500         Ok(x)
1501     }
1502     fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self {
1503         self.map(f)
1504     }
1505 }
1506
/// `rustc_target`'s `TyLayout`, instantiated with this crate's `Ty`.
pub type TyLayout<'tcx> = ::rustc_target::abi::TyLayout<'tcx, Ty<'tcx>>;
1508
1509 impl<'a, 'tcx> LayoutOf for LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
1510     type Ty = Ty<'tcx>;
1511     type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;
1512
1513     /// Computes the layout of a type. Note that this implicitly
1514     /// executes in "reveal all" mode.
1515     fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout {
1516         let param_env = self.param_env.with_reveal_all();
1517         let ty = self.tcx.normalize_erasing_regions(param_env, ty);
1518         let details = self.tcx.layout_raw(param_env.and(ty))?;
1519         let layout = TyLayout {
1520             ty,
1521             details
1522         };
1523
1524         // NB: This recording is normally disabled; when enabled, it
1525         // can however trigger recursive invocations of `layout_of`.
1526         // Therefore, we execute it *after* the main query has
1527         // completed, to avoid problems around recursive structures
1528         // and the like. (Admittedly, I wasn't able to reproduce a problem
1529         // here, but it seems like the right thing to do. -nmatsakis)
1530         self.record_layout_for_printing(layout);
1531
1532         Ok(layout)
1533     }
1534 }
1535
1536 impl<'a, 'tcx> LayoutOf for LayoutCx<'tcx, ty::query::TyCtxtAt<'a, 'tcx, 'tcx>> {
1537     type Ty = Ty<'tcx>;
1538     type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;
1539
1540     /// Computes the layout of a type. Note that this implicitly
1541     /// executes in "reveal all" mode.
1542     fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout {
1543         let param_env = self.param_env.with_reveal_all();
1544         let ty = self.tcx.normalize_erasing_regions(param_env, ty);
1545         let details = self.tcx.layout_raw(param_env.and(ty))?;
1546         let layout = TyLayout {
1547             ty,
1548             details
1549         };
1550
1551         // NB: This recording is normally disabled; when enabled, it
1552         // can however trigger recursive invocations of `layout_of`.
1553         // Therefore, we execute it *after* the main query has
1554         // completed, to avoid problems around recursive structures
1555         // and the like. (Admittedly, I wasn't able to reproduce a problem
1556         // here, but it seems like the right thing to do. -nmatsakis)
1557         let cx = LayoutCx {
1558             tcx: *self.tcx,
1559             param_env: self.param_env
1560         };
1561         cx.record_layout_for_printing(layout);
1562
1563         Ok(layout)
1564     }
1565 }
1566
1567 // Helper (inherent) `layout_of` methods to avoid pushing `LayoutCx` to users.
1568 impl TyCtxt<'a, 'tcx, '_> {
1569     /// Computes the layout of a type. Note that this implicitly
1570     /// executes in "reveal all" mode.
1571     #[inline]
1572     pub fn layout_of(self, param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
1573                      -> Result<TyLayout<'tcx>, LayoutError<'tcx>> {
1574         let cx = LayoutCx {
1575             tcx: self.global_tcx(),
1576             param_env: param_env_and_ty.param_env
1577         };
1578         cx.layout_of(param_env_and_ty.value)
1579     }
1580 }
1581
1582 impl ty::query::TyCtxtAt<'a, 'tcx, '_> {
1583     /// Computes the layout of a type. Note that this implicitly
1584     /// executes in "reveal all" mode.
1585     #[inline]
1586     pub fn layout_of(self, param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
1587                      -> Result<TyLayout<'tcx>, LayoutError<'tcx>> {
1588         let cx = LayoutCx {
1589             tcx: self.global_tcx().at(self.span),
1590             param_env: param_env_and_ty.param_env
1591         };
1592         cx.layout_of(param_env_and_ty.value)
1593     }
1594 }
1595
impl<'a, 'tcx, C> TyLayoutMethods<'tcx, C> for Ty<'tcx>
    where C: LayoutOf<Ty = Ty<'tcx>> + HasTyCtxt<'tcx>,
          C::TyLayout: MaybeResult<TyLayout<'tcx>>
{
    /// Projects the layout of variant `variant_index` out of `this`.
    fn for_variant(this: TyLayout<'tcx>, cx: &C, variant_index: VariantIdx) -> TyLayout<'tcx> {
        let details = match this.variants {
            // Already the requested variant: reuse the layout as-is.
            Variants::Single { index } if index == variant_index => this.details,

            Variants::Single { index } => {
                // Deny calling for_variant more than once for non-Single enums.
                // (The re-queried layout is only used for this assertion and
                // then discarded.)
                cx.layout_of(this.ty).map_same(|layout| {
                    assert_eq!(layout.variants, Variants::Single { index });
                    layout
                });

                // Asking for a different variant of a `Single` layout:
                // synthesize a placeholder that is uninhabited, zero-sized,
                // and has the right number of (union-placed) fields.
                let fields = match this.ty.sty {
                    ty::Adt(def, _) => def.variants[variant_index].fields.len(),
                    _ => bug!()
                };
                let tcx = cx.tcx();
                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: variant_index },
                    fields: FieldPlacement::Union(fields),
                    abi: Abi::Uninhabited,
                    align: tcx.data_layout.i8_align,
                    size: Size::ZERO
                })
            }

            // Multi-variant layouts carry per-variant details directly.
            Variants::NicheFilling { ref variants, .. } |
            Variants::Tagged { ref variants, .. } => {
                &variants[variant_index]
            }
        };

        assert_eq!(details.variants, Variants::Single { index: variant_index });

        TyLayout {
            ty: this.ty,
            details
        }
    }

    /// Computes the layout of field `i` of `this`, by determining the
    /// field's type and handing it back to `cx.layout_of`.
    fn field(this: TyLayout<'tcx>, cx: &C, i: usize) -> C::TyLayout {
        let tcx = cx.tcx();
        cx.layout_of(match this.ty.sty {
            // Types without fields: asking for a field is a compiler bug.
            ty::Bool |
            ty::Char |
            ty::Int(_) |
            ty::Uint(_) |
            ty::Float(_) |
            ty::FnPtr(_) |
            ty::Never |
            ty::FnDef(..) |
            ty::GeneratorWitness(..) |
            ty::Foreign(..) |
            ty::Dynamic(..) => {
                bug!("TyLayout::field_type({:?}): not applicable", this)
            }

            // Potentially-fat pointers.
            ty::Ref(_, pointee, _) |
            ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                assert!(i < this.fields.count());

                // Reuse the fat *T type as its own thin pointer data field.
                // This provides information about e.g. DST struct pointees
                // (which may have no non-DST form), and will work as long
                // as the `Abi` or `FieldPlacement` is checked by users.
                if i == 0 {
                    let nil = tcx.mk_unit();
                    let ptr_ty = if this.ty.is_unsafe_ptr() {
                        tcx.mk_mut_ptr(nil)
                    } else {
                        tcx.mk_mut_ref(tcx.types.re_static, nil)
                    };
                    // Compute the layout of a thin pointer, then relabel it
                    // with the original fat pointer type.
                    return cx.layout_of(ptr_ty).map_same(|mut ptr_layout| {
                        ptr_layout.ty = this.ty;
                        ptr_layout
                    });
                }

                // Field 1 is the unsized metadata; its type depends on the
                // pointee's tail.
                match tcx.struct_tail(pointee).sty {
                    ty::Slice(_) |
                    ty::Str => tcx.types.usize,
                    ty::Dynamic(_, _) => {
                        // Vtable pointer, modeled as a pointer to 3 usizes.
                        tcx.mk_imm_ref(
                            tcx.types.re_static,
                            tcx.mk_array(tcx.types.usize, 3),
                        )
                        /* FIXME use actual fn pointers
                        Warning: naively computing the number of entries in the
                        vtable by counting the methods on the trait + methods on
                        all parent traits does not work, because some methods can
                        be not object safe and thus excluded from the vtable.
                        Increase this counter if you tried to implement this but
                        failed to do it without duplicating a lot of code from
                        other places in the compiler: 2
                        tcx.mk_tup(&[
                            tcx.mk_array(tcx.types.usize, 3),
                            tcx.mk_array(Option<fn()>),
                        ])
                        */
                    }
                    _ => bug!("TyLayout::field_type({:?}): not applicable", this)
                }
            }

            // Arrays and slices.
            ty::Array(element, _) |
            ty::Slice(element) => element,
            ty::Str => tcx.types.u8,

            // Tuples, generators and closures.
            ty::Closure(def_id, ref substs) => {
                substs.upvar_tys(def_id, tcx).nth(i).unwrap()
            }

            ty::Generator(def_id, ref substs, _) => {
                substs.field_tys(def_id, tcx).nth(i).unwrap()
            }

            ty::Tuple(tys) => tys[i],

            // SIMD vector types: all lanes share the one element type.
            ty::Adt(def, ..) if def.repr.simd() => {
                this.ty.simd_type(tcx)
            }

            // ADTs.
            ty::Adt(def, substs) => {
                match this.variants {
                    Variants::Single { index } => {
                        def.variants[index].fields[i].ty(tcx, substs)
                    }

                    // Discriminant field for enums (where applicable).
                    Variants::Tagged { tag: ref discr, .. } |
                    Variants::NicheFilling { niche: ref discr, .. } => {
                        assert_eq!(i, 0);
                        // Synthesize a layout for the discriminant scalar
                        // itself; bypass `cx.layout_of` entirely.
                        let layout = LayoutDetails::scalar(cx, discr.clone());
                        return MaybeResult::from_ok(TyLayout {
                            details: tcx.intern_layout(layout),
                            ty: discr.value.to_ty(tcx)
                        });
                    }
                }
            }

            // Types that should have been normalized/substituted away by now.
            ty::Projection(_) | ty::UnnormalizedProjection(..) | ty::Bound(..) |
            ty::Opaque(..) | ty::Param(_) | ty::Infer(_) | ty::Error => {
                bug!("TyLayout::field_type: unexpected type `{}`", this.ty)
            }
        })
    }
}
1752
/// A scalar location within a layout whose valid range does not cover all
/// bit patterns, leaving spare ("niche") values that an enum discriminant
/// can be packed into (see `LayoutCx::find_niche`).
struct Niche {
    /// Byte offset of the scalar within the enclosing layout.
    offset: Size,
    /// The scalar itself, including its current `valid_range`.
    scalar: Scalar,
    /// How many values fall outside `scalar.valid_range`.
    available: u128,
}
1758
impl Niche {
    /// Tries to reserve `count` values from this niche.
    ///
    /// On success, returns the first reserved value (one past the current
    /// valid-range end, wrapping within the scalar's bit width) and a new
    /// `Scalar` whose valid range has been extended to cover the reserved
    /// values. Returns `None` if fewer than `count` values are available.
    fn reserve<'a, 'tcx>(
        &self,
        cx: &LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>>,
        count: u128,
    ) -> Option<(u128, Scalar)> {
        if count > self.available {
            return None;
        }
        let Scalar { value, valid_range: ref v } = self.scalar;
        let bits = value.size(cx).bits();
        assert!(bits <= 128);
        // Mask for wrapping arithmetic within the scalar's bit width.
        let max_value = !0u128 >> (128 - bits);
        // First reserved value and new end of the valid range; both wrap
        // around `max_value` (the valid range may itself be wrap-around).
        let start = v.end().wrapping_add(1) & max_value;
        let end = v.end().wrapping_add(count) & max_value;
        Some((start, Scalar { value, valid_range: *v.start()..=end }))
    }
}
1777
impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
    /// Find the offset of a niche leaf field, starting from
    /// the given type and recursing through aggregates.
    // FIXME(eddyb) traverse already optimized enums.
    fn find_niche(&self, layout: TyLayout<'tcx>) -> Result<Option<Niche>, LayoutError<'tcx>> {
        // Build a `Niche` for a scalar at `offset`, or `None` if the
        // scalar's valid range covers every bit pattern.
        let scalar_niche = |scalar: &Scalar, offset| {
            let Scalar { value, valid_range: ref v } = *scalar;

            let bits = value.size(self).bits();
            assert!(bits <= 128);
            let max_value = !0u128 >> (128 - bits);

            // Find out how many values are outside the valid range.
            let available = if v.start() <= v.end() {
                // Normal range: invalid values below start and above end.
                v.start() + (max_value - v.end())
            } else {
                // Wrap-around range: invalid values strictly between
                // end and start.
                v.start() - v.end() - 1
            };

            // Give up if there is no niche value available.
            if available == 0 {
                return None;
            }

            Some(Niche { offset, scalar: scalar.clone(), available })
        };

        // Locals variables which live across yields are stored
        // in the generator type as fields. These may be uninitialized
        // so we don't look for niches there.
        if let ty::Generator(..) = layout.ty.sty {
            return Ok(None);
        }

        match layout.abi {
            Abi::Scalar(ref scalar) => {
                return Ok(scalar_niche(scalar, Size::ZERO));
            }
            Abi::ScalarPair(ref a, ref b) => {
                // HACK(nox): We iter on `b` and then `a` because `max_by_key`
                // returns the last maximum.
                let niche = iter::once(
                    (b, a.value.size(self).align_to(b.value.align(self).abi))
                )
                    .chain(iter::once((a, Size::ZERO)))
                    .filter_map(|(scalar, offset)| scalar_niche(scalar, offset))
                    .max_by_key(|niche| niche.available);
                return Ok(niche);
            }
            Abi::Vector { ref element, .. } => {
                return Ok(scalar_niche(element, Size::ZERO));
            }
            _ => {}
        }

        // Perhaps one of the fields is non-zero, let's recurse and find out.
        if let FieldPlacement::Union(_) = layout.fields {
            // Only Rust enums have safe-to-inspect fields
            // (a discriminant), other unions are unsafe.
            if let Variants::Single { .. } = layout.variants {
                return Ok(None);
            }
        }
        if let FieldPlacement::Array { .. } = layout.fields {
            // All array elements share one layout; checking the first
            // element suffices.
            if layout.fields.count() > 0 {
                return self.find_niche(layout.field(self, 0)?);
            } else {
                return Ok(None);
            }
        }
        // Arbitrary placement: recurse into every field and keep the
        // niche with the most available values, adjusting its offset to
        // be relative to this layout.
        let mut niche = None;
        let mut available = 0;
        for i in 0..layout.fields.count() {
            if let Some(mut c) = self.find_niche(layout.field(self, i)?)? {
                if c.available > available {
                    available = c.available;
                    c.offset += layout.fields.offset(i);
                    niche = Some(c);
                }
            }
        }
        Ok(niche)
    }
}
1862
impl<'a> HashStable<StableHashingContext<'a>> for Variants {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        use ty::layout::Variants::*;
        // Hash the variant discriminant first, so different variants never
        // collide even if their field hashes coincide.
        mem::discriminant(self).hash_stable(hcx, hasher);

        match *self {
            Single { index } => {
                index.hash_stable(hcx, hasher);
            }
            Tagged {
                ref tag,
                ref variants,
            } => {
                tag.hash_stable(hcx, hasher);
                variants.hash_stable(hcx, hasher);
            }
            NicheFilling {
                dataful_variant,
                ref niche_variants,
                ref niche,
                niche_start,
                ref variants,
            } => {
                dataful_variant.hash_stable(hcx, hasher);
                // A `RangeInclusive` is hashed as its two endpoints.
                niche_variants.start().hash_stable(hcx, hasher);
                niche_variants.end().hash_stable(hcx, hasher);
                niche.hash_stable(hcx, hasher);
                niche_start.hash_stable(hcx, hasher);
                variants.hash_stable(hcx, hasher);
            }
        }
    }
}
1898
impl<'a> HashStable<StableHashingContext<'a>> for FieldPlacement {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        use ty::layout::FieldPlacement::*;
        // Hash the variant discriminant first, then each variant's fields
        // in declaration order.
        mem::discriminant(self).hash_stable(hcx, hasher);

        match *self {
            Union(count) => {
                count.hash_stable(hcx, hasher);
            }
            Array { count, stride } => {
                count.hash_stable(hcx, hasher);
                stride.hash_stable(hcx, hasher);
            }
            Arbitrary { ref offsets, ref memory_index } => {
                offsets.hash_stable(hcx, hasher);
                memory_index.hash_stable(hcx, hasher);
            }
        }
    }
}
1921
impl<'a> HashStable<StableHashingContext<'a>> for VariantIdx {
    fn hash_stable<W: StableHasherResult>(
        &self,
        hcx: &mut StableHashingContext<'a>,
        hasher: &mut StableHasher<W>,
    ) {
        // A `VariantIdx` is just a newtyped `u32`; hash the raw index.
        self.as_u32().hash_stable(hcx, hasher)
    }
}
1931
impl<'a> HashStable<StableHashingContext<'a>> for Abi {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        use ty::layout::Abi::*;
        // Hash the variant discriminant first, then each variant's fields.
        mem::discriminant(self).hash_stable(hcx, hasher);

        match *self {
            Uninhabited => {}
            Scalar(ref value) => {
                value.hash_stable(hcx, hasher);
            }
            ScalarPair(ref a, ref b) => {
                a.hash_stable(hcx, hasher);
                b.hash_stable(hcx, hasher);
            }
            Vector { ref element, count } => {
                element.hash_stable(hcx, hasher);
                count.hash_stable(hcx, hasher);
            }
            Aggregate { sized } => {
                sized.hash_stable(hcx, hasher);
            }
        }
    }
}
1958
impl<'a> HashStable<StableHashingContext<'a>> for Scalar {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        // Exhaustive destructuring: adding a field to `Scalar` without
        // updating this impl becomes a compile error.
        let Scalar { value, ref valid_range } = *self;
        value.hash_stable(hcx, hasher);
        // The `RangeInclusive` is hashed as its two endpoints.
        valid_range.start().hash_stable(hcx, hasher);
        valid_range.end().hash_stable(hcx, hasher);
    }
}
1969
// Mechanical stable-hash impls for layout types whose fields/variants can
// all be hashed field-by-field via the `impl_stable_hash_for!` macro.
impl_stable_hash_for!(struct ::ty::layout::LayoutDetails {
    variants,
    fields,
    abi,
    size,
    align
});

impl_stable_hash_for!(enum ::ty::layout::Integer {
    I8,
    I16,
    I32,
    I64,
    I128
});

impl_stable_hash_for!(enum ::ty::layout::Primitive {
    Int(integer, signed),
    Float(fty),
    Pointer
});

impl_stable_hash_for!(struct ::ty::layout::AbiAndPrefAlign {
    abi,
    pref
});
1996
impl<'gcx> HashStable<StableHashingContext<'gcx>> for Align {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'gcx>,
                                          hasher: &mut StableHasher<W>) {
        // Hash the alignment via its byte value.
        self.bytes().hash_stable(hcx, hasher);
    }
}
2004
impl<'gcx> HashStable<StableHashingContext<'gcx>> for Size {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'gcx>,
                                          hasher: &mut StableHasher<W>) {
        // Hash the size via its byte value.
        self.bytes().hash_stable(hcx, hasher);
    }
}
2012
impl<'a, 'gcx> HashStable<StableHashingContext<'a>> for LayoutError<'gcx>
{
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        use ty::layout::LayoutError::*;
        // Hash the variant discriminant first, then the payload type
        // common to both variants.
        mem::discriminant(self).hash_stable(hcx, hasher);

        match *self {
            Unknown(t) |
            SizeOverflow(t) => t.hash_stable(hcx, hasher)
        }
    }
}