]> git.lizzy.rs Git - rust.git/blob - src/librustc/ty/layout.rs
Rollup merge of #55182 - jD91mZM2:rebased, r=alexcrichton
[rust.git] / src / librustc / ty / layout.rs
1 // Copyright 2016 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
4 //
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
10
11 use session::{self, DataTypeKind};
12 use ty::{self, Ty, TyCtxt, TypeFoldable, ReprOptions};
13
14 use syntax::ast::{self, IntTy, UintTy};
15 use syntax::attr;
16 use syntax_pos::DUMMY_SP;
17
18 use std::cmp;
19 use std::fmt;
20 use std::i128;
21 use std::iter;
22 use std::mem;
23 use std::ops::Bound;
24
25 use ich::StableHashingContext;
26 use rustc_data_structures::indexed_vec::{IndexVec, Idx};
27 use rustc_data_structures::stable_hasher::{HashStable, StableHasher,
28                                            StableHasherResult};
29
30 pub use rustc_target::abi::*;
31
/// Extensions for `Integer` that need access to the type context
/// (`rustc`-side functionality layered on top of `rustc_target`'s type).
pub trait IntegerExt {
    /// Converts this integer size plus a signedness to the concrete Rust type.
    fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, signed: bool) -> Ty<'tcx>;
    /// Gets the Integer type from an attr::IntType.
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
    /// Finds the appropriate Integer type and signedness for the given
    /// discriminant range and `#[repr]` attribute of an enum.
    fn repr_discr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                            ty: Ty<'tcx>,
                            repr: &ReprOptions,
                            min: i128,
                            max: i128)
                            -> (Integer, bool);
}
42
43 impl IntegerExt for Integer {
44     fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, signed: bool) -> Ty<'tcx> {
45         match (*self, signed) {
46             (I8, false) => tcx.types.u8,
47             (I16, false) => tcx.types.u16,
48             (I32, false) => tcx.types.u32,
49             (I64, false) => tcx.types.u64,
50             (I128, false) => tcx.types.u128,
51             (I8, true) => tcx.types.i8,
52             (I16, true) => tcx.types.i16,
53             (I32, true) => tcx.types.i32,
54             (I64, true) => tcx.types.i64,
55             (I128, true) => tcx.types.i128,
56         }
57     }
58
59     /// Get the Integer type from an attr::IntType.
60     fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
61         let dl = cx.data_layout();
62
63         match ity {
64             attr::SignedInt(IntTy::I8) | attr::UnsignedInt(UintTy::U8) => I8,
65             attr::SignedInt(IntTy::I16) | attr::UnsignedInt(UintTy::U16) => I16,
66             attr::SignedInt(IntTy::I32) | attr::UnsignedInt(UintTy::U32) => I32,
67             attr::SignedInt(IntTy::I64) | attr::UnsignedInt(UintTy::U64) => I64,
68             attr::SignedInt(IntTy::I128) | attr::UnsignedInt(UintTy::U128) => I128,
69             attr::SignedInt(IntTy::Isize) | attr::UnsignedInt(UintTy::Usize) => {
70                 dl.ptr_sized_integer()
71             }
72         }
73     }
74
75     /// Find the appropriate Integer type and signedness for the given
76     /// signed discriminant range and #[repr] attribute.
77     /// N.B.: u128 values above i128::MAX will be treated as signed, but
78     /// that shouldn't affect anything, other than maybe debuginfo.
79     fn repr_discr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
80                             ty: Ty<'tcx>,
81                             repr: &ReprOptions,
82                             min: i128,
83                             max: i128)
84                             -> (Integer, bool) {
85         // Theoretically, negative values could be larger in unsigned representation
86         // than the unsigned representation of the signed minimum. However, if there
87         // are any negative values, the only valid unsigned representation is u128
88         // which can fit all i128 values, so the result remains unaffected.
89         let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
90         let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));
91
92         let mut min_from_extern = None;
93         let min_default = I8;
94
95         if let Some(ity) = repr.int {
96             let discr = Integer::from_attr(&tcx, ity);
97             let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
98             if discr < fit {
99                 bug!("Integer::repr_discr: `#[repr]` hint too small for \
100                       discriminant range of enum `{}", ty)
101             }
102             return (discr, ity.is_signed());
103         }
104
105         if repr.c() {
106             match &tcx.sess.target.target.arch[..] {
107                 // WARNING: the ARM EABI has two variants; the one corresponding
108                 // to `at_least == I32` appears to be used on Linux and NetBSD,
109                 // but some systems may use the variant corresponding to no
110                 // lower bound. However, we don't run on those yet...?
111                 "arm" => min_from_extern = Some(I32),
112                 _ => min_from_extern = Some(I32),
113             }
114         }
115
116         let at_least = min_from_extern.unwrap_or(min_default);
117
118         // If there are no negative values, we can use the unsigned fit.
119         if min >= 0 {
120             (cmp::max(unsigned_fit, at_least), false)
121         } else {
122             (cmp::max(signed_fit, at_least), true)
123         }
124     }
125 }
126
/// Extension for `Primitive`: conversion back to a concrete Rust type.
pub trait PrimitiveExt {
    /// Returns the Rust type this primitive value maps to.
    fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Ty<'tcx>;
}
130
131 impl PrimitiveExt for Primitive {
132     fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Ty<'tcx> {
133         match *self {
134             Int(i, signed) => i.to_ty(tcx, signed),
135             Float(FloatTy::F32) => tcx.types.f32,
136             Float(FloatTy::F64) => tcx.types.f64,
137             Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
138         }
139     }
140 }
141
/// The first half of a fat pointer.
///
/// Matches the `(data, metadata)` order that `scalar_pair` produces for
/// unsized pointer layouts in this file.
///
/// - For a trait object, this is the address of the box.
/// - For a slice, this is the base address.
pub const FAT_PTR_ADDR: usize = 0;

/// The second half of a fat pointer.
///
/// - For a trait object, this is the address of the vtable.
/// - For a slice, this is the length.
pub const FAT_PTR_EXTRA: usize = 1;
153
/// Errors produced when the layout of a type cannot be computed.
#[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable)]
pub enum LayoutError<'tcx> {
    /// The layout is not known — e.g. an array length that still contains
    /// projections after normalization, or an unrecognized unsized tail.
    Unknown(Ty<'tcx>),
    /// The type's size overflowed during computation (exceeds what the
    /// target architecture can represent).
    SizeOverflow(Ty<'tcx>)
}
159
160 impl<'tcx> fmt::Display for LayoutError<'tcx> {
161     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
162         match *self {
163             LayoutError::Unknown(ty) => {
164                 write!(f, "the type `{:?}` has an unknown layout", ty)
165             }
166             LayoutError::SizeOverflow(ty) => {
167                 write!(f, "the type `{:?}` is too big for the current architecture", ty)
168             }
169         }
170     }
171 }
172
/// Query provider: computes the layout of a type, guarding against
/// infinitely-recursive types by tracking a depth counter in the
/// thread-local `ImplicitCtxt`.
fn layout_raw<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                        query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
                        -> Result<&'tcx LayoutDetails, LayoutError<'tcx>>
{
    ty::tls::with_related_context(tcx, move |icx| {
        let rec_limit = *tcx.sess.recursion_limit.get();
        let (param_env, ty) = query.into_parts();

        // Past the session's recursion limit this is a fatal error (not an
        // `Err`): the type cannot be represented at all.
        if icx.layout_depth > rec_limit {
            tcx.sess.fatal(
                &format!("overflow representing the type `{}`", ty));
        }

        // Update the ImplicitCtxt to increase the layout_depth
        let icx = ty::tls::ImplicitCtxt {
            layout_depth: icx.layout_depth + 1,
            ..icx.clone()
        };

        // Run the actual computation with the deepened context installed, so
        // nested layout queries observe the incremented depth.
        ty::tls::enter_context(&icx, |_| {
            let cx = LayoutCx { tcx, param_env };
            cx.layout_raw_uncached(ty)
        })
    })
}
198
199 pub fn provide(providers: &mut ty::query::Providers<'_>) {
200     *providers = ty::query::Providers {
201         layout_raw,
202         ..*providers
203     };
204 }
205
/// Context for layout computation: a type context (generic `C` so other
/// layout-capable contexts can be used) together with the parameter
/// environment used for normalization and sizedness checks.
pub struct LayoutCx<'tcx, C> {
    pub tcx: C,
    pub param_env: ty::ParamEnv<'tcx>
}
210
211 impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
212     fn layout_raw_uncached(&self, ty: Ty<'tcx>)
213                            -> Result<&'tcx LayoutDetails, LayoutError<'tcx>> {
214         let tcx = self.tcx;
215         let param_env = self.param_env;
216         let dl = self.data_layout();
217         let scalar_unit = |value: Primitive| {
218             let bits = value.size(dl).bits();
219             assert!(bits <= 128);
220             Scalar {
221                 value,
222                 valid_range: 0..=(!0 >> (128 - bits))
223             }
224         };
225         let scalar = |value: Primitive| {
226             tcx.intern_layout(LayoutDetails::scalar(self, scalar_unit(value)))
227         };
228         let scalar_pair = |a: Scalar, b: Scalar| {
229             let align = a.value.align(dl).max(b.value.align(dl)).max(dl.aggregate_align);
230             let b_offset = a.value.size(dl).abi_align(b.value.align(dl));
231             let size = (b_offset + b.value.size(dl)).abi_align(align);
232             LayoutDetails {
233                 variants: Variants::Single { index: VariantIdx::new(0) },
234                 fields: FieldPlacement::Arbitrary {
235                     offsets: vec![Size::ZERO, b_offset],
236                     memory_index: vec![0, 1]
237                 },
238                 abi: Abi::ScalarPair(a, b),
239                 align,
240                 size
241             }
242         };
243
244         #[derive(Copy, Clone, Debug)]
245         enum StructKind {
246             /// A tuple, closure, or univariant which cannot be coerced to unsized.
247             AlwaysSized,
248             /// A univariant, the last field of which may be coerced to unsized.
249             MaybeUnsized,
250             /// A univariant, but with a prefix of an arbitrary size & alignment (e.g. enum tag).
251             Prefixed(Size, Align),
252         }
253
254         let univariant_uninterned = |fields: &[TyLayout<'_>], repr: &ReprOptions, kind| {
255             let packed = repr.packed();
256             if packed && repr.align > 0 {
257                 bug!("struct cannot be packed and aligned");
258             }
259
260             let pack = {
261                 let pack = repr.pack as u64;
262                 Align::from_bytes(pack, pack).unwrap()
263             };
264
265             let mut align = if packed {
266                 dl.i8_align
267             } else {
268                 dl.aggregate_align
269             };
270
271             let mut sized = true;
272             let mut offsets = vec![Size::ZERO; fields.len()];
273             let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();
274
275             let mut optimize = !repr.inhibit_struct_field_reordering_opt();
276             if let StructKind::Prefixed(_, align) = kind {
277                 optimize &= align.abi() == 1;
278             }
279
280             if optimize {
281                 let end = if let StructKind::MaybeUnsized = kind {
282                     fields.len() - 1
283                 } else {
284                     fields.len()
285                 };
286                 let optimizing = &mut inverse_memory_index[..end];
287                 let field_align = |f: &TyLayout<'_>| {
288                     if packed { f.align.min(pack).abi() } else { f.align.abi() }
289                 };
290                 match kind {
291                     StructKind::AlwaysSized |
292                     StructKind::MaybeUnsized => {
293                         optimizing.sort_by_key(|&x| {
294                             // Place ZSTs first to avoid "interesting offsets",
295                             // especially with only one or two non-ZST fields.
296                             let f = &fields[x as usize];
297                             (!f.is_zst(), cmp::Reverse(field_align(f)))
298                         });
299                     }
300                     StructKind::Prefixed(..) => {
301                         optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
302                     }
303                 }
304             }
305
306             // inverse_memory_index holds field indices by increasing memory offset.
307             // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
308             // We now write field offsets to the corresponding offset slot;
309             // field 5 with offset 0 puts 0 in offsets[5].
310             // At the bottom of this function, we use inverse_memory_index to produce memory_index.
311
312             let mut offset = Size::ZERO;
313
314             if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
315                 if packed {
316                     let prefix_align = prefix_align.min(pack);
317                     align = align.max(prefix_align);
318                 } else {
319                     align = align.max(prefix_align);
320                 }
321                 offset = prefix_size.abi_align(prefix_align);
322             }
323
324             for &i in &inverse_memory_index {
325                 let field = fields[i as usize];
326                 if !sized {
327                     bug!("univariant: field #{} of `{}` comes after unsized field",
328                          offsets.len(), ty);
329                 }
330
331                 if field.is_unsized() {
332                     sized = false;
333                 }
334
335                 // Invariant: offset < dl.obj_size_bound() <= 1<<61
336                 if packed {
337                     let field_pack = field.align.min(pack);
338                     offset = offset.abi_align(field_pack);
339                     align = align.max(field_pack);
340                 }
341                 else {
342                     offset = offset.abi_align(field.align);
343                     align = align.max(field.align);
344                 }
345
346                 debug!("univariant offset: {:?} field: {:#?}", offset, field);
347                 offsets[i as usize] = offset;
348
349                 offset = offset.checked_add(field.size, dl)
350                     .ok_or(LayoutError::SizeOverflow(ty))?;
351             }
352
353             if repr.align > 0 {
354                 let repr_align = repr.align as u64;
355                 align = align.max(Align::from_bytes(repr_align, repr_align).unwrap());
356                 debug!("univariant repr_align: {:?}", repr_align);
357             }
358
359             debug!("univariant min_size: {:?}", offset);
360             let min_size = offset;
361
362             // As stated above, inverse_memory_index holds field indices by increasing offset.
363             // This makes it an already-sorted view of the offsets vec.
364             // To invert it, consider:
365             // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
366             // Field 5 would be the first element, so memory_index is i:
367             // Note: if we didn't optimize, it's already right.
368
369             let mut memory_index;
370             if optimize {
371                 memory_index = vec![0; inverse_memory_index.len()];
372
373                 for i in 0..inverse_memory_index.len() {
374                     memory_index[inverse_memory_index[i] as usize]  = i as u32;
375                 }
376             } else {
377                 memory_index = inverse_memory_index;
378             }
379
380             let size = min_size.abi_align(align);
381             let mut abi = Abi::Aggregate { sized };
382
383             // Unpack newtype ABIs and find scalar pairs.
384             if sized && size.bytes() > 0 {
385                 // All other fields must be ZSTs, and we need them to all start at 0.
386                 let mut zst_offsets =
387                     offsets.iter().enumerate().filter(|&(i, _)| fields[i].is_zst());
388                 if zst_offsets.all(|(_, o)| o.bytes() == 0) {
389                     let mut non_zst_fields =
390                         fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());
391
392                     match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
393                         // We have exactly one non-ZST field.
394                         (Some((i, field)), None, None) => {
395                             // Field fills the struct and it has a scalar or scalar pair ABI.
396                             if offsets[i].bytes() == 0 &&
397                                align.abi() == field.align.abi() &&
398                                size == field.size {
399                                 match field.abi {
400                                     // For plain scalars, or vectors of them, we can't unpack
401                                     // newtypes for `#[repr(C)]`, as that affects C ABIs.
402                                     Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
403                                         abi = field.abi.clone();
404                                     }
405                                     // But scalar pairs are Rust-specific and get
406                                     // treated as aggregates by C ABIs anyway.
407                                     Abi::ScalarPair(..) => {
408                                         abi = field.abi.clone();
409                                     }
410                                     _ => {}
411                                 }
412                             }
413                         }
414
415                         // Two non-ZST fields, and they're both scalars.
416                         (Some((i, &TyLayout {
417                             details: &LayoutDetails { abi: Abi::Scalar(ref a), .. }, ..
418                         })), Some((j, &TyLayout {
419                             details: &LayoutDetails { abi: Abi::Scalar(ref b), .. }, ..
420                         })), None) => {
421                             // Order by the memory placement, not source order.
422                             let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
423                                 ((i, a), (j, b))
424                             } else {
425                                 ((j, b), (i, a))
426                             };
427                             let pair = scalar_pair(a.clone(), b.clone());
428                             let pair_offsets = match pair.fields {
429                                 FieldPlacement::Arbitrary {
430                                     ref offsets,
431                                     ref memory_index
432                                 } => {
433                                     assert_eq!(memory_index, &[0, 1]);
434                                     offsets
435                                 }
436                                 _ => bug!()
437                             };
438                             if offsets[i] == pair_offsets[0] &&
439                                offsets[j] == pair_offsets[1] &&
440                                align == pair.align &&
441                                size == pair.size {
442                                 // We can use `ScalarPair` only when it matches our
443                                 // already computed layout (including `#[repr(C)]`).
444                                 abi = pair.abi;
445                             }
446                         }
447
448                         _ => {}
449                     }
450                 }
451             }
452
453             if sized && fields.iter().any(|f| f.abi.is_uninhabited()) {
454                 abi = Abi::Uninhabited;
455             }
456
457             Ok(LayoutDetails {
458                 variants: Variants::Single { index: VariantIdx::new(0) },
459                 fields: FieldPlacement::Arbitrary {
460                     offsets,
461                     memory_index
462                 },
463                 abi,
464                 align,
465                 size
466             })
467         };
468         let univariant = |fields: &[TyLayout<'_>], repr: &ReprOptions, kind| {
469             Ok(tcx.intern_layout(univariant_uninterned(fields, repr, kind)?))
470         };
471         debug_assert!(!ty.has_infer_types());
472
473         Ok(match ty.sty {
474             // Basic scalars.
475             ty::Bool => {
476                 tcx.intern_layout(LayoutDetails::scalar(self, Scalar {
477                     value: Int(I8, false),
478                     valid_range: 0..=1
479                 }))
480             }
481             ty::Char => {
482                 tcx.intern_layout(LayoutDetails::scalar(self, Scalar {
483                     value: Int(I32, false),
484                     valid_range: 0..=0x10FFFF
485                 }))
486             }
487             ty::Int(ity) => {
488                 scalar(Int(Integer::from_attr(dl, attr::SignedInt(ity)), true))
489             }
490             ty::Uint(ity) => {
491                 scalar(Int(Integer::from_attr(dl, attr::UnsignedInt(ity)), false))
492             }
493             ty::Float(fty) => scalar(Float(fty)),
494             ty::FnPtr(_) => {
495                 let mut ptr = scalar_unit(Pointer);
496                 ptr.valid_range = 1..=*ptr.valid_range.end();
497                 tcx.intern_layout(LayoutDetails::scalar(self, ptr))
498             }
499
500             // The never type.
501             ty::Never => {
502                 tcx.intern_layout(LayoutDetails {
503                     variants: Variants::Single { index: VariantIdx::new(0) },
504                     fields: FieldPlacement::Union(0),
505                     abi: Abi::Uninhabited,
506                     align: dl.i8_align,
507                     size: Size::ZERO
508                 })
509             }
510
511             // Potentially-fat pointers.
512             ty::Ref(_, pointee, _) |
513             ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
514                 let mut data_ptr = scalar_unit(Pointer);
515                 if !ty.is_unsafe_ptr() {
516                     data_ptr.valid_range = 1..=*data_ptr.valid_range.end();
517                 }
518
519                 let pointee = tcx.normalize_erasing_regions(param_env, pointee);
520                 if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
521                     return Ok(tcx.intern_layout(LayoutDetails::scalar(self, data_ptr)));
522                 }
523
524                 let unsized_part = tcx.struct_tail(pointee);
525                 let metadata = match unsized_part.sty {
526                     ty::Foreign(..) => {
527                         return Ok(tcx.intern_layout(LayoutDetails::scalar(self, data_ptr)));
528                     }
529                     ty::Slice(_) | ty::Str => {
530                         scalar_unit(Int(dl.ptr_sized_integer(), false))
531                     }
532                     ty::Dynamic(..) => {
533                         let mut vtable = scalar_unit(Pointer);
534                         vtable.valid_range = 1..=*vtable.valid_range.end();
535                         vtable
536                     }
537                     _ => return Err(LayoutError::Unknown(unsized_part))
538                 };
539
540                 // Effectively a (ptr, meta) tuple.
541                 tcx.intern_layout(scalar_pair(data_ptr, metadata))
542             }
543
544             // Arrays and slices.
545             ty::Array(element, mut count) => {
546                 if count.has_projections() {
547                     count = tcx.normalize_erasing_regions(param_env, count);
548                     if count.has_projections() {
549                         return Err(LayoutError::Unknown(ty));
550                     }
551                 }
552
553                 let element = self.layout_of(element)?;
554                 let count = count.unwrap_usize(tcx);
555                 let size = element.size.checked_mul(count, dl)
556                     .ok_or(LayoutError::SizeOverflow(ty))?;
557
558                 tcx.intern_layout(LayoutDetails {
559                     variants: Variants::Single { index: VariantIdx::new(0) },
560                     fields: FieldPlacement::Array {
561                         stride: element.size,
562                         count
563                     },
564                     abi: Abi::Aggregate { sized: true },
565                     align: element.align,
566                     size
567                 })
568             }
569             ty::Slice(element) => {
570                 let element = self.layout_of(element)?;
571                 tcx.intern_layout(LayoutDetails {
572                     variants: Variants::Single { index: VariantIdx::new(0) },
573                     fields: FieldPlacement::Array {
574                         stride: element.size,
575                         count: 0
576                     },
577                     abi: Abi::Aggregate { sized: false },
578                     align: element.align,
579                     size: Size::ZERO
580                 })
581             }
582             ty::Str => {
583                 tcx.intern_layout(LayoutDetails {
584                     variants: Variants::Single { index: VariantIdx::new(0) },
585                     fields: FieldPlacement::Array {
586                         stride: Size::from_bytes(1),
587                         count: 0
588                     },
589                     abi: Abi::Aggregate { sized: false },
590                     align: dl.i8_align,
591                     size: Size::ZERO
592                 })
593             }
594
595             // Odd unit types.
596             ty::FnDef(..) => {
597                 univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?
598             }
599             ty::Dynamic(..) | ty::Foreign(..) => {
600                 let mut unit = univariant_uninterned(&[], &ReprOptions::default(),
601                   StructKind::AlwaysSized)?;
602                 match unit.abi {
603                     Abi::Aggregate { ref mut sized } => *sized = false,
604                     _ => bug!()
605                 }
606                 tcx.intern_layout(unit)
607             }
608
609             // Tuples, generators and closures.
610             ty::Generator(def_id, ref substs, _) => {
611                 let tys = substs.field_tys(def_id, tcx);
612                 univariant(&tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
613                     &ReprOptions::default(),
614                     StructKind::AlwaysSized)?
615             }
616
617             ty::Closure(def_id, ref substs) => {
618                 let tys = substs.upvar_tys(def_id, tcx);
619                 univariant(&tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
620                     &ReprOptions::default(),
621                     StructKind::AlwaysSized)?
622             }
623
624             ty::Tuple(tys) => {
625                 let kind = if tys.len() == 0 {
626                     StructKind::AlwaysSized
627                 } else {
628                     StructKind::MaybeUnsized
629                 };
630
631                 univariant(&tys.iter().map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
632                            &ReprOptions::default(), kind)?
633             }
634
635             // SIMD vector types.
636             ty::Adt(def, ..) if def.repr.simd() => {
637                 let element = self.layout_of(ty.simd_type(tcx))?;
638                 let count = ty.simd_size(tcx) as u64;
639                 assert!(count > 0);
640                 let scalar = match element.abi {
641                     Abi::Scalar(ref scalar) => scalar.clone(),
642                     _ => {
643                         tcx.sess.fatal(&format!("monomorphising SIMD type `{}` with \
644                                                  a non-machine element type `{}`",
645                                                 ty, element.ty));
646                     }
647                 };
648                 let size = element.size.checked_mul(count, dl)
649                     .ok_or(LayoutError::SizeOverflow(ty))?;
650                 let align = dl.vector_align(size);
651                 let size = size.abi_align(align);
652
653                 tcx.intern_layout(LayoutDetails {
654                     variants: Variants::Single { index: VariantIdx::new(0) },
655                     fields: FieldPlacement::Array {
656                         stride: element.size,
657                         count
658                     },
659                     abi: Abi::Vector {
660                         element: scalar,
661                         count
662                     },
663                     size,
664                     align,
665                 })
666             }
667
668             // ADTs.
669             ty::Adt(def, substs) => {
670                 // Cache the field layouts.
671                 let variants = def.variants.iter().map(|v| {
672                     v.fields.iter().map(|field| {
673                         self.layout_of(field.ty(tcx, substs))
674                     }).collect::<Result<Vec<_>, _>>()
675                 }).collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
676
677                 if def.is_union() {
678                     let packed = def.repr.packed();
679                     if packed && def.repr.align > 0 {
680                         bug!("Union cannot be packed and aligned");
681                     }
682
683                     let pack = {
684                         let pack = def.repr.pack as u64;
685                         Align::from_bytes(pack, pack).unwrap()
686                     };
687
688                     let mut align = if packed {
689                         dl.i8_align
690                     } else {
691                         dl.aggregate_align
692                     };
693
694                     if def.repr.align > 0 {
695                         let repr_align = def.repr.align as u64;
696                         align = align.max(
697                             Align::from_bytes(repr_align, repr_align).unwrap());
698                     }
699
700                     let mut size = Size::ZERO;
701                     let index = VariantIdx::new(0);
702                     for field in &variants[index] {
703                         assert!(!field.is_unsized());
704
705                         if packed {
706                             let field_pack = field.align.min(pack);
707                             align = align.max(field_pack);
708                         } else {
709                             align = align.max(field.align);
710                         }
711                         size = cmp::max(size, field.size);
712                     }
713
714                     return Ok(tcx.intern_layout(LayoutDetails {
715                         variants: Variants::Single { index },
716                         fields: FieldPlacement::Union(variants[index].len()),
717                         abi: Abi::Aggregate { sized: true },
718                         align,
719                         size: size.abi_align(align)
720                     }));
721                 }
722
723                 // A variant is absent if it's uninhabited and only has ZST fields.
724                 // Present uninhabited variants only require space for their fields,
725                 // but *not* an encoding of the discriminant (e.g. a tag value).
726                 // See issue #49298 for more details on the need to leave space
727                 // for non-ZST uninhabited data (mostly partial initialization).
728                 let absent = |fields: &[TyLayout<'_>]| {
729                     let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
730                     let is_zst = fields.iter().all(|f| f.is_zst());
731                     uninhabited && is_zst
732                 };
733                 let (present_first, present_second) = {
734                     let mut present_variants = variants.iter_enumerated().filter_map(|(i, v)| {
735                         if absent(v) {
736                             None
737                         } else {
738                             Some(i)
739                         }
740                     });
741                     (present_variants.next(), present_variants.next())
742                 };
743                 if present_first.is_none() {
744                     // Uninhabited because it has no variants, or only absent ones.
745                     return tcx.layout_raw(param_env.and(tcx.types.never));
746                 }
747
748                 let is_struct = !def.is_enum() ||
749                     // Only one variant is present.
750                     (present_second.is_none() &&
751                     // Representation optimizations are allowed.
752                     !def.repr.inhibit_enum_layout_opt());
753                 if is_struct {
754                     // Struct, or univariant enum equivalent to a struct.
755                     // (Typechecking will reject discriminant-sizing attrs.)
756
757                     let v = present_first.unwrap();
758                     let kind = if def.is_enum() || variants[v].len() == 0 {
759                         StructKind::AlwaysSized
760                     } else {
761                         let param_env = tcx.param_env(def.did);
762                         let last_field = def.variants[v].fields.last().unwrap();
763                         let always_sized = tcx.type_of(last_field.did)
764                                               .is_sized(tcx.at(DUMMY_SP), param_env);
765                         if !always_sized { StructKind::MaybeUnsized }
766                         else { StructKind::AlwaysSized }
767                     };
768
769                     let mut st = univariant_uninterned(&variants[v], &def.repr, kind)?;
770                     st.variants = Variants::Single { index: v };
771                     let (start, end) = self.tcx.layout_scalar_valid_range(def.did);
772                     match st.abi {
773                         Abi::Scalar(ref mut scalar) |
774                         Abi::ScalarPair(ref mut scalar, _) => {
775                             // the asserts ensure that we are not using the
776                             // `#[rustc_layout_scalar_valid_range(n)]`
777                             // attribute to widen the range of anything as that would probably
778                             // result in UB somewhere
779                             if let Bound::Included(start) = start {
780                                 assert!(*scalar.valid_range.start() <= start);
781                                 scalar.valid_range = start..=*scalar.valid_range.end();
782                             }
783                             if let Bound::Included(end) = end {
784                                 assert!(*scalar.valid_range.end() >= end);
785                                 scalar.valid_range = *scalar.valid_range.start()..=end;
786                             }
787                         }
788                         _ => assert!(
789                             start == Bound::Unbounded && end == Bound::Unbounded,
790                             "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
791                             def,
792                             st,
793                         ),
794                     }
795                     return Ok(tcx.intern_layout(st));
796                 }
797
798                 // The current code for niche-filling relies on variant indices
799                 // instead of actual discriminants, so dataful enums with
800                 // explicit discriminants (RFC #2363) would misbehave.
801                 let no_explicit_discriminants = def.variants.iter_enumerated()
802                     .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i.as_u32()));
803
804                 // Niche-filling enum optimization.
805                 if !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants {
806                     let mut dataful_variant = None;
807                     let mut niche_variants = VariantIdx::MAX..=VariantIdx::new(0);
808
809                     // Find one non-ZST variant.
810                     'variants: for (v, fields) in variants.iter_enumerated() {
811                         if absent(fields) {
812                             continue 'variants;
813                         }
814                         for f in fields {
815                             if !f.is_zst() {
816                                 if dataful_variant.is_none() {
817                                     dataful_variant = Some(v);
818                                     continue 'variants;
819                                 } else {
820                                     dataful_variant = None;
821                                     break 'variants;
822                                 }
823                             }
824                         }
825                         niche_variants = *niche_variants.start().min(&v)..=v;
826                     }
827
828                     if niche_variants.start() > niche_variants.end() {
829                         dataful_variant = None;
830                     }
831
832                     if let Some(i) = dataful_variant {
833                         let count = (
834                             niche_variants.end().as_u32() - niche_variants.start().as_u32() + 1
835                         ) as u128;
836                         for (field_index, &field) in variants[i].iter().enumerate() {
837                             let niche = match self.find_niche(field)? {
838                                 Some(niche) => niche,
839                                 _ => continue,
840                             };
841                             let (niche_start, niche_scalar) = match niche.reserve(self, count) {
842                                 Some(pair) => pair,
843                                 None => continue,
844                             };
845
846                             let mut align = dl.aggregate_align;
847                             let st = variants.iter_enumerated().map(|(j, v)| {
848                                 let mut st = univariant_uninterned(v,
849                                     &def.repr, StructKind::AlwaysSized)?;
850                                 st.variants = Variants::Single { index: j };
851
852                                 align = align.max(st.align);
853
854                                 Ok(st)
855                             }).collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
856
857                             let offset = st[i].fields.offset(field_index) + niche.offset;
858                             let size = st[i].size;
859
860                             let mut abi = match st[i].abi {
861                                 Abi::Scalar(_) => Abi::Scalar(niche_scalar.clone()),
862                                 Abi::ScalarPair(ref first, ref second) => {
863                                     // We need to use scalar_unit to reset the
864                                     // valid range to the maximal one for that
865                                     // primitive, because only the niche is
866                                     // guaranteed to be initialised, not the
867                                     // other primitive.
868                                     if offset.bytes() == 0 {
869                                         Abi::ScalarPair(
870                                             niche_scalar.clone(),
871                                             scalar_unit(second.value),
872                                         )
873                                     } else {
874                                         Abi::ScalarPair(
875                                             scalar_unit(first.value),
876                                             niche_scalar.clone(),
877                                         )
878                                     }
879                                 }
880                                 _ => Abi::Aggregate { sized: true },
881                             };
882
883                             if st.iter().all(|v| v.abi.is_uninhabited()) {
884                                 abi = Abi::Uninhabited;
885                             }
886
887                             return Ok(tcx.intern_layout(LayoutDetails {
888                                 variants: Variants::NicheFilling {
889                                     dataful_variant: i,
890                                     niche_variants,
891                                     niche: niche_scalar,
892                                     niche_start,
893                                     variants: st,
894                                 },
895                                 fields: FieldPlacement::Arbitrary {
896                                     offsets: vec![offset],
897                                     memory_index: vec![0]
898                                 },
899                                 abi,
900                                 size,
901                                 align,
902                             }));
903                         }
904                     }
905                 }
906
907                 let (mut min, mut max) = (i128::max_value(), i128::min_value());
908                 let discr_type = def.repr.discr_type();
909                 let bits = Integer::from_attr(self, discr_type).size().bits();
910                 for (i, discr) in def.discriminants(tcx) {
911                     if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
912                         continue;
913                     }
914                     let mut x = discr.val as i128;
915                     if discr_type.is_signed() {
916                         // sign extend the raw representation to be an i128
917                         x = (x << (128 - bits)) >> (128 - bits);
918                     }
919                     if x < min { min = x; }
920                     if x > max { max = x; }
921                 }
922                 // We might have no inhabited variants, so pretend there's at least one.
923                 if (min, max) == (i128::max_value(), i128::min_value()) {
924                     min = 0;
925                     max = 0;
926                 }
927                 assert!(min <= max, "discriminant range is {}...{}", min, max);
928                 let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max);
929
930                 let mut align = dl.aggregate_align;
931                 let mut size = Size::ZERO;
932
933                 // We're interested in the smallest alignment, so start large.
934                 let mut start_align = Align::from_bytes(256, 256).unwrap();
935                 assert_eq!(Integer::for_abi_align(dl, start_align), None);
936
937                 // repr(C) on an enum tells us to make a (tag, union) layout,
938                 // so we need to grow the prefix alignment to be at least
939                 // the alignment of the union. (This value is used both for
940                 // determining the alignment of the overall enum, and the
941                 // determining the alignment of the payload after the tag.)
942                 let mut prefix_align = min_ity.align(dl);
943                 if def.repr.c() {
944                     for fields in &variants {
945                         for field in fields {
946                             prefix_align = prefix_align.max(field.align);
947                         }
948                     }
949                 }
950
951                 // Create the set of structs that represent each variant.
952                 let mut layout_variants = variants.iter_enumerated().map(|(i, field_layouts)| {
953                     let mut st = univariant_uninterned(&field_layouts,
954                         &def.repr, StructKind::Prefixed(min_ity.size(), prefix_align))?;
955                     st.variants = Variants::Single { index: i };
956                     // Find the first field we can't move later
957                     // to make room for a larger discriminant.
958                     for field in st.fields.index_by_increasing_offset().map(|j| field_layouts[j]) {
959                         if !field.is_zst() || field.align.abi() != 1 {
960                             start_align = start_align.min(field.align);
961                             break;
962                         }
963                     }
964                     size = cmp::max(size, st.size);
965                     align = align.max(st.align);
966                     Ok(st)
967                 }).collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
968
969                 // Align the maximum variant size to the largest alignment.
970                 size = size.abi_align(align);
971
972                 if size.bytes() >= dl.obj_size_bound() {
973                     return Err(LayoutError::SizeOverflow(ty));
974                 }
975
976                 let typeck_ity = Integer::from_attr(dl, def.repr.discr_type());
977                 if typeck_ity < min_ity {
978                     // It is a bug if Layout decided on a greater discriminant size than typeck for
979                     // some reason at this point (based on values discriminant can take on). Mostly
980                     // because this discriminant will be loaded, and then stored into variable of
981                     // type calculated by typeck. Consider such case (a bug): typeck decided on
982                     // byte-sized discriminant, but layout thinks we need a 16-bit to store all
983                     // discriminant values. That would be a bug, because then, in codegen, in order
984                     // to store this 16-bit discriminant into 8-bit sized temporary some of the
985                     // space necessary to represent would have to be discarded (or layout is wrong
986                     // on thinking it needs 16 bits)
987                     bug!("layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
988                          min_ity, typeck_ity);
989                     // However, it is fine to make discr type however large (as an optimisation)
990                     // However, it is fine to make discr type however large (as an optimisation)
991                 }
992
993                 // Check to see if we should use a different type for the
994                 // discriminant. We can safely use a type with the same size
995                 // as the alignment of the first field of each variant.
996                 // We increase the size of the discriminant to avoid LLVM copying
997                 // padding when it doesn't need to. This normally causes unaligned
998                 // load/stores and excessive memcpy/memset operations. By using a
999                 // bigger integer size, LLVM can be sure about its contents and
1000                 // won't be so conservative.
1001
1002                 // Use the initial field alignment
1003                 let mut ity = if def.repr.c() || def.repr.int.is_some() {
1004                     min_ity
1005                 } else {
1006                     Integer::for_abi_align(dl, start_align).unwrap_or(min_ity)
1007                 };
1008
1009                 // If the alignment is not larger than the chosen discriminant size,
1010                 // don't use the alignment as the final size.
1011                 if ity <= min_ity {
1012                     ity = min_ity;
1013                 } else {
1014                     // Patch up the variants' first few fields.
1015                     let old_ity_size = min_ity.size();
1016                     let new_ity_size = ity.size();
1017                     for variant in &mut layout_variants {
1018                         match variant.fields {
1019                             FieldPlacement::Arbitrary { ref mut offsets, .. } => {
1020                                 for i in offsets {
1021                                     if *i <= old_ity_size {
1022                                         assert_eq!(*i, old_ity_size);
1023                                         *i = new_ity_size;
1024                                     }
1025                                 }
1026                                 // We might be making the struct larger.
1027                                 if variant.size <= old_ity_size {
1028                                     variant.size = new_ity_size;
1029                                 }
1030                             }
1031                             _ => bug!()
1032                         }
1033                     }
1034                 }
1035
1036                 let tag_mask = !0u128 >> (128 - ity.size().bits());
1037                 let tag = Scalar {
1038                     value: Int(ity, signed),
1039                     valid_range: (min as u128 & tag_mask)..=(max as u128 & tag_mask),
1040                 };
1041                 let mut abi = Abi::Aggregate { sized: true };
1042                 if tag.value.size(dl) == size {
1043                     abi = Abi::Scalar(tag.clone());
1044                 } else {
1045                     // Try to use a ScalarPair for all tagged enums.
1046                     let mut common_prim = None;
1047                     for (field_layouts, layout_variant) in variants.iter().zip(&layout_variants) {
1048                         let offsets = match layout_variant.fields {
1049                             FieldPlacement::Arbitrary { ref offsets, .. } => offsets,
1050                             _ => bug!(),
1051                         };
1052                         let mut fields = field_layouts
1053                             .iter()
1054                             .zip(offsets)
1055                             .filter(|p| !p.0.is_zst());
1056                         let (field, offset) = match (fields.next(), fields.next()) {
1057                             (None, None) => continue,
1058                             (Some(pair), None) => pair,
1059                             _ => {
1060                                 common_prim = None;
1061                                 break;
1062                             }
1063                         };
1064                         let prim = match field.details.abi {
1065                             Abi::Scalar(ref scalar) => scalar.value,
1066                             _ => {
1067                                 common_prim = None;
1068                                 break;
1069                             }
1070                         };
1071                         if let Some(pair) = common_prim {
1072                             // This is pretty conservative. We could go fancier
1073                             // by conflating things like i32 and u32, or even
1074                             // realising that (u8, u8) could just cohabit with
1075                             // u16 or even u32.
1076                             if pair != (prim, offset) {
1077                                 common_prim = None;
1078                                 break;
1079                             }
1080                         } else {
1081                             common_prim = Some((prim, offset));
1082                         }
1083                     }
1084                     if let Some((prim, offset)) = common_prim {
1085                         let pair = scalar_pair(tag.clone(), scalar_unit(prim));
1086                         let pair_offsets = match pair.fields {
1087                             FieldPlacement::Arbitrary {
1088                                 ref offsets,
1089                                 ref memory_index
1090                             } => {
1091                                 assert_eq!(memory_index, &[0, 1]);
1092                                 offsets
1093                             }
1094                             _ => bug!()
1095                         };
1096                         if pair_offsets[0] == Size::ZERO &&
1097                             pair_offsets[1] == *offset &&
1098                             align == pair.align &&
1099                             size == pair.size {
1100                             // We can use `ScalarPair` only when it matches our
1101                             // already computed layout (including `#[repr(C)]`).
1102                             abi = pair.abi;
1103                         }
1104                     }
1105                 }
1106
1107                 if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
1108                     abi = Abi::Uninhabited;
1109                 }
1110
1111                 tcx.intern_layout(LayoutDetails {
1112                     variants: Variants::Tagged {
1113                         tag,
1114                         variants: layout_variants,
1115                     },
1116                     fields: FieldPlacement::Arbitrary {
1117                         offsets: vec![Size::ZERO],
1118                         memory_index: vec![0]
1119                     },
1120                     abi,
1121                     align,
1122                     size
1123                 })
1124             }
1125
1126             // Types with no meaningful known layout.
1127             ty::Projection(_) | ty::Opaque(..) => {
1128                 let normalized = tcx.normalize_erasing_regions(param_env, ty);
1129                 if ty == normalized {
1130                     return Err(LayoutError::Unknown(ty));
1131                 }
1132                 tcx.layout_raw(param_env.and(normalized))?
1133             }
1134
1135             ty::Bound(..) |
1136             ty::UnnormalizedProjection(..) |
1137             ty::GeneratorWitness(..) |
1138             ty::Infer(_) => {
1139                 bug!("LayoutDetails::compute: unexpected type `{}`", ty)
1140             }
1141
1142             ty::Param(_) | ty::Error => {
1143                 return Err(LayoutError::Unknown(ty));
1144             }
1145         })
1146     }
1147
1148     /// This is invoked by the `layout_raw` query to record the final
1149     /// layout of each type.
1150     #[inline]
1151     fn record_layout_for_printing(&self, layout: TyLayout<'tcx>) {
1152         // If we are running with `-Zprint-type-sizes`, record layouts for
1153         // dumping later. Ignore layouts that are done with non-empty
1154         // environments or non-monomorphic layouts, as the user only wants
1155         // to see the stuff resulting from the final codegen session.
1156         if
1157             !self.tcx.sess.opts.debugging_opts.print_type_sizes ||
1158             layout.ty.has_param_types() ||
1159             layout.ty.has_self_ty() ||
1160             !self.param_env.caller_bounds.is_empty()
1161         {
1162             return;
1163         }
1164
1165         self.record_layout_for_printing_outlined(layout)
1166     }
1167
1168     fn record_layout_for_printing_outlined(&self, layout: TyLayout<'tcx>) {
1169         // (delay format until we actually need it)
1170         let record = |kind, packed, opt_discr_size, variants| {
1171             let type_desc = format!("{:?}", layout.ty);
1172             self.tcx.sess.code_stats.borrow_mut().record_type_size(kind,
1173                                                                    type_desc,
1174                                                                    layout.align,
1175                                                                    layout.size,
1176                                                                    packed,
1177                                                                    opt_discr_size,
1178                                                                    variants);
1179         };
1180
1181         let adt_def = match layout.ty.sty {
1182             ty::Adt(ref adt_def, _) => {
1183                 debug!("print-type-size t: `{:?}` process adt", layout.ty);
1184                 adt_def
1185             }
1186
1187             ty::Closure(..) => {
1188                 debug!("print-type-size t: `{:?}` record closure", layout.ty);
1189                 record(DataTypeKind::Closure, false, None, vec![]);
1190                 return;
1191             }
1192
1193             _ => {
1194                 debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
1195                 return;
1196             }
1197         };
1198
1199         let adt_kind = adt_def.adt_kind();
1200         let adt_packed = adt_def.repr.packed();
1201
1202         let build_variant_info = |n: Option<ast::Name>,
1203                                   flds: &[ast::Name],
1204                                   layout: TyLayout<'tcx>| {
1205             let mut min_size = Size::ZERO;
1206             let field_info: Vec<_> = flds.iter().enumerate().map(|(i, &name)| {
1207                 match layout.field(self, i) {
1208                     Err(err) => {
1209                         bug!("no layout found for field {}: `{:?}`", name, err);
1210                     }
1211                     Ok(field_layout) => {
1212                         let offset = layout.fields.offset(i);
1213                         let field_end = offset + field_layout.size;
1214                         if min_size < field_end {
1215                             min_size = field_end;
1216                         }
1217                         session::FieldInfo {
1218                             name: name.to_string(),
1219                             offset: offset.bytes(),
1220                             size: field_layout.size.bytes(),
1221                             align: field_layout.align.abi(),
1222                         }
1223                     }
1224                 }
1225             }).collect();
1226
1227             session::VariantInfo {
1228                 name: n.map(|n|n.to_string()),
1229                 kind: if layout.is_unsized() {
1230                     session::SizeKind::Min
1231                 } else {
1232                     session::SizeKind::Exact
1233                 },
1234                 align: layout.align.abi(),
1235                 size: if min_size.bytes() == 0 {
1236                     layout.size.bytes()
1237                 } else {
1238                     min_size.bytes()
1239                 },
1240                 fields: field_info,
1241             }
1242         };
1243
1244         match layout.variants {
1245             Variants::Single { index } => {
1246                 debug!("print-type-size `{:#?}` variant {}",
1247                        layout, adt_def.variants[index].name);
1248                 if !adt_def.variants.is_empty() {
1249                     let variant_def = &adt_def.variants[index];
1250                     let fields: Vec<_> =
1251                         variant_def.fields.iter().map(|f| f.ident.name).collect();
1252                     record(adt_kind.into(),
1253                            adt_packed,
1254                            None,
1255                            vec![build_variant_info(Some(variant_def.name),
1256                                                    &fields,
1257                                                    layout)]);
1258                 } else {
1259                     // (This case arises for *empty* enums; so give it
1260                     // zero variants.)
1261                     record(adt_kind.into(), adt_packed, None, vec![]);
1262                 }
1263             }
1264
1265             Variants::NicheFilling { .. } |
1266             Variants::Tagged { .. } => {
1267                 debug!("print-type-size `{:#?}` adt general variants def {}",
1268                        layout.ty, adt_def.variants.len());
1269                 let variant_infos: Vec<_> =
1270                     adt_def.variants.iter_enumerated().map(|(i, variant_def)| {
1271                         let fields: Vec<_> =
1272                             variant_def.fields.iter().map(|f| f.ident.name).collect();
1273                         build_variant_info(Some(variant_def.name),
1274                                            &fields,
1275                                            layout.for_variant(self, i))
1276                     })
1277                     .collect();
1278                 record(adt_kind.into(), adt_packed, match layout.variants {
1279                     Variants::Tagged { ref tag, .. } => Some(tag.value.size(self)),
1280                     _ => None
1281                 }, variant_infos);
1282             }
1283         }
1284     }
1285 }
1286
1287 /// Type size "skeleton", i.e. the only information determining a type's size.
1288 /// While this is conservative, (aside from constant sizes, only pointers,
1289 /// newtypes thereof and null pointer optimized enums are allowed), it is
1290 /// enough to statically check common usecases of transmute.
1291 #[derive(Copy, Clone, Debug)]
1292 pub enum SizeSkeleton<'tcx> {
1293     /// Any statically computable Layout.
1294     Known(Size),
1295
1296     /// A potentially-fat pointer.
1297     Pointer {
1298         /// If true, this pointer is never null.
1299         non_zero: bool,
1300         /// The type which determines the unsized metadata, if any,
1301         /// of this pointer. Either a type parameter or a projection
1302         /// depending on one, with regions erased.
1303         tail: Ty<'tcx>
1304     }
1305 }
1306
impl<'a, 'tcx> SizeSkeleton<'tcx> {
    /// Attempts to compute a `SizeSkeleton` for `ty`.
    ///
    /// If the full layout computation succeeds, the skeleton is the known
    /// size. Otherwise, pointers — and newtypes / two-variant enums that
    /// reduce to a single pointer field — can still be given a `Pointer`
    /// skeleton; every other case propagates the original `LayoutError`.
    pub fn compute(ty: Ty<'tcx>,
                   tcx: TyCtxt<'a, 'tcx, 'tcx>,
                   param_env: ty::ParamEnv<'tcx>)
                   -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
        debug_assert!(!ty.has_infer_types());

        // First try computing a static layout.
        let err = match tcx.layout_of(param_env.and(ty)) {
            Ok(layout) => {
                return Ok(SizeSkeleton::Known(layout.size));
            }
            Err(err) => err
        };

        match ty.sty {
            ty::Ref(_, pointee, _) |
            ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                // References are never null; raw pointers may be.
                let non_zero = !ty.is_unsafe_ptr();
                let tail = tcx.struct_tail(pointee);
                match tail.sty {
                    ty::Param(_) | ty::Projection(_) => {
                        debug_assert!(tail.has_param_types() || tail.has_self_ty());
                        Ok(SizeSkeleton::Pointer {
                            non_zero,
                            tail: tcx.erase_regions(&tail)
                        })
                    }
                    _ => {
                        // With a concrete tail, `layout_of` should not have
                        // failed in the first place.
                        bug!("SizeSkeleton::compute({}): layout errored ({}), yet \
                              tail `{}` is not a type parameter or a projection",
                             ty, err, tail)
                    }
                }
            }

            ty::Adt(def, substs) => {
                // Only newtypes and enums w/ nullable pointer optimization.
                if def.is_union() || def.variants.is_empty() || def.variants.len() > 2 {
                    return Err(err);
                }

                // Get a zero-sized variant or a pointer newtype.
                // Returns `Ok(Some(pointer skeleton))` when the variant is
                // exactly one pointer plus zero-sized fields, `Ok(None)` when
                // it is entirely zero-sized, and `Err` otherwise.
                let zero_or_ptr_variant = |i| {
                    let i = VariantIdx::new(i);
                    let fields = def.variants[i].fields.iter().map(|field| {
                        SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env)
                    });
                    let mut ptr = None;
                    for field in fields {
                        let field = field?;
                        match field {
                            SizeSkeleton::Known(size) => {
                                // Any nonzero-sized known field disqualifies
                                // the variant.
                                if size.bytes() > 0 {
                                    return Err(err);
                                }
                            }
                            SizeSkeleton::Pointer {..} => {
                                // At most one pointer field is allowed.
                                if ptr.is_some() {
                                    return Err(err);
                                }
                                ptr = Some(field);
                            }
                        }
                    }
                    Ok(ptr)
                };

                let v0 = zero_or_ptr_variant(0)?;
                // Newtype.
                if def.variants.len() == 1 {
                    if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
                        return Ok(SizeSkeleton::Pointer {
                            // A declared scalar valid range that excludes zero
                            // also guarantees the pointer is non-null.
                            non_zero: non_zero || match tcx.layout_scalar_valid_range(def.did) {
                                (Bound::Included(start), Bound::Unbounded) => start > 0,
                                (Bound::Included(start), Bound::Included(end)) =>
                                    0 < start && start < end,
                                _ => false,
                            },
                            tail,
                        });
                    } else {
                        return Err(err);
                    }
                }

                let v1 = zero_or_ptr_variant(1)?;
                // Nullable pointer enum optimization.
                match (v0, v1) {
                    (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None) |
                    (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
                        // The null value encodes the all-zero-sized variant,
                        // so the enum itself is pointer-sized but nullable.
                        Ok(SizeSkeleton::Pointer {
                            non_zero: false,
                            tail,
                        })
                    }
                    _ => Err(err)
                }
            }

            ty::Projection(_) | ty::Opaque(..) => {
                // Normalizing may expose one of the cases handled above;
                // if nothing changes, bail out rather than recurse forever.
                let normalized = tcx.normalize_erasing_regions(param_env, ty);
                if ty == normalized {
                    Err(err)
                } else {
                    SizeSkeleton::compute(normalized, tcx, param_env)
                }
            }

            _ => Err(err)
        }
    }

    /// Returns true if `self` and `other` are guaranteed to have equal sizes:
    /// either both sizes are statically known and equal, or both are pointers
    /// whose unsized tails are the same type.
    pub fn same_size(self, other: SizeSkeleton<'_>) -> bool {
        match (self, other) {
            (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
            (SizeSkeleton::Pointer { tail: a, .. },
             SizeSkeleton::Pointer { tail: b, .. }) => a == b,
            _ => false
        }
    }
}
1429
/// A context that can provide a global `TyCtxt` (in addition to a data
/// layout, via the `HasDataLayout` supertrait).
pub trait HasTyCtxt<'tcx>: HasDataLayout {
    fn tcx<'a>(&'a self) -> TyCtxt<'a, 'tcx, 'tcx>;
}
1433
1434 impl<'a, 'gcx, 'tcx> HasDataLayout for TyCtxt<'a, 'gcx, 'tcx> {
1435     fn data_layout(&self) -> &TargetDataLayout {
1436         &self.data_layout
1437     }
1438 }
1439
1440 impl<'a, 'gcx, 'tcx> HasTyCtxt<'gcx> for TyCtxt<'a, 'gcx, 'tcx> {
1441     fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'gcx> {
1442         self.global_tcx()
1443     }
1444 }
1445
1446 impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
1447     fn data_layout(&self) -> &TargetDataLayout {
1448         self.tcx.data_layout()
1449     }
1450 }
1451
1452 impl<'gcx, 'tcx, T: HasTyCtxt<'gcx>> HasTyCtxt<'gcx> for LayoutCx<'tcx, T> {
1453     fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'gcx> {
1454         self.tcx.tcx()
1455     }
1456 }
1457
/// Abstracts over plain values and `Result`s, letting the same layout code
/// serve both infallible (`TyLayout`) and fallible
/// (`Result<TyLayout, LayoutError>`) `LayoutOf::TyLayout` types.
pub trait MaybeResult<T> {
    /// Wraps a successfully computed value.
    fn from_ok(x: T) -> Self;
    /// Applies `f` to the success value, if one is present.
    fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self;
}
1462
1463 impl<T> MaybeResult<T> for T {
1464     fn from_ok(x: T) -> Self {
1465         x
1466     }
1467     fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self {
1468         f(self)
1469     }
1470 }
1471
1472 impl<T, E> MaybeResult<T> for Result<T, E> {
1473     fn from_ok(x: T) -> Self {
1474         Ok(x)
1475     }
1476     fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self {
1477         self.map(f)
1478     }
1479 }
1480
/// The target-independent `TyLayout` from `rustc_target`, instantiated with
/// this crate's `Ty`.
pub type TyLayout<'tcx> = ::rustc_target::abi::TyLayout<'tcx, Ty<'tcx>>;
1482
1483 impl<'a, 'tcx> LayoutOf for LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
1484     type Ty = Ty<'tcx>;
1485     type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;
1486
1487     /// Computes the layout of a type. Note that this implicitly
1488     /// executes in "reveal all" mode.
1489     fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout {
1490         let param_env = self.param_env.with_reveal_all();
1491         let ty = self.tcx.normalize_erasing_regions(param_env, ty);
1492         let details = self.tcx.layout_raw(param_env.and(ty))?;
1493         let layout = TyLayout {
1494             ty,
1495             details
1496         };
1497
1498         // NB: This recording is normally disabled; when enabled, it
1499         // can however trigger recursive invocations of `layout_of`.
1500         // Therefore, we execute it *after* the main query has
1501         // completed, to avoid problems around recursive structures
1502         // and the like. (Admittedly, I wasn't able to reproduce a problem
1503         // here, but it seems like the right thing to do. -nmatsakis)
1504         self.record_layout_for_printing(layout);
1505
1506         Ok(layout)
1507     }
1508 }
1509
1510 impl<'a, 'tcx> LayoutOf for LayoutCx<'tcx, ty::query::TyCtxtAt<'a, 'tcx, 'tcx>> {
1511     type Ty = Ty<'tcx>;
1512     type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;
1513
1514     /// Computes the layout of a type. Note that this implicitly
1515     /// executes in "reveal all" mode.
1516     fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout {
1517         let param_env = self.param_env.with_reveal_all();
1518         let ty = self.tcx.normalize_erasing_regions(param_env, ty);
1519         let details = self.tcx.layout_raw(param_env.and(ty))?;
1520         let layout = TyLayout {
1521             ty,
1522             details
1523         };
1524
1525         // NB: This recording is normally disabled; when enabled, it
1526         // can however trigger recursive invocations of `layout_of`.
1527         // Therefore, we execute it *after* the main query has
1528         // completed, to avoid problems around recursive structures
1529         // and the like. (Admittedly, I wasn't able to reproduce a problem
1530         // here, but it seems like the right thing to do. -nmatsakis)
1531         let cx = LayoutCx {
1532             tcx: *self.tcx,
1533             param_env: self.param_env
1534         };
1535         cx.record_layout_for_printing(layout);
1536
1537         Ok(layout)
1538     }
1539 }
1540
1541 // Helper (inherent) `layout_of` methods to avoid pushing `LayoutCx` to users.
1542 impl TyCtxt<'a, 'tcx, '_> {
1543     /// Computes the layout of a type. Note that this implicitly
1544     /// executes in "reveal all" mode.
1545     #[inline]
1546     pub fn layout_of(self, param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
1547                      -> Result<TyLayout<'tcx>, LayoutError<'tcx>> {
1548         let cx = LayoutCx {
1549             tcx: self.global_tcx(),
1550             param_env: param_env_and_ty.param_env
1551         };
1552         cx.layout_of(param_env_and_ty.value)
1553     }
1554 }
1555
1556 impl ty::query::TyCtxtAt<'a, 'tcx, '_> {
1557     /// Computes the layout of a type. Note that this implicitly
1558     /// executes in "reveal all" mode.
1559     #[inline]
1560     pub fn layout_of(self, param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
1561                      -> Result<TyLayout<'tcx>, LayoutError<'tcx>> {
1562         let cx = LayoutCx {
1563             tcx: self.global_tcx().at(self.span),
1564             param_env: param_env_and_ty.param_env
1565         };
1566         cx.layout_of(param_env_and_ty.value)
1567     }
1568 }
1569
impl<'a, 'tcx, C> TyLayoutMethods<'tcx, C> for Ty<'tcx>
    where C: LayoutOf<Ty = Ty<'tcx>> + HasTyCtxt<'tcx>,
          C::TyLayout: MaybeResult<TyLayout<'tcx>>
{
    /// Projects the layout of the variant `variant_index` out of `this`.
    fn for_variant(this: TyLayout<'tcx>, cx: &C, variant_index: VariantIdx) -> TyLayout<'tcx> {
        let details = match this.variants {
            // Asking for the variant we already are: reuse the details.
            Variants::Single { index } if index == variant_index => this.details,

            Variants::Single { index } => {
                // Deny calling for_variant more than once for non-Single enums.
                cx.layout_of(this.ty).map_same(|layout| {
                    assert_eq!(layout.variants, Variants::Single { index });
                    layout
                });

                // A different variant was requested; synthesize a zero-sized,
                // uninhabited layout for it.
                let fields = match this.ty.sty {
                    ty::Adt(def, _) => def.variants[variant_index].fields.len(),
                    _ => bug!()
                };
                let tcx = cx.tcx();
                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: variant_index },
                    fields: FieldPlacement::Union(fields),
                    abi: Abi::Uninhabited,
                    align: tcx.data_layout.i8_align,
                    size: Size::ZERO
                })
            }

            // Multi-variant layouts carry per-variant details directly.
            Variants::NicheFilling { ref variants, .. } |
            Variants::Tagged { ref variants, .. } => {
                &variants[variant_index]
            }
        };

        assert_eq!(details.variants, Variants::Single { index: variant_index });

        TyLayout {
            ty: this.ty,
            details
        }
    }

    /// Computes the layout of field `i` of `this`, by determining the
    /// field's type and deferring to `cx.layout_of`.
    fn field(this: TyLayout<'tcx>, cx: &C, i: usize) -> C::TyLayout {
        let tcx = cx.tcx();
        cx.layout_of(match this.ty.sty {
            // Types without fields: asking for a field is a compiler bug.
            ty::Bool |
            ty::Char |
            ty::Int(_) |
            ty::Uint(_) |
            ty::Float(_) |
            ty::FnPtr(_) |
            ty::Never |
            ty::FnDef(..) |
            ty::GeneratorWitness(..) |
            ty::Foreign(..) |
            ty::Dynamic(..) => {
                bug!("TyLayout::field_type({:?}): not applicable", this)
            }

            // Potentially-fat pointers.
            ty::Ref(_, pointee, _) |
            ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                assert!(i < this.fields.count());

                // Reuse the fat *T type as its own thin pointer data field.
                // This provides information about e.g. DST struct pointees
                // (which may have no non-DST form), and will work as long
                // as the `Abi` or `FieldPlacement` is checked by users.
                if i == 0 {
                    let nil = tcx.mk_unit();
                    let ptr_ty = if this.ty.is_unsafe_ptr() {
                        tcx.mk_mut_ptr(nil)
                    } else {
                        tcx.mk_mut_ref(tcx.types.re_static, nil)
                    };
                    return cx.layout_of(ptr_ty).map_same(|mut ptr_layout| {
                        ptr_layout.ty = this.ty;
                        ptr_layout
                    });
                }

                // Field 1 is the pointer metadata, determined by the tail.
                match tcx.struct_tail(pointee).sty {
                    ty::Slice(_) |
                    ty::Str => tcx.types.usize,
                    ty::Dynamic(_, _) => {
                        // Vtable pointer, modeled as a 3-usize array reference.
                        tcx.mk_imm_ref(
                            tcx.types.re_static,
                            tcx.mk_array(tcx.types.usize, 3),
                        )
                        /* FIXME use actual fn pointers
                        Warning: naively computing the number of entries in the
                        vtable by counting the methods on the trait + methods on
                        all parent traits does not work, because some methods can
                        be not object safe and thus excluded from the vtable.
                        Increase this counter if you tried to implement this but
                        failed to do it without duplicating a lot of code from
                        other places in the compiler: 2
                        tcx.mk_tup(&[
                            tcx.mk_array(tcx.types.usize, 3),
                            tcx.mk_array(Option<fn()>),
                        ])
                        */
                    }
                    _ => bug!("TyLayout::field_type({:?}): not applicable", this)
                }
            }

            // Arrays and slices.
            ty::Array(element, _) |
            ty::Slice(element) => element,
            ty::Str => tcx.types.u8,

            // Tuples, generators and closures.
            ty::Closure(def_id, ref substs) => {
                substs.upvar_tys(def_id, tcx).nth(i).unwrap()
            }

            ty::Generator(def_id, ref substs, _) => {
                substs.field_tys(def_id, tcx).nth(i).unwrap()
            }

            ty::Tuple(tys) => tys[i],

            // SIMD vector types.
            ty::Adt(def, ..) if def.repr.simd() => {
                this.ty.simd_type(tcx)
            }

            // ADTs.
            ty::Adt(def, substs) => {
                match this.variants {
                    Variants::Single { index } => {
                        def.variants[index].fields[i].ty(tcx, substs)
                    }

                    // Discriminant field for enums (where applicable).
                    Variants::Tagged { tag: ref discr, .. } |
                    Variants::NicheFilling { niche: ref discr, .. } => {
                        assert_eq!(i, 0);
                        let layout = LayoutDetails::scalar(cx, discr.clone());
                        return MaybeResult::from_ok(TyLayout {
                            details: tcx.intern_layout(layout),
                            ty: discr.value.to_ty(tcx)
                        });
                    }
                }
            }

            // Unresolved/erroneous types should not reach layout code.
            ty::Projection(_) | ty::UnnormalizedProjection(..) | ty::Bound(..) |
            ty::Opaque(..) | ty::Param(_) | ty::Infer(_) | ty::Error => {
                bug!("TyLayout::field_type: unexpected type `{}`", this.ty)
            }
        })
    }
}
1726
/// A "niche" inside a layout: a scalar together with the count of values
/// that lie outside its valid range and can therefore be repurposed
/// (e.g. for `Variants::NicheFilling` discriminant encoding).
struct Niche {
    // Byte offset of the scalar within the enclosing layout.
    offset: Size,
    // The scalar whose invalid values form the niche.
    scalar: Scalar,
    // How many values fall outside the scalar's valid range.
    available: u128,
}
1732
impl Niche {
    /// Attempts to reserve `count` values from this niche.
    ///
    /// On success, returns the first reserved value together with an
    /// adjusted `Scalar` whose valid range has been extended past its old
    /// end to cover the reserved values. Returns `None` when fewer than
    /// `count` values are available.
    fn reserve<'a, 'tcx>(
        &self,
        cx: &LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>>,
        count: u128,
    ) -> Option<(u128, Scalar)> {
        if count > self.available {
            return None;
        }
        let Scalar { value, valid_range: ref v } = self.scalar;
        let bits = value.size(cx).bits();
        assert!(bits <= 128);
        // Mask selecting only the low `bits` bits of a value.
        let max_value = !0u128 >> (128 - bits);
        // The reserved values begin just past the current range's end,
        // wrapping around within the `bits`-bit value space.
        let start = v.end().wrapping_add(1) & max_value;
        let end = v.end().wrapping_add(count) & max_value;
        Some((start, Scalar { value, valid_range: *v.start()..=end }))
    }
}
1751
impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
    /// Find the offset of a niche leaf field, starting from
    /// the given type and recursing through aggregates.
    // FIXME(eddyb) traverse already optimized enums.
    fn find_niche(&self, layout: TyLayout<'tcx>) -> Result<Option<Niche>, LayoutError<'tcx>> {
        // Builds a `Niche` for a single scalar at `offset`, or `None` if the
        // scalar's valid range already covers every representable value.
        let scalar_niche = |scalar: &Scalar, offset| {
            let Scalar { value, valid_range: ref v } = *scalar;

            let bits = value.size(self).bits();
            assert!(bits <= 128);
            let max_value = !0u128 >> (128 - bits);

            // Find out how many values are outside the valid range.
            let available = if v.start() <= v.end() {
                // Contiguous valid range: invalid values lie below the start
                // and above the end.
                v.start() + (max_value - v.end())
            } else {
                // Wrapping valid range: invalid values lie strictly between
                // end and start.
                v.start() - v.end() - 1
            };

            // Give up if there is no niche value available.
            if available == 0 {
                return None;
            }

            Some(Niche { offset, scalar: scalar.clone(), available })
        };

        // Locals variables which live across yields are stored
        // in the generator type as fields. These may be uninitialized
        // so we don't look for niches there.
        if let ty::Generator(..) = layout.ty.sty {
            return Ok(None);
        }

        match layout.abi {
            Abi::Scalar(ref scalar) => {
                return Ok(scalar_niche(scalar, Size::ZERO));
            }
            Abi::ScalarPair(ref a, ref b) => {
                // HACK(nox): We iter on `b` and then `a` because `max_by_key`
                // returns the last maximum.
                let niche = iter::once((b, a.value.size(self).abi_align(b.value.align(self))))
                    .chain(iter::once((a, Size::ZERO)))
                    .filter_map(|(scalar, offset)| scalar_niche(scalar, offset))
                    .max_by_key(|niche| niche.available);
                return Ok(niche);
            }
            Abi::Vector { ref element, .. } => {
                return Ok(scalar_niche(element, Size::ZERO));
            }
            _ => {}
        }

        // Perhaps one of the fields is non-zero, let's recurse and find out.
        if let FieldPlacement::Union(_) = layout.fields {
            // Only Rust enums have safe-to-inspect fields
            // (a discriminant), other unions are unsafe.
            if let Variants::Single { .. } = layout.variants {
                return Ok(None);
            }
        }
        if let FieldPlacement::Array { .. } = layout.fields {
            // All array elements share one layout, so inspecting the first
            // element (if any) suffices.
            if layout.fields.count() > 0 {
                return self.find_niche(layout.field(self, 0)?);
            } else {
                return Ok(None);
            }
        }
        // Arbitrary placement: scan every field and keep the niche with the
        // most available values, adjusting its offset to this layout.
        let mut niche = None;
        let mut available = 0;
        for i in 0..layout.fields.count() {
            if let Some(mut c) = self.find_niche(layout.field(self, i)?)? {
                if c.available > available {
                    available = c.available;
                    c.offset += layout.fields.offset(i);
                    niche = Some(c);
                }
            }
        }
        Ok(niche)
    }
}
1834
impl<'a> HashStable<StableHashingContext<'a>> for Variants {
    // Stable hashing: hash the variant discriminant first, then each
    // payload field in declaration order. The order is part of the stable
    // hash and must not change.
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        use ty::layout::Variants::*;
        mem::discriminant(self).hash_stable(hcx, hasher);

        match *self {
            Single { index } => {
                index.hash_stable(hcx, hasher);
            }
            Tagged {
                ref tag,
                ref variants,
            } => {
                tag.hash_stable(hcx, hasher);
                variants.hash_stable(hcx, hasher);
            }
            NicheFilling {
                dataful_variant,
                ref niche_variants,
                ref niche,
                niche_start,
                ref variants,
            } => {
                dataful_variant.hash_stable(hcx, hasher);
                // Hash the inclusive range via its two endpoints.
                niche_variants.start().hash_stable(hcx, hasher);
                niche_variants.end().hash_stable(hcx, hasher);
                niche.hash_stable(hcx, hasher);
                niche_start.hash_stable(hcx, hasher);
                variants.hash_stable(hcx, hasher);
            }
        }
    }
}
1870
impl<'a> HashStable<StableHashingContext<'a>> for FieldPlacement {
    // Stable hashing: variant discriminant first, then each payload field
    // in declaration order.
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        use ty::layout::FieldPlacement::*;
        mem::discriminant(self).hash_stable(hcx, hasher);

        match *self {
            Union(count) => {
                count.hash_stable(hcx, hasher);
            }
            Array { count, stride } => {
                count.hash_stable(hcx, hasher);
                stride.hash_stable(hcx, hasher);
            }
            Arbitrary { ref offsets, ref memory_index } => {
                offsets.hash_stable(hcx, hasher);
                memory_index.hash_stable(hcx, hasher);
            }
        }
    }
}
1893
1894 impl<'a> HashStable<StableHashingContext<'a>> for VariantIdx {
1895     fn hash_stable<W: StableHasherResult>(
1896         &self,
1897         hcx: &mut StableHashingContext<'a>,
1898         hasher: &mut StableHasher<W>,
1899     ) {
1900         self.as_u32().hash_stable(hcx, hasher)
1901     }
1902 }
1903
impl<'a> HashStable<StableHashingContext<'a>> for Abi {
    // Stable hashing: variant discriminant first, then each payload field
    // in declaration order.
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        use ty::layout::Abi::*;
        mem::discriminant(self).hash_stable(hcx, hasher);

        match *self {
            // No payload; the discriminant alone identifies it.
            Uninhabited => {}
            Scalar(ref value) => {
                value.hash_stable(hcx, hasher);
            }
            ScalarPair(ref a, ref b) => {
                a.hash_stable(hcx, hasher);
                b.hash_stable(hcx, hasher);
            }
            Vector { ref element, count } => {
                element.hash_stable(hcx, hasher);
                count.hash_stable(hcx, hasher);
            }
            Aggregate { sized } => {
                sized.hash_stable(hcx, hasher);
            }
        }
    }
}
1930
1931 impl<'a> HashStable<StableHashingContext<'a>> for Scalar {
1932     fn hash_stable<W: StableHasherResult>(&self,
1933                                           hcx: &mut StableHashingContext<'a>,
1934                                           hasher: &mut StableHasher<W>) {
1935         let Scalar { value, ref valid_range } = *self;
1936         value.hash_stable(hcx, hasher);
1937         valid_range.start().hash_stable(hcx, hasher);
1938         valid_range.end().hash_stable(hcx, hasher);
1939     }
1940 }
1941
// Derive stable hashing for `LayoutDetails` over all of its fields.
impl_stable_hash_for!(struct ::ty::layout::LayoutDetails {
    variants,
    fields,
    abi,
    size,
    align
});
1949
// Derive stable hashing for the fieldless `Integer` enum.
impl_stable_hash_for!(enum ::ty::layout::Integer {
    I8,
    I16,
    I32,
    I64,
    I128
});
1957
// Derive stable hashing for `Primitive`, including variant payloads.
impl_stable_hash_for!(enum ::ty::layout::Primitive {
    Int(integer, signed),
    Float(fty),
    Pointer
});
1963
impl<'gcx> HashStable<StableHashingContext<'gcx>> for Align {
    // Hash both the ABI-mandated and the preferred alignment.
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'gcx>,
                                          hasher: &mut StableHasher<W>) {
        self.abi().hash_stable(hcx, hasher);
        self.pref().hash_stable(hcx, hasher);
    }
}
1972
impl<'gcx> HashStable<StableHashingContext<'gcx>> for Size {
    // A size hashes as its byte count.
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'gcx>,
                                          hasher: &mut StableHasher<W>) {
        self.bytes().hash_stable(hcx, hasher);
    }
}
1980
1981 impl<'a, 'gcx> HashStable<StableHashingContext<'a>> for LayoutError<'gcx>
1982 {
1983     fn hash_stable<W: StableHasherResult>(&self,
1984                                           hcx: &mut StableHashingContext<'a>,
1985                                           hasher: &mut StableHasher<W>) {
1986         use ty::layout::LayoutError::*;
1987         mem::discriminant(self).hash_stable(hcx, hasher);
1988
1989         match *self {
1990             Unknown(t) |
1991             SizeOverflow(t) => t.hash_stable(hcx, hasher)
1992         }
1993     }
1994 }