]> git.lizzy.rs Git - rust.git/blob - src/librustc/ty/layout.rs
Rollup merge of #53931 - iirelu:keyword-docs, r=steveklabnik
[rust.git] / src / librustc / ty / layout.rs
1 // Copyright 2016 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
4 //
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
10
11 use session::{self, DataTypeKind};
12 use ty::{self, Ty, TyCtxt, TypeFoldable, ReprOptions};
13
14 use syntax::ast::{self, IntTy, UintTy};
15 use syntax::attr;
16 use syntax_pos::DUMMY_SP;
17
18 use std::cmp;
19 use std::fmt;
20 use std::i128;
21 use std::iter;
22 use std::mem;
23 use std::ops::Bound;
24
25 use ich::StableHashingContext;
26 use rustc_data_structures::stable_hasher::{HashStable, StableHasher,
27                                            StableHasherResult};
28
29 pub use rustc_target::abi::*;
30
/// Extension methods on `Integer` (an abstract machine integer size)
/// that need access to the type context or target data layout.
pub trait IntegerExt {
    /// Converts this integer size into the corresponding Rust integer type
    /// (`u8`..`u128` when `signed` is false, `i8`..`i128` when true).
    fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, signed: bool) -> Ty<'tcx>;
    /// Gets the `Integer` for an `attr::IntType` (a `#[repr(...)]` hint),
    /// resolving `isize`/`usize` via the target's pointer-sized integer.
    fn from_attr<C: HasDataLayout>(cx: C, ity: attr::IntType) -> Integer;
    /// Finds the appropriate `Integer` type and signedness for the given
    /// signed discriminant range (`min..=max`) and `#[repr]` attribute.
    fn repr_discr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                            ty: Ty<'tcx>,
                            repr: &ReprOptions,
                            min: i128,
                            max: i128)
                            -> (Integer, bool);
}
41
42 impl IntegerExt for Integer {
43     fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, signed: bool) -> Ty<'tcx> {
44         match (*self, signed) {
45             (I8, false) => tcx.types.u8,
46             (I16, false) => tcx.types.u16,
47             (I32, false) => tcx.types.u32,
48             (I64, false) => tcx.types.u64,
49             (I128, false) => tcx.types.u128,
50             (I8, true) => tcx.types.i8,
51             (I16, true) => tcx.types.i16,
52             (I32, true) => tcx.types.i32,
53             (I64, true) => tcx.types.i64,
54             (I128, true) => tcx.types.i128,
55         }
56     }
57
58     /// Get the Integer type from an attr::IntType.
59     fn from_attr<C: HasDataLayout>(cx: C, ity: attr::IntType) -> Integer {
60         let dl = cx.data_layout();
61
62         match ity {
63             attr::SignedInt(IntTy::I8) | attr::UnsignedInt(UintTy::U8) => I8,
64             attr::SignedInt(IntTy::I16) | attr::UnsignedInt(UintTy::U16) => I16,
65             attr::SignedInt(IntTy::I32) | attr::UnsignedInt(UintTy::U32) => I32,
66             attr::SignedInt(IntTy::I64) | attr::UnsignedInt(UintTy::U64) => I64,
67             attr::SignedInt(IntTy::I128) | attr::UnsignedInt(UintTy::U128) => I128,
68             attr::SignedInt(IntTy::Isize) | attr::UnsignedInt(UintTy::Usize) => {
69                 dl.ptr_sized_integer()
70             }
71         }
72     }
73
74     /// Find the appropriate Integer type and signedness for the given
75     /// signed discriminant range and #[repr] attribute.
76     /// N.B.: u128 values above i128::MAX will be treated as signed, but
77     /// that shouldn't affect anything, other than maybe debuginfo.
78     fn repr_discr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
79                             ty: Ty<'tcx>,
80                             repr: &ReprOptions,
81                             min: i128,
82                             max: i128)
83                             -> (Integer, bool) {
84         // Theoretically, negative values could be larger in unsigned representation
85         // than the unsigned representation of the signed minimum. However, if there
86         // are any negative values, the only valid unsigned representation is u128
87         // which can fit all i128 values, so the result remains unaffected.
88         let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
89         let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));
90
91         let mut min_from_extern = None;
92         let min_default = I8;
93
94         if let Some(ity) = repr.int {
95             let discr = Integer::from_attr(tcx, ity);
96             let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
97             if discr < fit {
98                 bug!("Integer::repr_discr: `#[repr]` hint too small for \
99                       discriminant range of enum `{}", ty)
100             }
101             return (discr, ity.is_signed());
102         }
103
104         if repr.c() {
105             match &tcx.sess.target.target.arch[..] {
106                 // WARNING: the ARM EABI has two variants; the one corresponding
107                 // to `at_least == I32` appears to be used on Linux and NetBSD,
108                 // but some systems may use the variant corresponding to no
109                 // lower bound. However, we don't run on those yet...?
110                 "arm" => min_from_extern = Some(I32),
111                 _ => min_from_extern = Some(I32),
112             }
113         }
114
115         let at_least = min_from_extern.unwrap_or(min_default);
116
117         // If there are no negative values, we can use the unsigned fit.
118         if min >= 0 {
119             (cmp::max(unsigned_fit, at_least), false)
120         } else {
121             (cmp::max(signed_fit, at_least), true)
122         }
123     }
124 }
125
/// Extension methods on `Primitive` that need access to the type context.
pub trait PrimitiveExt {
    /// Converts this primitive to the corresponding Rust type
    /// (integer types, `f32`/`f64`, or `*mut ()` for `Pointer`).
    fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Ty<'tcx>;
}
129
130 impl PrimitiveExt for Primitive {
131     fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Ty<'tcx> {
132         match *self {
133             Int(i, signed) => i.to_ty(tcx, signed),
134             Float(FloatTy::F32) => tcx.types.f32,
135             Float(FloatTy::F64) => tcx.types.f64,
136             Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
137         }
138     }
139 }
140
/// The first half of a fat pointer.
///
/// - For a trait object, this is the address of the box.
/// - For a slice, this is the base address.
pub const FAT_PTR_ADDR: usize = 0;

/// The second half of a fat pointer.
///
/// - For a trait object, this is the address of the vtable.
/// - For a slice, this is the length.
pub const FAT_PTR_EXTRA: usize = 1;
152
/// Errors that can occur while computing the layout of a type.
#[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable)]
pub enum LayoutError<'tcx> {
    /// The layout could not be determined — e.g. an array length that still
    /// contains unresolved projections, or an unknown unsized tail.
    Unknown(Ty<'tcx>),
    /// A size computation (`checked_add`/`checked_mul`) overflowed; the type
    /// is too big for the target.
    SizeOverflow(Ty<'tcx>)
}
158
159 impl<'tcx> fmt::Display for LayoutError<'tcx> {
160     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
161         match *self {
162             LayoutError::Unknown(ty) => {
163                 write!(f, "the type `{:?}` has an unknown layout", ty)
164             }
165             LayoutError::SizeOverflow(ty) => {
166                 write!(f, "the type `{:?}` is too big for the current architecture", ty)
167             }
168         }
169     }
170 }
171
/// Query provider for `layout_raw`: computes the layout of a type while
/// guarding against unbounded recursion through the session recursion limit.
/// The current layout depth is tracked in the thread-local `ImplicitCtxt`.
fn layout_raw<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                        query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
                        -> Result<&'tcx LayoutDetails, LayoutError<'tcx>>
{
    ty::tls::with_related_context(tcx, move |icx| {
        let rec_limit = *tcx.sess.recursion_limit.get();
        let (param_env, ty) = query.into_parts();

        // Abort with a fatal error (rather than overflowing the stack) if the
        // type requires deeper nesting than the recursion limit allows.
        if icx.layout_depth > rec_limit {
            tcx.sess.fatal(
                &format!("overflow representing the type `{}`", ty));
        }

        // Update the ImplicitCtxt to increase the layout_depth
        let icx = ty::tls::ImplicitCtxt {
            layout_depth: icx.layout_depth + 1,
            ..icx.clone()
        };

        // Re-enter the TLS context so nested layout queries observe the
        // incremented depth, then do the actual (uncached) computation.
        ty::tls::enter_context(&icx, |_| {
            let cx = LayoutCx { tcx, param_env };
            cx.layout_raw_uncached(ty)
        })
    })
}
197
198 pub fn provide(providers: &mut ty::query::Providers<'_>) {
199     *providers = ty::query::Providers {
200         layout_raw,
201         ..*providers
202     };
203 }
204
/// Context needed to compute type layouts: a type context plus the parameter
/// environment used for normalization and sizedness checks.
#[derive(Copy, Clone)]
pub struct LayoutCx<'tcx, C> {
    // Generic over the context type `C`; instantiated with `TyCtxt` in this
    // file's impl of `layout_raw_uncached`.
    pub tcx: C,
    pub param_env: ty::ParamEnv<'tcx>
}
210
211 impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
212     fn layout_raw_uncached(self, ty: Ty<'tcx>)
213                            -> Result<&'tcx LayoutDetails, LayoutError<'tcx>> {
214         let tcx = self.tcx;
215         let param_env = self.param_env;
216         let dl = self.data_layout();
217         let scalar_unit = |value: Primitive| {
218             let bits = value.size(dl).bits();
219             assert!(bits <= 128);
220             Scalar {
221                 value,
222                 valid_range: 0..=(!0 >> (128 - bits))
223             }
224         };
225         let scalar = |value: Primitive| {
226             tcx.intern_layout(LayoutDetails::scalar(self, scalar_unit(value)))
227         };
228         let scalar_pair = |a: Scalar, b: Scalar| {
229             let align = a.value.align(dl).max(b.value.align(dl)).max(dl.aggregate_align);
230             let b_offset = a.value.size(dl).abi_align(b.value.align(dl));
231             let size = (b_offset + b.value.size(dl)).abi_align(align);
232             LayoutDetails {
233                 variants: Variants::Single { index: 0 },
234                 fields: FieldPlacement::Arbitrary {
235                     offsets: vec![Size::ZERO, b_offset],
236                     memory_index: vec![0, 1]
237                 },
238                 abi: Abi::ScalarPair(a, b),
239                 align,
240                 size
241             }
242         };
243
244         #[derive(Copy, Clone, Debug)]
245         enum StructKind {
246             /// A tuple, closure, or univariant which cannot be coerced to unsized.
247             AlwaysSized,
248             /// A univariant, the last field of which may be coerced to unsized.
249             MaybeUnsized,
250             /// A univariant, but with a prefix of an arbitrary size & alignment (e.g. enum tag).
251             Prefixed(Size, Align),
252         }
253
254         let univariant_uninterned = |fields: &[TyLayout<'_>], repr: &ReprOptions, kind| {
255             let packed = repr.packed();
256             if packed && repr.align > 0 {
257                 bug!("struct cannot be packed and aligned");
258             }
259
260             let pack = {
261                 let pack = repr.pack as u64;
262                 Align::from_bytes(pack, pack).unwrap()
263             };
264
265             let mut align = if packed {
266                 dl.i8_align
267             } else {
268                 dl.aggregate_align
269             };
270
271             let mut sized = true;
272             let mut offsets = vec![Size::ZERO; fields.len()];
273             let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();
274
275             let mut optimize = !repr.inhibit_struct_field_reordering_opt();
276             if let StructKind::Prefixed(_, align) = kind {
277                 optimize &= align.abi() == 1;
278             }
279
280             if optimize {
281                 let end = if let StructKind::MaybeUnsized = kind {
282                     fields.len() - 1
283                 } else {
284                     fields.len()
285                 };
286                 let optimizing = &mut inverse_memory_index[..end];
287                 let field_align = |f: &TyLayout<'_>| {
288                     if packed { f.align.min(pack).abi() } else { f.align.abi() }
289                 };
290                 match kind {
291                     StructKind::AlwaysSized |
292                     StructKind::MaybeUnsized => {
293                         optimizing.sort_by_key(|&x| {
294                             // Place ZSTs first to avoid "interesting offsets",
295                             // especially with only one or two non-ZST fields.
296                             let f = &fields[x as usize];
297                             (!f.is_zst(), cmp::Reverse(field_align(f)))
298                         });
299                     }
300                     StructKind::Prefixed(..) => {
301                         optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
302                     }
303                 }
304             }
305
306             // inverse_memory_index holds field indices by increasing memory offset.
307             // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
308             // We now write field offsets to the corresponding offset slot;
309             // field 5 with offset 0 puts 0 in offsets[5].
310             // At the bottom of this function, we use inverse_memory_index to produce memory_index.
311
312             let mut offset = Size::ZERO;
313
314             if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
315                 if packed {
316                     let prefix_align = prefix_align.min(pack);
317                     align = align.max(prefix_align);
318                 } else {
319                     align = align.max(prefix_align);
320                 }
321                 offset = prefix_size.abi_align(prefix_align);
322             }
323
324             for &i in &inverse_memory_index {
325                 let field = fields[i as usize];
326                 if !sized {
327                     bug!("univariant: field #{} of `{}` comes after unsized field",
328                          offsets.len(), ty);
329                 }
330
331                 if field.is_unsized() {
332                     sized = false;
333                 }
334
335                 // Invariant: offset < dl.obj_size_bound() <= 1<<61
336                 if packed {
337                     let field_pack = field.align.min(pack);
338                     offset = offset.abi_align(field_pack);
339                     align = align.max(field_pack);
340                 }
341                 else {
342                     offset = offset.abi_align(field.align);
343                     align = align.max(field.align);
344                 }
345
346                 debug!("univariant offset: {:?} field: {:#?}", offset, field);
347                 offsets[i as usize] = offset;
348
349                 offset = offset.checked_add(field.size, dl)
350                     .ok_or(LayoutError::SizeOverflow(ty))?;
351             }
352
353             if repr.align > 0 {
354                 let repr_align = repr.align as u64;
355                 align = align.max(Align::from_bytes(repr_align, repr_align).unwrap());
356                 debug!("univariant repr_align: {:?}", repr_align);
357             }
358
359             debug!("univariant min_size: {:?}", offset);
360             let min_size = offset;
361
362             // As stated above, inverse_memory_index holds field indices by increasing offset.
363             // This makes it an already-sorted view of the offsets vec.
364             // To invert it, consider:
365             // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
366             // Field 5 would be the first element, so memory_index is i:
367             // Note: if we didn't optimize, it's already right.
368
369             let mut memory_index;
370             if optimize {
371                 memory_index = vec![0; inverse_memory_index.len()];
372
373                 for i in 0..inverse_memory_index.len() {
374                     memory_index[inverse_memory_index[i] as usize]  = i as u32;
375                 }
376             } else {
377                 memory_index = inverse_memory_index;
378             }
379
380             let size = min_size.abi_align(align);
381             let mut abi = Abi::Aggregate { sized };
382
383             // Unpack newtype ABIs and find scalar pairs.
384             if sized && size.bytes() > 0 {
385                 // All other fields must be ZSTs, and we need them to all start at 0.
386                 let mut zst_offsets =
387                     offsets.iter().enumerate().filter(|&(i, _)| fields[i].is_zst());
388                 if zst_offsets.all(|(_, o)| o.bytes() == 0) {
389                     let mut non_zst_fields =
390                         fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());
391
392                     match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
393                         // We have exactly one non-ZST field.
394                         (Some((i, field)), None, None) => {
395                             // Field fills the struct and it has a scalar or scalar pair ABI.
396                             if offsets[i].bytes() == 0 &&
397                                align.abi() == field.align.abi() &&
398                                size == field.size {
399                                 match field.abi {
400                                     // For plain scalars, or vectors of them, we can't unpack
401                                     // newtypes for `#[repr(C)]`, as that affects C ABIs.
402                                     Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
403                                         abi = field.abi.clone();
404                                     }
405                                     // But scalar pairs are Rust-specific and get
406                                     // treated as aggregates by C ABIs anyway.
407                                     Abi::ScalarPair(..) => {
408                                         abi = field.abi.clone();
409                                     }
410                                     _ => {}
411                                 }
412                             }
413                         }
414
415                         // Two non-ZST fields, and they're both scalars.
416                         (Some((i, &TyLayout {
417                             details: &LayoutDetails { abi: Abi::Scalar(ref a), .. }, ..
418                         })), Some((j, &TyLayout {
419                             details: &LayoutDetails { abi: Abi::Scalar(ref b), .. }, ..
420                         })), None) => {
421                             // Order by the memory placement, not source order.
422                             let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
423                                 ((i, a), (j, b))
424                             } else {
425                                 ((j, b), (i, a))
426                             };
427                             let pair = scalar_pair(a.clone(), b.clone());
428                             let pair_offsets = match pair.fields {
429                                 FieldPlacement::Arbitrary {
430                                     ref offsets,
431                                     ref memory_index
432                                 } => {
433                                     assert_eq!(memory_index, &[0, 1]);
434                                     offsets
435                                 }
436                                 _ => bug!()
437                             };
438                             if offsets[i] == pair_offsets[0] &&
439                                offsets[j] == pair_offsets[1] &&
440                                align == pair.align &&
441                                size == pair.size {
442                                 // We can use `ScalarPair` only when it matches our
443                                 // already computed layout (including `#[repr(C)]`).
444                                 abi = pair.abi;
445                             }
446                         }
447
448                         _ => {}
449                     }
450                 }
451             }
452
453             if sized && fields.iter().any(|f| f.abi.is_uninhabited()) {
454                 abi = Abi::Uninhabited;
455             }
456
457             Ok(LayoutDetails {
458                 variants: Variants::Single { index: 0 },
459                 fields: FieldPlacement::Arbitrary {
460                     offsets,
461                     memory_index
462                 },
463                 abi,
464                 align,
465                 size
466             })
467         };
468         let univariant = |fields: &[TyLayout<'_>], repr: &ReprOptions, kind| {
469             Ok(tcx.intern_layout(univariant_uninterned(fields, repr, kind)?))
470         };
471         debug_assert!(!ty.has_infer_types());
472
473         Ok(match ty.sty {
474             // Basic scalars.
475             ty::Bool => {
476                 tcx.intern_layout(LayoutDetails::scalar(self, Scalar {
477                     value: Int(I8, false),
478                     valid_range: 0..=1
479                 }))
480             }
481             ty::Char => {
482                 tcx.intern_layout(LayoutDetails::scalar(self, Scalar {
483                     value: Int(I32, false),
484                     valid_range: 0..=0x10FFFF
485                 }))
486             }
487             ty::Int(ity) => {
488                 scalar(Int(Integer::from_attr(dl, attr::SignedInt(ity)), true))
489             }
490             ty::Uint(ity) => {
491                 scalar(Int(Integer::from_attr(dl, attr::UnsignedInt(ity)), false))
492             }
493             ty::Float(fty) => scalar(Float(fty)),
494             ty::FnPtr(_) => {
495                 let mut ptr = scalar_unit(Pointer);
496                 ptr.valid_range = 1..=*ptr.valid_range.end();
497                 tcx.intern_layout(LayoutDetails::scalar(self, ptr))
498             }
499
500             // The never type.
501             ty::Never => {
502                 tcx.intern_layout(LayoutDetails {
503                     variants: Variants::Single { index: 0 },
504                     fields: FieldPlacement::Union(0),
505                     abi: Abi::Uninhabited,
506                     align: dl.i8_align,
507                     size: Size::ZERO
508                 })
509             }
510
511             // Potentially-fat pointers.
512             ty::Ref(_, pointee, _) |
513             ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
514                 let mut data_ptr = scalar_unit(Pointer);
515                 if !ty.is_unsafe_ptr() {
516                     data_ptr.valid_range = 1..=*data_ptr.valid_range.end();
517                 }
518
519                 let pointee = tcx.normalize_erasing_regions(param_env, pointee);
520                 if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
521                     return Ok(tcx.intern_layout(LayoutDetails::scalar(self, data_ptr)));
522                 }
523
524                 let unsized_part = tcx.struct_tail(pointee);
525                 let metadata = match unsized_part.sty {
526                     ty::Foreign(..) => {
527                         return Ok(tcx.intern_layout(LayoutDetails::scalar(self, data_ptr)));
528                     }
529                     ty::Slice(_) | ty::Str => {
530                         scalar_unit(Int(dl.ptr_sized_integer(), false))
531                     }
532                     ty::Dynamic(..) => {
533                         let mut vtable = scalar_unit(Pointer);
534                         vtable.valid_range = 1..=*vtable.valid_range.end();
535                         vtable
536                     }
537                     _ => return Err(LayoutError::Unknown(unsized_part))
538                 };
539
540                 // Effectively a (ptr, meta) tuple.
541                 tcx.intern_layout(scalar_pair(data_ptr, metadata))
542             }
543
544             // Arrays and slices.
545             ty::Array(element, mut count) => {
546                 if count.has_projections() {
547                     count = tcx.normalize_erasing_regions(param_env, count);
548                     if count.has_projections() {
549                         return Err(LayoutError::Unknown(ty));
550                     }
551                 }
552
553                 let element = self.layout_of(element)?;
554                 let count = count.unwrap_usize(tcx);
555                 let size = element.size.checked_mul(count, dl)
556                     .ok_or(LayoutError::SizeOverflow(ty))?;
557
558                 tcx.intern_layout(LayoutDetails {
559                     variants: Variants::Single { index: 0 },
560                     fields: FieldPlacement::Array {
561                         stride: element.size,
562                         count
563                     },
564                     abi: Abi::Aggregate { sized: true },
565                     align: element.align,
566                     size
567                 })
568             }
569             ty::Slice(element) => {
570                 let element = self.layout_of(element)?;
571                 tcx.intern_layout(LayoutDetails {
572                     variants: Variants::Single { index: 0 },
573                     fields: FieldPlacement::Array {
574                         stride: element.size,
575                         count: 0
576                     },
577                     abi: Abi::Aggregate { sized: false },
578                     align: element.align,
579                     size: Size::ZERO
580                 })
581             }
582             ty::Str => {
583                 tcx.intern_layout(LayoutDetails {
584                     variants: Variants::Single { index: 0 },
585                     fields: FieldPlacement::Array {
586                         stride: Size::from_bytes(1),
587                         count: 0
588                     },
589                     abi: Abi::Aggregate { sized: false },
590                     align: dl.i8_align,
591                     size: Size::ZERO
592                 })
593             }
594
595             // Odd unit types.
596             ty::FnDef(..) => {
597                 univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?
598             }
599             ty::Dynamic(..) | ty::Foreign(..) => {
600                 let mut unit = univariant_uninterned(&[], &ReprOptions::default(),
601                   StructKind::AlwaysSized)?;
602                 match unit.abi {
603                     Abi::Aggregate { ref mut sized } => *sized = false,
604                     _ => bug!()
605                 }
606                 tcx.intern_layout(unit)
607             }
608
609             // Tuples, generators and closures.
610             ty::Generator(def_id, ref substs, _) => {
611                 let tys = substs.field_tys(def_id, tcx);
612                 univariant(&tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
613                     &ReprOptions::default(),
614                     StructKind::AlwaysSized)?
615             }
616
617             ty::Closure(def_id, ref substs) => {
618                 let tys = substs.upvar_tys(def_id, tcx);
619                 univariant(&tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
620                     &ReprOptions::default(),
621                     StructKind::AlwaysSized)?
622             }
623
624             ty::Tuple(tys) => {
625                 let kind = if tys.len() == 0 {
626                     StructKind::AlwaysSized
627                 } else {
628                     StructKind::MaybeUnsized
629                 };
630
631                 univariant(&tys.iter().map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
632                            &ReprOptions::default(), kind)?
633             }
634
635             // SIMD vector types.
636             ty::Adt(def, ..) if def.repr.simd() => {
637                 let element = self.layout_of(ty.simd_type(tcx))?;
638                 let count = ty.simd_size(tcx) as u64;
639                 assert!(count > 0);
640                 let scalar = match element.abi {
641                     Abi::Scalar(ref scalar) => scalar.clone(),
642                     _ => {
643                         tcx.sess.fatal(&format!("monomorphising SIMD type `{}` with \
644                                                  a non-machine element type `{}`",
645                                                 ty, element.ty));
646                     }
647                 };
648                 let size = element.size.checked_mul(count, dl)
649                     .ok_or(LayoutError::SizeOverflow(ty))?;
650                 let align = dl.vector_align(size);
651                 let size = size.abi_align(align);
652
653                 tcx.intern_layout(LayoutDetails {
654                     variants: Variants::Single { index: 0 },
655                     fields: FieldPlacement::Array {
656                         stride: element.size,
657                         count
658                     },
659                     abi: Abi::Vector {
660                         element: scalar,
661                         count
662                     },
663                     size,
664                     align,
665                 })
666             }
667
668             // ADTs.
669             ty::Adt(def, substs) => {
670                 // Cache the field layouts.
671                 let variants = def.variants.iter().map(|v| {
672                     v.fields.iter().map(|field| {
673                         self.layout_of(field.ty(tcx, substs))
674                     }).collect::<Result<Vec<_>, _>>()
675                 }).collect::<Result<Vec<_>, _>>()?;
676
677                 if def.is_union() {
678                     let packed = def.repr.packed();
679                     if packed && def.repr.align > 0 {
680                         bug!("Union cannot be packed and aligned");
681                     }
682
683                     let pack = {
684                         let pack = def.repr.pack as u64;
685                         Align::from_bytes(pack, pack).unwrap()
686                     };
687
688                     let mut align = if packed {
689                         dl.i8_align
690                     } else {
691                         dl.aggregate_align
692                     };
693
694                     if def.repr.align > 0 {
695                         let repr_align = def.repr.align as u64;
696                         align = align.max(
697                             Align::from_bytes(repr_align, repr_align).unwrap());
698                     }
699
700                     let mut size = Size::ZERO;
701                     for field in &variants[0] {
702                         assert!(!field.is_unsized());
703
704                         if packed {
705                             let field_pack = field.align.min(pack);
706                             align = align.max(field_pack);
707                         } else {
708                             align = align.max(field.align);
709                         }
710                         size = cmp::max(size, field.size);
711                     }
712
713                     return Ok(tcx.intern_layout(LayoutDetails {
714                         variants: Variants::Single { index: 0 },
715                         fields: FieldPlacement::Union(variants[0].len()),
716                         abi: Abi::Aggregate { sized: true },
717                         align,
718                         size: size.abi_align(align)
719                     }));
720                 }
721
722                 // A variant is absent if it's uninhabited and only has ZST fields.
723                 // Present uninhabited variants only require space for their fields,
724                 // but *not* an encoding of the discriminant (e.g. a tag value).
725                 // See issue #49298 for more details on the need to leave space
726                 // for non-ZST uninhabited data (mostly partial initialization).
727                 let absent = |fields: &[TyLayout<'_>]| {
728                     let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
729                     let is_zst = fields.iter().all(|f| f.is_zst());
730                     uninhabited && is_zst
731                 };
732                 let (present_first, present_second) = {
733                     let mut present_variants = (0..variants.len()).filter(|&v| {
734                         !absent(&variants[v])
735                     });
736                     (present_variants.next(), present_variants.next())
737                 };
738                 if present_first.is_none() {
739                     // Uninhabited because it has no variants, or only absent ones.
740                     return tcx.layout_raw(param_env.and(tcx.types.never));
741                 }
742
743                 let is_struct = !def.is_enum() ||
744                     // Only one variant is present.
745                     (present_second.is_none() &&
746                     // Representation optimizations are allowed.
747                     !def.repr.inhibit_enum_layout_opt());
748                 if is_struct {
749                     // Struct, or univariant enum equivalent to a struct.
750                     // (Typechecking will reject discriminant-sizing attrs.)
751
752                     let v = present_first.unwrap();
753                     let kind = if def.is_enum() || variants[v].len() == 0 {
754                         StructKind::AlwaysSized
755                     } else {
756                         let param_env = tcx.param_env(def.did);
757                         let last_field = def.variants[v].fields.last().unwrap();
758                         let always_sized = tcx.type_of(last_field.did)
759                                               .is_sized(tcx.at(DUMMY_SP), param_env);
760                         if !always_sized { StructKind::MaybeUnsized }
761                         else { StructKind::AlwaysSized }
762                     };
763
764                     let mut st = univariant_uninterned(&variants[v], &def.repr, kind)?;
765                     st.variants = Variants::Single { index: v };
766                     let (start, end) = self.tcx.layout_scalar_valid_range(def.did);
767                     match st.abi {
768                         Abi::Scalar(ref mut scalar) |
769                         Abi::ScalarPair(ref mut scalar, _) => {
770                             // the asserts ensure that we are not using the
771                             // `#[rustc_layout_scalar_valid_range(n)]`
772                             // attribute to widen the range of anything as that would probably
773                             // result in UB somewhere
774                             if let Bound::Included(start) = start {
775                                 assert!(*scalar.valid_range.start() <= start);
776                                 scalar.valid_range = start..=*scalar.valid_range.end();
777                             }
778                             if let Bound::Included(end) = end {
779                                 assert!(*scalar.valid_range.end() >= end);
780                                 scalar.valid_range = *scalar.valid_range.start()..=end;
781                             }
782                         }
783                         _ => assert!(
784                             start == Bound::Unbounded && end == Bound::Unbounded,
785                             "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
786                             def,
787                             st,
788                         ),
789                     }
790                     return Ok(tcx.intern_layout(st));
791                 }
792
793                 // The current code for niche-filling relies on variant indices
794                 // instead of actual discriminants, so dataful enums with
795                 // explicit discriminants (RFC #2363) would misbehave.
796                 let no_explicit_discriminants = def.variants.iter().enumerate()
797                     .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i));
798
799                 // Niche-filling enum optimization.
800                 if !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants {
801                     let mut dataful_variant = None;
802                     let mut niche_variants = usize::max_value()..=0;
803
804                     // Find one non-ZST variant.
805                     'variants: for (v, fields) in variants.iter().enumerate() {
806                         if absent(fields) {
807                             continue 'variants;
808                         }
809                         for f in fields {
810                             if !f.is_zst() {
811                                 if dataful_variant.is_none() {
812                                     dataful_variant = Some(v);
813                                     continue 'variants;
814                                 } else {
815                                     dataful_variant = None;
816                                     break 'variants;
817                                 }
818                             }
819                         }
820                         niche_variants = *niche_variants.start().min(&v)..=v;
821                     }
822
823                     if niche_variants.start() > niche_variants.end() {
824                         dataful_variant = None;
825                     }
826
827                     if let Some(i) = dataful_variant {
828                         let count = (niche_variants.end() - niche_variants.start() + 1) as u128;
829                         for (field_index, &field) in variants[i].iter().enumerate() {
830                             let niche = match self.find_niche(field)? {
831                                 Some(niche) => niche,
832                                 _ => continue,
833                             };
834                             let (niche_start, niche_scalar) = match niche.reserve(self, count) {
835                                 Some(pair) => pair,
836                                 None => continue,
837                             };
838
839                             let mut align = dl.aggregate_align;
840                             let st = variants.iter().enumerate().map(|(j, v)| {
841                                 let mut st = univariant_uninterned(v,
842                                     &def.repr, StructKind::AlwaysSized)?;
843                                 st.variants = Variants::Single { index: j };
844
845                                 align = align.max(st.align);
846
847                                 Ok(st)
848                             }).collect::<Result<Vec<_>, _>>()?;
849
850                             let offset = st[i].fields.offset(field_index) + niche.offset;
851                             let size = st[i].size;
852
853                             let mut abi = match st[i].abi {
854                                 Abi::Scalar(_) => Abi::Scalar(niche_scalar.clone()),
855                                 Abi::ScalarPair(ref first, ref second) => {
856                                     // We need to use scalar_unit to reset the
857                                     // valid range to the maximal one for that
858                                     // primitive, because only the niche is
859                                     // guaranteed to be initialised, not the
860                                     // other primitive.
861                                     if offset.bytes() == 0 {
862                                         Abi::ScalarPair(
863                                             niche_scalar.clone(),
864                                             scalar_unit(second.value),
865                                         )
866                                     } else {
867                                         Abi::ScalarPair(
868                                             scalar_unit(first.value),
869                                             niche_scalar.clone(),
870                                         )
871                                     }
872                                 }
873                                 _ => Abi::Aggregate { sized: true },
874                             };
875
876                             if st.iter().all(|v| v.abi.is_uninhabited()) {
877                                 abi = Abi::Uninhabited;
878                             }
879
880                             return Ok(tcx.intern_layout(LayoutDetails {
881                                 variants: Variants::NicheFilling {
882                                     dataful_variant: i,
883                                     niche_variants,
884                                     niche: niche_scalar,
885                                     niche_start,
886                                     variants: st,
887                                 },
888                                 fields: FieldPlacement::Arbitrary {
889                                     offsets: vec![offset],
890                                     memory_index: vec![0]
891                                 },
892                                 abi,
893                                 size,
894                                 align,
895                             }));
896                         }
897                     }
898                 }
899
900                 let (mut min, mut max) = (i128::max_value(), i128::min_value());
901                 let discr_type = def.repr.discr_type();
902                 let bits = Integer::from_attr(tcx, discr_type).size().bits();
903                 for (i, discr) in def.discriminants(tcx).enumerate() {
904                     if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
905                         continue;
906                     }
907                     let mut x = discr.val as i128;
908                     if discr_type.is_signed() {
909                         // sign extend the raw representation to be an i128
910                         x = (x << (128 - bits)) >> (128 - bits);
911                     }
912                     if x < min { min = x; }
913                     if x > max { max = x; }
914                 }
915                 // We might have no inhabited variants, so pretend there's at least one.
916                 if (min, max) == (i128::max_value(), i128::min_value()) {
917                     min = 0;
918                     max = 0;
919                 }
920                 assert!(min <= max, "discriminant range is {}...{}", min, max);
921                 let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max);
922
923                 let mut align = dl.aggregate_align;
924                 let mut size = Size::ZERO;
925
926                 // We're interested in the smallest alignment, so start large.
927                 let mut start_align = Align::from_bytes(256, 256).unwrap();
928                 assert_eq!(Integer::for_abi_align(dl, start_align), None);
929
930                 // repr(C) on an enum tells us to make a (tag, union) layout,
931                 // so we need to grow the prefix alignment to be at least
932                 // the alignment of the union. (This value is used both for
933                 // determining the alignment of the overall enum, and the
934                 // determining the alignment of the payload after the tag.)
935                 let mut prefix_align = min_ity.align(dl);
936                 if def.repr.c() {
937                     for fields in &variants {
938                         for field in fields {
939                             prefix_align = prefix_align.max(field.align);
940                         }
941                     }
942                 }
943
944                 // Create the set of structs that represent each variant.
945                 let mut layout_variants = variants.iter().enumerate().map(|(i, field_layouts)| {
946                     let mut st = univariant_uninterned(&field_layouts,
947                         &def.repr, StructKind::Prefixed(min_ity.size(), prefix_align))?;
948                     st.variants = Variants::Single { index: i };
949                     // Find the first field we can't move later
950                     // to make room for a larger discriminant.
951                     for field in st.fields.index_by_increasing_offset().map(|j| field_layouts[j]) {
952                         if !field.is_zst() || field.align.abi() != 1 {
953                             start_align = start_align.min(field.align);
954                             break;
955                         }
956                     }
957                     size = cmp::max(size, st.size);
958                     align = align.max(st.align);
959                     Ok(st)
960                 }).collect::<Result<Vec<_>, _>>()?;
961
962                 // Align the maximum variant size to the largest alignment.
963                 size = size.abi_align(align);
964
965                 if size.bytes() >= dl.obj_size_bound() {
966                     return Err(LayoutError::SizeOverflow(ty));
967                 }
968
969                 let typeck_ity = Integer::from_attr(dl, def.repr.discr_type());
970                 if typeck_ity < min_ity {
971                     // It is a bug if Layout decided on a greater discriminant size than typeck for
972                     // some reason at this point (based on values discriminant can take on). Mostly
973                     // because this discriminant will be loaded, and then stored into variable of
974                     // type calculated by typeck. Consider such case (a bug): typeck decided on
975                     // byte-sized discriminant, but layout thinks we need a 16-bit to store all
976                     // discriminant values. That would be a bug, because then, in codegen, in order
977                     // to store this 16-bit discriminant into 8-bit sized temporary some of the
978                     // space necessary to represent would have to be discarded (or layout is wrong
979                     // on thinking it needs 16 bits)
980                     bug!("layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
981                          min_ity, typeck_ity);
982                     // However, it is fine to make discr type however large (as an optimisation)
983                     // after this point – we’ll just truncate the value we load in codegen.
984                 }
985
986                 // Check to see if we should use a different type for the
987                 // discriminant. We can safely use a type with the same size
988                 // as the alignment of the first field of each variant.
989                 // We increase the size of the discriminant to avoid LLVM copying
990                 // padding when it doesn't need to. This normally causes unaligned
991                 // load/stores and excessive memcpy/memset operations. By using a
992                 // bigger integer size, LLVM can be sure about its contents and
993                 // won't be so conservative.
994
995                 // Use the initial field alignment
996                 let mut ity = if def.repr.c() || def.repr.int.is_some() {
997                     min_ity
998                 } else {
999                     Integer::for_abi_align(dl, start_align).unwrap_or(min_ity)
1000                 };
1001
1002                 // If the alignment is not larger than the chosen discriminant size,
1003                 // don't use the alignment as the final size.
1004                 if ity <= min_ity {
1005                     ity = min_ity;
1006                 } else {
1007                     // Patch up the variants' first few fields.
1008                     let old_ity_size = min_ity.size();
1009                     let new_ity_size = ity.size();
1010                     for variant in &mut layout_variants {
1011                         match variant.fields {
1012                             FieldPlacement::Arbitrary { ref mut offsets, .. } => {
1013                                 for i in offsets {
1014                                     if *i <= old_ity_size {
1015                                         assert_eq!(*i, old_ity_size);
1016                                         *i = new_ity_size;
1017                                     }
1018                                 }
1019                                 // We might be making the struct larger.
1020                                 if variant.size <= old_ity_size {
1021                                     variant.size = new_ity_size;
1022                                 }
1023                             }
1024                             _ => bug!()
1025                         }
1026                     }
1027                 }
1028
1029                 let tag_mask = !0u128 >> (128 - ity.size().bits());
1030                 let tag = Scalar {
1031                     value: Int(ity, signed),
1032                     valid_range: (min as u128 & tag_mask)..=(max as u128 & tag_mask),
1033                 };
1034                 let mut abi = Abi::Aggregate { sized: true };
1035                 if tag.value.size(dl) == size {
1036                     abi = Abi::Scalar(tag.clone());
1037                 } else {
1038                     // Try to use a ScalarPair for all tagged enums.
1039                     let mut common_prim = None;
1040                     for (field_layouts, layout_variant) in variants.iter().zip(&layout_variants) {
1041                         let offsets = match layout_variant.fields {
1042                             FieldPlacement::Arbitrary { ref offsets, .. } => offsets,
1043                             _ => bug!(),
1044                         };
1045                         let mut fields = field_layouts
1046                             .iter()
1047                             .zip(offsets)
1048                             .filter(|p| !p.0.is_zst());
1049                         let (field, offset) = match (fields.next(), fields.next()) {
1050                             (None, None) => continue,
1051                             (Some(pair), None) => pair,
1052                             _ => {
1053                                 common_prim = None;
1054                                 break;
1055                             }
1056                         };
1057                         let prim = match field.details.abi {
1058                             Abi::Scalar(ref scalar) => scalar.value,
1059                             _ => {
1060                                 common_prim = None;
1061                                 break;
1062                             }
1063                         };
1064                         if let Some(pair) = common_prim {
1065                             // This is pretty conservative. We could go fancier
1066                             // by conflating things like i32 and u32, or even
1067                             // realising that (u8, u8) could just cohabit with
1068                             // u16 or even u32.
1069                             if pair != (prim, offset) {
1070                                 common_prim = None;
1071                                 break;
1072                             }
1073                         } else {
1074                             common_prim = Some((prim, offset));
1075                         }
1076                     }
1077                     if let Some((prim, offset)) = common_prim {
1078                         let pair = scalar_pair(tag.clone(), scalar_unit(prim));
1079                         let pair_offsets = match pair.fields {
1080                             FieldPlacement::Arbitrary {
1081                                 ref offsets,
1082                                 ref memory_index
1083                             } => {
1084                                 assert_eq!(memory_index, &[0, 1]);
1085                                 offsets
1086                             }
1087                             _ => bug!()
1088                         };
1089                         if pair_offsets[0] == Size::ZERO &&
1090                             pair_offsets[1] == *offset &&
1091                             align == pair.align &&
1092                             size == pair.size {
1093                             // We can use `ScalarPair` only when it matches our
1094                             // already computed layout (including `#[repr(C)]`).
1095                             abi = pair.abi;
1096                         }
1097                     }
1098                 }
1099
1100                 if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
1101                     abi = Abi::Uninhabited;
1102                 }
1103
1104                 tcx.intern_layout(LayoutDetails {
1105                     variants: Variants::Tagged {
1106                         tag,
1107                         variants: layout_variants,
1108                     },
1109                     fields: FieldPlacement::Arbitrary {
1110                         offsets: vec![Size::ZERO],
1111                         memory_index: vec![0]
1112                     },
1113                     abi,
1114                     align,
1115                     size
1116                 })
1117             }
1118
1119             // Types with no meaningful known layout.
1120             ty::Projection(_) | ty::Opaque(..) => {
1121                 let normalized = tcx.normalize_erasing_regions(param_env, ty);
1122                 if ty == normalized {
1123                     return Err(LayoutError::Unknown(ty));
1124                 }
1125                 tcx.layout_raw(param_env.and(normalized))?
1126             }
1127             ty::UnnormalizedProjection(..) | ty::GeneratorWitness(..) | ty::Infer(_) => {
1128                 bug!("LayoutDetails::compute: unexpected type `{}`", ty)
1129             }
1130             ty::Param(_) | ty::Error => {
1131                 return Err(LayoutError::Unknown(ty));
1132             }
1133         })
1134     }
1135
1136     /// This is invoked by the `layout_raw` query to record the final
1137     /// layout of each type.
1138     #[inline]
1139     fn record_layout_for_printing(self, layout: TyLayout<'tcx>) {
1140         // If we are running with `-Zprint-type-sizes`, record layouts for
1141         // dumping later. Ignore layouts that are done with non-empty
1142         // environments or non-monomorphic layouts, as the user only wants
1143         // to see the stuff resulting from the final codegen session.
1144         if
1145             !self.tcx.sess.opts.debugging_opts.print_type_sizes ||
1146             layout.ty.has_param_types() ||
1147             layout.ty.has_self_ty() ||
1148             !self.param_env.caller_bounds.is_empty()
1149         {
1150             return;
1151         }
1152
1153         self.record_layout_for_printing_outlined(layout)
1154     }
1155
1156     fn record_layout_for_printing_outlined(self, layout: TyLayout<'tcx>) {
1157         // (delay format until we actually need it)
1158         let record = |kind, packed, opt_discr_size, variants| {
1159             let type_desc = format!("{:?}", layout.ty);
1160             self.tcx.sess.code_stats.borrow_mut().record_type_size(kind,
1161                                                                    type_desc,
1162                                                                    layout.align,
1163                                                                    layout.size,
1164                                                                    packed,
1165                                                                    opt_discr_size,
1166                                                                    variants);
1167         };
1168
1169         let adt_def = match layout.ty.sty {
1170             ty::Adt(ref adt_def, _) => {
1171                 debug!("print-type-size t: `{:?}` process adt", layout.ty);
1172                 adt_def
1173             }
1174
1175             ty::Closure(..) => {
1176                 debug!("print-type-size t: `{:?}` record closure", layout.ty);
1177                 record(DataTypeKind::Closure, false, None, vec![]);
1178                 return;
1179             }
1180
1181             _ => {
1182                 debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
1183                 return;
1184             }
1185         };
1186
1187         let adt_kind = adt_def.adt_kind();
1188         let adt_packed = adt_def.repr.packed();
1189
1190         let build_variant_info = |n: Option<ast::Name>,
1191                                   flds: &[ast::Name],
1192                                   layout: TyLayout<'tcx>| {
1193             let mut min_size = Size::ZERO;
1194             let field_info: Vec<_> = flds.iter().enumerate().map(|(i, &name)| {
1195                 match layout.field(self, i) {
1196                     Err(err) => {
1197                         bug!("no layout found for field {}: `{:?}`", name, err);
1198                     }
1199                     Ok(field_layout) => {
1200                         let offset = layout.fields.offset(i);
1201                         let field_end = offset + field_layout.size;
1202                         if min_size < field_end {
1203                             min_size = field_end;
1204                         }
1205                         session::FieldInfo {
1206                             name: name.to_string(),
1207                             offset: offset.bytes(),
1208                             size: field_layout.size.bytes(),
1209                             align: field_layout.align.abi(),
1210                         }
1211                     }
1212                 }
1213             }).collect();
1214
1215             session::VariantInfo {
1216                 name: n.map(|n|n.to_string()),
1217                 kind: if layout.is_unsized() {
1218                     session::SizeKind::Min
1219                 } else {
1220                     session::SizeKind::Exact
1221                 },
1222                 align: layout.align.abi(),
1223                 size: if min_size.bytes() == 0 {
1224                     layout.size.bytes()
1225                 } else {
1226                     min_size.bytes()
1227                 },
1228                 fields: field_info,
1229             }
1230         };
1231
1232         match layout.variants {
1233             Variants::Single { index } => {
1234                 debug!("print-type-size `{:#?}` variant {}",
1235                        layout, adt_def.variants[index].name);
1236                 if !adt_def.variants.is_empty() {
1237                     let variant_def = &adt_def.variants[index];
1238                     let fields: Vec<_> =
1239                         variant_def.fields.iter().map(|f| f.ident.name).collect();
1240                     record(adt_kind.into(),
1241                            adt_packed,
1242                            None,
1243                            vec![build_variant_info(Some(variant_def.name),
1244                                                    &fields,
1245                                                    layout)]);
1246                 } else {
1247                     // (This case arises for *empty* enums; so give it
1248                     // zero variants.)
1249                     record(adt_kind.into(), adt_packed, None, vec![]);
1250                 }
1251             }
1252
1253             Variants::NicheFilling { .. } |
1254             Variants::Tagged { .. } => {
1255                 debug!("print-type-size `{:#?}` adt general variants def {}",
1256                        layout.ty, adt_def.variants.len());
1257                 let variant_infos: Vec<_> =
1258                     adt_def.variants.iter().enumerate().map(|(i, variant_def)| {
1259                         let fields: Vec<_> =
1260                             variant_def.fields.iter().map(|f| f.ident.name).collect();
1261                         build_variant_info(Some(variant_def.name),
1262                                            &fields,
1263                                            layout.for_variant(self, i))
1264                     })
1265                     .collect();
1266                 record(adt_kind.into(), adt_packed, match layout.variants {
1267                     Variants::Tagged { ref tag, .. } => Some(tag.value.size(self)),
1268                     _ => None
1269                 }, variant_infos);
1270             }
1271         }
1272     }
1273 }
1274
/// Type size "skeleton", i.e. the only information determining a type's size.
/// While this is conservative, (aside from constant sizes, only pointers,
/// newtypes thereof and null pointer optimized enums are allowed), it is
/// enough to statically check common usecases of transmute.
///
/// Unlike a full `LayoutDetails`, a skeleton can be computed even when the
/// size depends on an unresolved type parameter, as long as that parameter
/// only appears as the pointee of a (possibly fat) pointer.
#[derive(Copy, Clone, Debug)]
pub enum SizeSkeleton<'tcx> {
    /// Any statically computable Layout.
    Known(Size),

    /// A potentially-fat pointer.
    Pointer {
        /// If true, this pointer is never null.
        non_zero: bool,
        /// The type which determines the unsized metadata, if any,
        /// of this pointer. Either a type parameter or a projection
        /// depending on one, with regions erased.
        tail: Ty<'tcx>
    }
}
1294
impl<'a, 'tcx> SizeSkeleton<'tcx> {
    /// Tries to determine the size "skeleton" of `ty`: either an exactly
    /// known size, or the fact that the type is exactly one (possibly
    /// non-null) pointer — even when a full layout cannot be computed,
    /// e.g. because the type still mentions type parameters or projections.
    pub fn compute(ty: Ty<'tcx>,
                   tcx: TyCtxt<'a, 'tcx, 'tcx>,
                   param_env: ty::ParamEnv<'tcx>)
                   -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
        debug_assert!(!ty.has_infer_types());

        // First try computing a static layout.
        let err = match tcx.layout_of(param_env.and(ty)) {
            Ok(layout) => {
                return Ok(SizeSkeleton::Known(layout.size));
            }
            Err(err) => err
        };

        match ty.sty {
            // References and raw pointers: pointer-shaped; references and
            // safe wrappers are additionally known to be non-null.
            ty::Ref(_, pointee, _) |
            ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                let non_zero = !ty.is_unsafe_ptr();
                let tail = tcx.struct_tail(pointee);
                match tail.sty {
                    ty::Param(_) | ty::Projection(_) => {
                        debug_assert!(tail.has_param_types() || tail.has_self_ty());
                        Ok(SizeSkeleton::Pointer {
                            non_zero,
                            tail: tcx.erase_regions(&tail)
                        })
                    }
                    _ => {
                        // With a concrete tail the layout query above should
                        // have succeeded.
                        bug!("SizeSkeleton::compute({}): layout errored ({}), yet \
                              tail `{}` is not a type parameter or a projection",
                             ty, err, tail)
                    }
                }
            }

            ty::Adt(def, substs) => {
                // Only newtypes and enums w/ nullable pointer optimization.
                if def.is_union() || def.variants.is_empty() || def.variants.len() > 2 {
                    return Err(err);
                }

                // Get a zero-sized variant or a pointer newtype.
                let zero_or_ptr_variant = |i: usize| {
                    let fields = def.variants[i].fields.iter().map(|field| {
                        SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env)
                    });
                    let mut ptr = None;
                    for field in fields {
                        let field = field?;
                        match field {
                            SizeSkeleton::Known(size) => {
                                // Any field with a known non-zero size
                                // disqualifies the variant.
                                if size.bytes() > 0 {
                                    return Err(err);
                                }
                            }
                            SizeSkeleton::Pointer {..} => {
                                // At most one pointer field is allowed.
                                if ptr.is_some() {
                                    return Err(err);
                                }
                                ptr = Some(field);
                            }
                        }
                    }
                    Ok(ptr)
                };

                let v0 = zero_or_ptr_variant(0)?;
                // Newtype.
                if def.variants.len() == 1 {
                    if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
                        return Ok(SizeSkeleton::Pointer {
                            // A scalar-valid-range restriction that excludes 0
                            // also makes the wrapped pointer non-null.
                            non_zero: non_zero || match tcx.layout_scalar_valid_range(def.did) {
                                (Bound::Included(start), Bound::Unbounded) => start > 0,
                                (Bound::Included(start), Bound::Included(end)) =>
                                    0 < start && start < end,
                                _ => false,
                            },
                            tail,
                        });
                    } else {
                        return Err(err);
                    }
                }

                let v1 = zero_or_ptr_variant(1)?;
                // Nullable pointer enum optimization.
                match (v0, v1) {
                    (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None) |
                    (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
                        // Null encodes the all-ZST variant, so the resulting
                        // value is pointer-sized but may be null.
                        Ok(SizeSkeleton::Pointer {
                            non_zero: false,
                            tail,
                        })
                    }
                    _ => Err(err)
                }
            }

            ty::Projection(_) | ty::Opaque(..) => {
                // Retry on the normalized type, unless normalization
                // made no progress (in which case recursing would loop).
                let normalized = tcx.normalize_erasing_regions(param_env, ty);
                if ty == normalized {
                    Err(err)
                } else {
                    SizeSkeleton::compute(normalized, tcx, param_env)
                }
            }

            _ => Err(err)
        }
    }

    /// Whether two skeletons are provably the same size: equal known sizes,
    /// or pointers with the same (erased) unsized tail.
    pub fn same_size(self, other: SizeSkeleton<'_>) -> bool {
        match (self, other) {
            (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
            (SizeSkeleton::Pointer { tail: a, .. },
             SizeSkeleton::Pointer { tail: b, .. }) => a == b,
            _ => false
        }
    }
}
1416
/// Extension of `HasDataLayout` for contexts that can also supply a `TyCtxt`.
pub trait HasTyCtxt<'tcx>: HasDataLayout {
    fn tcx<'a>(&'a self) -> TyCtxt<'a, 'tcx, 'tcx>;
}
1420
impl<'a, 'gcx, 'tcx> HasDataLayout for TyCtxt<'a, 'gcx, 'tcx> {
    fn data_layout(&self) -> &TargetDataLayout {
        // The target data layout is stored on the context itself.
        &self.data_layout
    }
}
1426
impl<'a, 'gcx, 'tcx> HasTyCtxt<'gcx> for TyCtxt<'a, 'gcx, 'tcx> {
    fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'gcx> {
        // Hand out the global context (both lifetimes unified to 'gcx).
        self.global_tcx()
    }
}
1432
impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
    fn data_layout(&self) -> &TargetDataLayout {
        // Delegate to the wrapped context.
        self.tcx.data_layout()
    }
}
1438
impl<'gcx, 'tcx, T: HasTyCtxt<'gcx>> HasTyCtxt<'gcx> for LayoutCx<'tcx, T> {
    fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'gcx> {
        // Delegate to the wrapped context.
        self.tcx.tcx()
    }
}
1444
/// Abstracts over a plain `T` and a `Result<T, E>`, so layout code can be
/// generic over whether a context's layout computation is fallible.
pub trait MaybeResult<T> {
    fn from_ok(x: T) -> Self;
    fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self;
}
1449
// Infallible case: `T` is its own "ok" value.
impl<T> MaybeResult<T> for T {
    fn from_ok(x: T) -> Self {
        x
    }
    fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self {
        f(self)
    }
}
1458
1459 impl<T, E> MaybeResult<T> for Result<T, E> {
1460     fn from_ok(x: T) -> Self {
1461         Ok(x)
1462     }
1463     fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self {
1464         self.map(f)
1465     }
1466 }
1467
1468 pub type TyLayout<'tcx> = ::rustc_target::abi::TyLayout<'tcx, Ty<'tcx>>;
1469
1470 impl<'a, 'tcx> LayoutOf for LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
1471     type Ty = Ty<'tcx>;
1472     type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;
1473
1474     /// Computes the layout of a type. Note that this implicitly
1475     /// executes in "reveal all" mode.
1476     fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
1477         let param_env = self.param_env.with_reveal_all();
1478         let ty = self.tcx.normalize_erasing_regions(param_env, ty);
1479         let details = self.tcx.layout_raw(param_env.and(ty))?;
1480         let layout = TyLayout {
1481             ty,
1482             details
1483         };
1484
1485         // NB: This recording is normally disabled; when enabled, it
1486         // can however trigger recursive invocations of `layout_of`.
1487         // Therefore, we execute it *after* the main query has
1488         // completed, to avoid problems around recursive structures
1489         // and the like. (Admittedly, I wasn't able to reproduce a problem
1490         // here, but it seems like the right thing to do. -nmatsakis)
1491         self.record_layout_for_printing(layout);
1492
1493         Ok(layout)
1494     }
1495 }
1496
1497 impl<'a, 'tcx> LayoutOf for LayoutCx<'tcx, ty::query::TyCtxtAt<'a, 'tcx, 'tcx>> {
1498     type Ty = Ty<'tcx>;
1499     type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;
1500
1501     /// Computes the layout of a type. Note that this implicitly
1502     /// executes in "reveal all" mode.
1503     fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
1504         let param_env = self.param_env.with_reveal_all();
1505         let ty = self.tcx.normalize_erasing_regions(param_env, ty);
1506         let details = self.tcx.layout_raw(param_env.and(ty))?;
1507         let layout = TyLayout {
1508             ty,
1509             details
1510         };
1511
1512         // NB: This recording is normally disabled; when enabled, it
1513         // can however trigger recursive invocations of `layout_of`.
1514         // Therefore, we execute it *after* the main query has
1515         // completed, to avoid problems around recursive structures
1516         // and the like. (Admittedly, I wasn't able to reproduce a problem
1517         // here, but it seems like the right thing to do. -nmatsakis)
1518         let cx = LayoutCx {
1519             tcx: *self.tcx,
1520             param_env: self.param_env
1521         };
1522         cx.record_layout_for_printing(layout);
1523
1524         Ok(layout)
1525     }
1526 }
1527
1528 // Helper (inherent) `layout_of` methods to avoid pushing `LayoutCx` to users.
1529 impl TyCtxt<'a, 'tcx, '_> {
1530     /// Computes the layout of a type. Note that this implicitly
1531     /// executes in "reveal all" mode.
1532     #[inline]
1533     pub fn layout_of(self, param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
1534                      -> Result<TyLayout<'tcx>, LayoutError<'tcx>> {
1535         let cx = LayoutCx {
1536             tcx: self.global_tcx(),
1537             param_env: param_env_and_ty.param_env
1538         };
1539         cx.layout_of(param_env_and_ty.value)
1540     }
1541 }
1542
1543 impl ty::query::TyCtxtAt<'a, 'tcx, '_> {
1544     /// Computes the layout of a type. Note that this implicitly
1545     /// executes in "reveal all" mode.
1546     #[inline]
1547     pub fn layout_of(self, param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
1548                      -> Result<TyLayout<'tcx>, LayoutError<'tcx>> {
1549         let cx = LayoutCx {
1550             tcx: self.global_tcx().at(self.span),
1551             param_env: param_env_and_ty.param_env
1552         };
1553         cx.layout_of(param_env_and_ty.value)
1554     }
1555 }
1556
impl<'a, 'tcx, C> TyLayoutMethods<'tcx, C> for Ty<'tcx>
    where C: LayoutOf<Ty = Ty<'tcx>> + HasTyCtxt<'tcx>,
          C::TyLayout: MaybeResult<TyLayout<'tcx>>
{
    /// Projects the layout of one variant out of `this`.
    /// For the already-selected (`Single`) variant this is `this` itself;
    /// asking for a *different* variant of a `Single` layout synthesizes an
    /// uninhabited, zero-sized stand-in layout.
    fn for_variant(this: TyLayout<'tcx>, cx: C, variant_index: usize) -> TyLayout<'tcx> {
        let details = match this.variants {
            // The requested variant is the one we already are.
            Variants::Single { index } if index == variant_index => this.details,

            Variants::Single { index } => {
                // Deny calling for_variant more than once for non-Single enums.
                cx.layout_of(this.ty).map_same(|layout| {
                    assert_eq!(layout.variants, Variants::Single { index });
                    layout
                });

                // Synthesize a layout for the absent variant: uninhabited,
                // zero-sized, with the right number of (union-placed) fields.
                let fields = match this.ty.sty {
                    ty::Adt(def, _) => def.variants[variant_index].fields.len(),
                    _ => bug!()
                };
                let tcx = cx.tcx();
                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: variant_index },
                    fields: FieldPlacement::Union(fields),
                    abi: Abi::Uninhabited,
                    align: tcx.data_layout.i8_align,
                    size: Size::ZERO
                })
            }

            // Multi-variant layouts carry per-variant details directly.
            Variants::NicheFilling { ref variants, .. } |
            Variants::Tagged { ref variants, .. } => {
                &variants[variant_index]
            }
        };

        assert_eq!(details.variants, Variants::Single { index: variant_index });

        TyLayout {
            ty: this.ty,
            details
        }
    }

    /// Computes the layout of field `i` of `this`, by first determining the
    /// field's type and then asking `cx` for that type's layout.
    fn field(this: TyLayout<'tcx>, cx: C, i: usize) -> C::TyLayout {
        let tcx = cx.tcx();
        cx.layout_of(match this.ty.sty {
            // Types that have no fields at all.
            ty::Bool |
            ty::Char |
            ty::Int(_) |
            ty::Uint(_) |
            ty::Float(_) |
            ty::FnPtr(_) |
            ty::Never |
            ty::FnDef(..) |
            ty::GeneratorWitness(..) |
            ty::Foreign(..) |
            ty::Dynamic(..) => {
                bug!("TyLayout::field_type({:?}): not applicable", this)
            }

            // Potentially-fat pointers.
            ty::Ref(_, pointee, _) |
            ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                assert!(i < this.fields.count());

                // Reuse the fat *T type as its own thin pointer data field.
                // This provides information about e.g. DST struct pointees
                // (which may have no non-DST form), and will work as long
                // as the `Abi` or `FieldPlacement` is checked by users.
                if i == 0 {
                    let nil = tcx.mk_unit();
                    let ptr_ty = if this.ty.is_unsafe_ptr() {
                        tcx.mk_mut_ptr(nil)
                    } else {
                        tcx.mk_mut_ref(tcx.types.re_static, nil)
                    };
                    return cx.layout_of(ptr_ty).map_same(|mut ptr_layout| {
                        ptr_layout.ty = this.ty;
                        ptr_layout
                    });
                }

                // Field 1 is the pointer metadata, chosen by the unsized tail.
                match tcx.struct_tail(pointee).sty {
                    ty::Slice(_) |
                    ty::Str => tcx.types.usize,
                    ty::Dynamic(_, _) => {
                        tcx.mk_imm_ref(
                            tcx.types.re_static,
                            tcx.mk_array(tcx.types.usize, 3),
                        )
                        /* FIXME use actual fn pointers
                        Warning: naively computing the number of entries in the
                        vtable by counting the methods on the trait + methods on
                        all parent traits does not work, because some methods can
                        be not object safe and thus excluded from the vtable.
                        Increase this counter if you tried to implement this but
                        failed to do it without duplicating a lot of code from
                        other places in the compiler: 2
                        tcx.mk_tup(&[
                            tcx.mk_array(tcx.types.usize, 3),
                            tcx.mk_array(Option<fn()>),
                        ])
                        */
                    }
                    _ => bug!("TyLayout::field_type({:?}): not applicable", this)
                }
            }

            // Arrays and slices.
            ty::Array(element, _) |
            ty::Slice(element) => element,
            ty::Str => tcx.types.u8,

            // Tuples, generators and closures.
            ty::Closure(def_id, ref substs) => {
                substs.upvar_tys(def_id, tcx).nth(i).unwrap()
            }

            ty::Generator(def_id, ref substs, _) => {
                substs.field_tys(def_id, tcx).nth(i).unwrap()
            }

            ty::Tuple(tys) => tys[i],

            // SIMD vector types.
            ty::Adt(def, ..) if def.repr.simd() => {
                this.ty.simd_type(tcx)
            }

            // ADTs.
            ty::Adt(def, substs) => {
                match this.variants {
                    Variants::Single { index } => {
                        def.variants[index].fields[i].ty(tcx, substs)
                    }

                    // Discriminant field for enums (where applicable).
                    Variants::Tagged { tag: ref discr, .. } |
                    Variants::NicheFilling { niche: ref discr, .. } => {
                        assert_eq!(i, 0);
                        // The discriminant has no `Ty` field of its own:
                        // build a scalar layout for it directly.
                        let layout = LayoutDetails::scalar(tcx, discr.clone());
                        return MaybeResult::from_ok(TyLayout {
                            details: tcx.intern_layout(layout),
                            ty: discr.value.to_ty(tcx)
                        });
                    }
                }
            }

            ty::Projection(_) | ty::UnnormalizedProjection(..) |
            ty::Opaque(..) | ty::Param(_) | ty::Infer(_) | ty::Error => {
                bug!("TyLayout::field_type: unexpected type `{}`", this.ty)
            }
        })
    }
}
1713
/// A scalar field within a layout that has spare ("niche") values,
/// usable to encode enum discriminants without extra space.
struct Niche {
    // Offset of the scalar within the enclosing layout.
    offset: Size,
    // The scalar itself, including its valid range.
    scalar: Scalar,
    // How many values fall outside the scalar's valid range.
    available: u128,
}
1719
1720 impl Niche {
1721     fn reserve<'a, 'tcx>(
1722         &self,
1723         cx: LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>>,
1724         count: u128,
1725     ) -> Option<(u128, Scalar)> {
1726         if count > self.available {
1727             return None;
1728         }
1729         let Scalar { value, valid_range: ref v } = self.scalar;
1730         let bits = value.size(cx).bits();
1731         assert!(bits <= 128);
1732         let max_value = !0u128 >> (128 - bits);
1733         let start = v.end().wrapping_add(1) & max_value;
1734         let end = v.end().wrapping_add(count) & max_value;
1735         Some((start, Scalar { value, valid_range: *v.start()..=end }))
1736     }
1737 }
1738
impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
    /// Find the offset of a niche leaf field, starting from
    /// the given type and recursing through aggregates.
    // FIXME(eddyb) traverse already optimized enums.
    fn find_niche(self, layout: TyLayout<'tcx>) -> Result<Option<Niche>, LayoutError<'tcx>> {
        // Builds a `Niche` for a scalar at `offset`, if its valid range
        // leaves any values free.
        let scalar_niche = |scalar: &Scalar, offset| {
            let Scalar { value, valid_range: ref v } = *scalar;

            let bits = value.size(self).bits();
            assert!(bits <= 128);
            let max_value = !0u128 >> (128 - bits);

            // Find out how many values are outside the valid range.
            // Note that the valid range can wrap around (start > end).
            let available = if v.start() <= v.end() {
                v.start() + (max_value - v.end())
            } else {
                v.start() - v.end() - 1
            };

            // Give up if there is no niche value available.
            if available == 0 {
                return None;
            }

            Some(Niche { offset, scalar: scalar.clone(), available })
        };

        // Locals variables which live across yields are stored
        // in the generator type as fields. These may be uninitialized
        // so we don't look for niches there.
        if let ty::Generator(..) = layout.ty.sty {
            return Ok(None);
        }

        match layout.abi {
            Abi::Scalar(ref scalar) => {
                return Ok(scalar_niche(scalar, Size::ZERO));
            }
            Abi::ScalarPair(ref a, ref b) => {
                // HACK(nox): We iter on `b` and then `a` because `max_by_key`
                // returns the last maximum.
                let niche = iter::once((b, a.value.size(self).abi_align(b.value.align(self))))
                    .chain(iter::once((a, Size::ZERO)))
                    .filter_map(|(scalar, offset)| scalar_niche(scalar, offset))
                    .max_by_key(|niche| niche.available);
                return Ok(niche);
            }
            Abi::Vector { ref element, .. } => {
                return Ok(scalar_niche(element, Size::ZERO));
            }
            _ => {}
        }

        // Perhaps one of the fields is non-zero, let's recurse and find out.
        if let FieldPlacement::Union(_) = layout.fields {
            // Only Rust enums have safe-to-inspect fields
            // (a discriminant), other unions are unsafe.
            if let Variants::Single { .. } = layout.variants {
                return Ok(None);
            }
        }
        if let FieldPlacement::Array { .. } = layout.fields {
            // All array elements share one layout, so inspecting the
            // first element covers them all.
            if layout.fields.count() > 0 {
                return self.find_niche(layout.field(self, 0)?);
            } else {
                return Ok(None);
            }
        }
        // Otherwise, pick the field whose niche has the most available
        // values, adjusting its offset to be relative to `layout`.
        let mut niche = None;
        let mut available = 0;
        for i in 0..layout.fields.count() {
            if let Some(mut c) = self.find_niche(layout.field(self, i)?)? {
                if c.available > available {
                    available = c.available;
                    c.offset += layout.fields.offset(i);
                    niche = Some(c);
                }
            }
        }
        Ok(niche)
    }
}
1821
// Manual stable-hashing: the enum discriminant is hashed first, then each
// variant's payload field by field.
impl<'a> HashStable<StableHashingContext<'a>> for Variants {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        use ty::layout::Variants::*;
        mem::discriminant(self).hash_stable(hcx, hasher);

        match *self {
            Single { index } => {
                index.hash_stable(hcx, hasher);
            }
            Tagged {
                ref tag,
                ref variants,
            } => {
                tag.hash_stable(hcx, hasher);
                variants.hash_stable(hcx, hasher);
            }
            NicheFilling {
                dataful_variant,
                ref niche_variants,
                ref niche,
                niche_start,
                ref variants,
            } => {
                dataful_variant.hash_stable(hcx, hasher);
                // The range is hashed as its two endpoints.
                niche_variants.start().hash_stable(hcx, hasher);
                niche_variants.end().hash_stable(hcx, hasher);
                niche.hash_stable(hcx, hasher);
                niche_start.hash_stable(hcx, hasher);
                variants.hash_stable(hcx, hasher);
            }
        }
    }
}
1857
// Manual stable-hashing: discriminant first, then each variant's fields.
impl<'a> HashStable<StableHashingContext<'a>> for FieldPlacement {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        use ty::layout::FieldPlacement::*;
        mem::discriminant(self).hash_stable(hcx, hasher);

        match *self {
            Union(count) => {
                count.hash_stable(hcx, hasher);
            }
            Array { count, stride } => {
                count.hash_stable(hcx, hasher);
                stride.hash_stable(hcx, hasher);
            }
            Arbitrary { ref offsets, ref memory_index } => {
                offsets.hash_stable(hcx, hasher);
                memory_index.hash_stable(hcx, hasher);
            }
        }
    }
}
1880
// Manual stable-hashing: discriminant first, then each variant's fields.
impl<'a> HashStable<StableHashingContext<'a>> for Abi {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        use ty::layout::Abi::*;
        mem::discriminant(self).hash_stable(hcx, hasher);

        match *self {
            // `Uninhabited` carries no payload beyond its discriminant.
            Uninhabited => {}
            Scalar(ref value) => {
                value.hash_stable(hcx, hasher);
            }
            ScalarPair(ref a, ref b) => {
                a.hash_stable(hcx, hasher);
                b.hash_stable(hcx, hasher);
            }
            Vector { ref element, count } => {
                element.hash_stable(hcx, hasher);
                count.hash_stable(hcx, hasher);
            }
            Aggregate { sized } => {
                sized.hash_stable(hcx, hasher);
            }
        }
    }
}
1907
1908 impl<'a> HashStable<StableHashingContext<'a>> for Scalar {
1909     fn hash_stable<W: StableHasherResult>(&self,
1910                                           hcx: &mut StableHashingContext<'a>,
1911                                           hasher: &mut StableHasher<W>) {
1912         let Scalar { value, ref valid_range } = *self;
1913         value.hash_stable(hcx, hasher);
1914         valid_range.start().hash_stable(hcx, hasher);
1915         valid_range.end().hash_stable(hcx, hasher);
1916     }
1917 }
1918
// Structural stable-hashing for the remaining layout types, generated by
// the `impl_stable_hash_for!` macro.
impl_stable_hash_for!(struct ::ty::layout::LayoutDetails {
    variants,
    fields,
    abi,
    size,
    align
});

impl_stable_hash_for!(enum ::ty::layout::Integer {
    I8,
    I16,
    I32,
    I64,
    I128
});

impl_stable_hash_for!(enum ::ty::layout::Primitive {
    Int(integer, signed),
    Float(fty),
    Pointer
});
1940
impl<'gcx> HashStable<StableHashingContext<'gcx>> for Align {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'gcx>,
                                          hasher: &mut StableHasher<W>) {
        // Hash both the ABI-required and the preferred alignment.
        self.abi().hash_stable(hcx, hasher);
        self.pref().hash_stable(hcx, hasher);
    }
}
1949
impl<'gcx> HashStable<StableHashingContext<'gcx>> for Size {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'gcx>,
                                          hasher: &mut StableHasher<W>) {
        // A `Size` is fully determined by its byte count.
        self.bytes().hash_stable(hcx, hasher);
    }
}
1957
impl<'a, 'gcx> HashStable<StableHashingContext<'a>> for LayoutError<'gcx>
{
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        use ty::layout::LayoutError::*;
        mem::discriminant(self).hash_stable(hcx, hasher);

        match *self {
            // Both variants carry the offending type as their only payload.
            Unknown(t) |
            SizeOverflow(t) => t.hash_stable(hcx, hasher)
        }
    }
}