// src/librustc/ty/layout.rs
1 // Copyright 2016 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
4 //
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
10
11 use session::{self, DataTypeKind};
12 use ty::{self, Ty, TyCtxt, TypeFoldable, ReprOptions};
13
14 use syntax::ast::{self, IntTy, UintTy};
15 use syntax::attr;
16 use syntax_pos::DUMMY_SP;
17
18 use std::cmp;
19 use std::fmt;
20 use std::i128;
21 use std::iter;
22 use std::mem;
23 use std::ops::Bound;
24
25 use ich::StableHashingContext;
26 use rustc_data_structures::stable_hasher::{HashStable, StableHasher,
27                                            StableHasherResult};
28
29 pub use rustc_target::abi::*;
30
/// Extension methods tying the target-independent `Integer` size enum
/// to rustc's type system and `#[repr]` attribute handling.
pub trait IntegerExt {
    /// Converts this integer size to the corresponding Rust integer type
    /// (`u8`..`u128` or `i8`..`i128`, depending on `signed`).
    fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, signed: bool) -> Ty<'tcx>;
    /// Gets the `Integer` matching an `attr::IntType` repr hint;
    /// `isize`/`usize` resolve via the target data layout.
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
    /// Finds the `Integer` size and signedness needed to represent the
    /// discriminant range `min..=max` of `ty`, honoring `repr` hints.
    fn repr_discr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                            ty: Ty<'tcx>,
                            repr: &ReprOptions,
                            min: i128,
                            max: i128)
                            -> (Integer, bool);
}
41
42 impl IntegerExt for Integer {
43     fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, signed: bool) -> Ty<'tcx> {
44         match (*self, signed) {
45             (I8, false) => tcx.types.u8,
46             (I16, false) => tcx.types.u16,
47             (I32, false) => tcx.types.u32,
48             (I64, false) => tcx.types.u64,
49             (I128, false) => tcx.types.u128,
50             (I8, true) => tcx.types.i8,
51             (I16, true) => tcx.types.i16,
52             (I32, true) => tcx.types.i32,
53             (I64, true) => tcx.types.i64,
54             (I128, true) => tcx.types.i128,
55         }
56     }
57
58     /// Get the Integer type from an attr::IntType.
59     fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
60         let dl = cx.data_layout();
61
62         match ity {
63             attr::SignedInt(IntTy::I8) | attr::UnsignedInt(UintTy::U8) => I8,
64             attr::SignedInt(IntTy::I16) | attr::UnsignedInt(UintTy::U16) => I16,
65             attr::SignedInt(IntTy::I32) | attr::UnsignedInt(UintTy::U32) => I32,
66             attr::SignedInt(IntTy::I64) | attr::UnsignedInt(UintTy::U64) => I64,
67             attr::SignedInt(IntTy::I128) | attr::UnsignedInt(UintTy::U128) => I128,
68             attr::SignedInt(IntTy::Isize) | attr::UnsignedInt(UintTy::Usize) => {
69                 dl.ptr_sized_integer()
70             }
71         }
72     }
73
74     /// Find the appropriate Integer type and signedness for the given
75     /// signed discriminant range and #[repr] attribute.
76     /// N.B.: u128 values above i128::MAX will be treated as signed, but
77     /// that shouldn't affect anything, other than maybe debuginfo.
78     fn repr_discr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
79                             ty: Ty<'tcx>,
80                             repr: &ReprOptions,
81                             min: i128,
82                             max: i128)
83                             -> (Integer, bool) {
84         // Theoretically, negative values could be larger in unsigned representation
85         // than the unsigned representation of the signed minimum. However, if there
86         // are any negative values, the only valid unsigned representation is u128
87         // which can fit all i128 values, so the result remains unaffected.
88         let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
89         let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));
90
91         let mut min_from_extern = None;
92         let min_default = I8;
93
94         if let Some(ity) = repr.int {
95             let discr = Integer::from_attr(&tcx, ity);
96             let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
97             if discr < fit {
98                 bug!("Integer::repr_discr: `#[repr]` hint too small for \
99                       discriminant range of enum `{}", ty)
100             }
101             return (discr, ity.is_signed());
102         }
103
104         if repr.c() {
105             match &tcx.sess.target.target.arch[..] {
106                 // WARNING: the ARM EABI has two variants; the one corresponding
107                 // to `at_least == I32` appears to be used on Linux and NetBSD,
108                 // but some systems may use the variant corresponding to no
109                 // lower bound. However, we don't run on those yet...?
110                 "arm" => min_from_extern = Some(I32),
111                 _ => min_from_extern = Some(I32),
112             }
113         }
114
115         let at_least = min_from_extern.unwrap_or(min_default);
116
117         // If there are no negative values, we can use the unsigned fit.
118         if min >= 0 {
119             (cmp::max(unsigned_fit, at_least), false)
120         } else {
121             (cmp::max(signed_fit, at_least), true)
122         }
123     }
124 }
125
/// Extension method mapping an ABI `Primitive` back to a Rust type.
pub trait PrimitiveExt {
    /// Returns a Rust type with this primitive's representation.
    fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Ty<'tcx>;
}
129
130 impl PrimitiveExt for Primitive {
131     fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Ty<'tcx> {
132         match *self {
133             Int(i, signed) => i.to_ty(tcx, signed),
134             Float(FloatTy::F32) => tcx.types.f32,
135             Float(FloatTy::F64) => tcx.types.f64,
136             Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
137         }
138     }
139 }
140
/// The first half of a fat pointer.
///
/// - For a trait object, this is the address of the box.
/// - For a slice, this is the base address.
pub const FAT_PTR_ADDR: usize = 0;

/// The second half of a fat pointer.
///
/// - For a trait object, this is the address of the vtable.
/// - For a slice, this is the length.
pub const FAT_PTR_EXTRA: usize = 1;
152
/// Reasons why computing the layout of a type can fail.
#[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable)]
pub enum LayoutError<'tcx> {
    /// The layout could not be determined — e.g. an array count that does
    /// not normalize, or an unrecognized unsized tail.
    Unknown(Ty<'tcx>),
    /// A size computation overflowed (type too big for the target).
    SizeOverflow(Ty<'tcx>)
}
158
159 impl<'tcx> fmt::Display for LayoutError<'tcx> {
160     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
161         match *self {
162             LayoutError::Unknown(ty) => {
163                 write!(f, "the type `{:?}` has an unknown layout", ty)
164             }
165             LayoutError::SizeOverflow(ty) => {
166                 write!(f, "the type `{:?}` is too big for the current architecture", ty)
167             }
168         }
169     }
170 }
171
/// Query provider computing the layout of a type.
///
/// Tracks the layout recursion depth in the thread-local `ImplicitCtxt`
/// and aborts compilation (`sess.fatal`) when it exceeds the session's
/// recursion limit, which would otherwise loop on self-referential types.
fn layout_raw<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                        query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
                        -> Result<&'tcx LayoutDetails, LayoutError<'tcx>>
{
    ty::tls::with_related_context(tcx, move |icx| {
        let rec_limit = *tcx.sess.recursion_limit.get();
        let (param_env, ty) = query.into_parts();

        // Check the depth BEFORE recursing; fatal() does not return.
        if icx.layout_depth > rec_limit {
            tcx.sess.fatal(
                &format!("overflow representing the type `{}`", ty));
        }

        // Update the ImplicitCtxt to increase the layout_depth
        let icx = ty::tls::ImplicitCtxt {
            layout_depth: icx.layout_depth + 1,
            ..icx.clone()
        };

        // Run the actual computation with the incremented depth installed,
        // so nested layout_raw calls observe it.
        ty::tls::enter_context(&icx, |_| {
            let cx = LayoutCx { tcx, param_env };
            cx.layout_raw_uncached(ty)
        })
    })
}
197
198 pub fn provide(providers: &mut ty::query::Providers<'_>) {
199     *providers = ty::query::Providers {
200         layout_raw,
201         ..*providers
202     };
203 }
204
/// Context in which layouts are computed: a layout-capable context `C`
/// (typically a `TyCtxt`) paired with the parameter environment that
/// normalization and sizedness checks run under.
pub struct LayoutCx<'tcx, C> {
    /// Context used to look up data layout and intern computed layouts.
    pub tcx: C,
    /// Parameter environment for normalization / `is_sized` queries.
    pub param_env: ty::ParamEnv<'tcx>
}
209
210 impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
211     fn layout_raw_uncached(&self, ty: Ty<'tcx>)
212                            -> Result<&'tcx LayoutDetails, LayoutError<'tcx>> {
213         let tcx = self.tcx;
214         let param_env = self.param_env;
215         let dl = self.data_layout();
216         let scalar_unit = |value: Primitive| {
217             let bits = value.size(dl).bits();
218             assert!(bits <= 128);
219             Scalar {
220                 value,
221                 valid_range: 0..=(!0 >> (128 - bits))
222             }
223         };
224         let scalar = |value: Primitive| {
225             tcx.intern_layout(LayoutDetails::scalar(self, scalar_unit(value)))
226         };
227         let scalar_pair = |a: Scalar, b: Scalar| {
228             let align = a.value.align(dl).max(b.value.align(dl)).max(dl.aggregate_align);
229             let b_offset = a.value.size(dl).abi_align(b.value.align(dl));
230             let size = (b_offset + b.value.size(dl)).abi_align(align);
231             LayoutDetails {
232                 variants: Variants::Single { index: 0 },
233                 fields: FieldPlacement::Arbitrary {
234                     offsets: vec![Size::ZERO, b_offset],
235                     memory_index: vec![0, 1]
236                 },
237                 abi: Abi::ScalarPair(a, b),
238                 align,
239                 size
240             }
241         };
242
243         #[derive(Copy, Clone, Debug)]
244         enum StructKind {
245             /// A tuple, closure, or univariant which cannot be coerced to unsized.
246             AlwaysSized,
247             /// A univariant, the last field of which may be coerced to unsized.
248             MaybeUnsized,
249             /// A univariant, but with a prefix of an arbitrary size & alignment (e.g. enum tag).
250             Prefixed(Size, Align),
251         }
252
253         let univariant_uninterned = |fields: &[TyLayout<'_>], repr: &ReprOptions, kind| {
254             let packed = repr.packed();
255             if packed && repr.align > 0 {
256                 bug!("struct cannot be packed and aligned");
257             }
258
259             let pack = {
260                 let pack = repr.pack as u64;
261                 Align::from_bytes(pack, pack).unwrap()
262             };
263
264             let mut align = if packed {
265                 dl.i8_align
266             } else {
267                 dl.aggregate_align
268             };
269
270             let mut sized = true;
271             let mut offsets = vec![Size::ZERO; fields.len()];
272             let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();
273
274             let mut optimize = !repr.inhibit_struct_field_reordering_opt();
275             if let StructKind::Prefixed(_, align) = kind {
276                 optimize &= align.abi() == 1;
277             }
278
279             if optimize {
280                 let end = if let StructKind::MaybeUnsized = kind {
281                     fields.len() - 1
282                 } else {
283                     fields.len()
284                 };
285                 let optimizing = &mut inverse_memory_index[..end];
286                 let field_align = |f: &TyLayout<'_>| {
287                     if packed { f.align.min(pack).abi() } else { f.align.abi() }
288                 };
289                 match kind {
290                     StructKind::AlwaysSized |
291                     StructKind::MaybeUnsized => {
292                         optimizing.sort_by_key(|&x| {
293                             // Place ZSTs first to avoid "interesting offsets",
294                             // especially with only one or two non-ZST fields.
295                             let f = &fields[x as usize];
296                             (!f.is_zst(), cmp::Reverse(field_align(f)))
297                         });
298                     }
299                     StructKind::Prefixed(..) => {
300                         optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
301                     }
302                 }
303             }
304
305             // inverse_memory_index holds field indices by increasing memory offset.
306             // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
307             // We now write field offsets to the corresponding offset slot;
308             // field 5 with offset 0 puts 0 in offsets[5].
309             // At the bottom of this function, we use inverse_memory_index to produce memory_index.
310
311             let mut offset = Size::ZERO;
312
313             if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
314                 if packed {
315                     let prefix_align = prefix_align.min(pack);
316                     align = align.max(prefix_align);
317                 } else {
318                     align = align.max(prefix_align);
319                 }
320                 offset = prefix_size.abi_align(prefix_align);
321             }
322
323             for &i in &inverse_memory_index {
324                 let field = fields[i as usize];
325                 if !sized {
326                     bug!("univariant: field #{} of `{}` comes after unsized field",
327                          offsets.len(), ty);
328                 }
329
330                 if field.is_unsized() {
331                     sized = false;
332                 }
333
334                 // Invariant: offset < dl.obj_size_bound() <= 1<<61
335                 if packed {
336                     let field_pack = field.align.min(pack);
337                     offset = offset.abi_align(field_pack);
338                     align = align.max(field_pack);
339                 }
340                 else {
341                     offset = offset.abi_align(field.align);
342                     align = align.max(field.align);
343                 }
344
345                 debug!("univariant offset: {:?} field: {:#?}", offset, field);
346                 offsets[i as usize] = offset;
347
348                 offset = offset.checked_add(field.size, dl)
349                     .ok_or(LayoutError::SizeOverflow(ty))?;
350             }
351
352             if repr.align > 0 {
353                 let repr_align = repr.align as u64;
354                 align = align.max(Align::from_bytes(repr_align, repr_align).unwrap());
355                 debug!("univariant repr_align: {:?}", repr_align);
356             }
357
358             debug!("univariant min_size: {:?}", offset);
359             let min_size = offset;
360
361             // As stated above, inverse_memory_index holds field indices by increasing offset.
362             // This makes it an already-sorted view of the offsets vec.
363             // To invert it, consider:
364             // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
365             // Field 5 would be the first element, so memory_index is i:
366             // Note: if we didn't optimize, it's already right.
367
368             let mut memory_index;
369             if optimize {
370                 memory_index = vec![0; inverse_memory_index.len()];
371
372                 for i in 0..inverse_memory_index.len() {
373                     memory_index[inverse_memory_index[i] as usize]  = i as u32;
374                 }
375             } else {
376                 memory_index = inverse_memory_index;
377             }
378
379             let size = min_size.abi_align(align);
380             let mut abi = Abi::Aggregate { sized };
381
382             // Unpack newtype ABIs and find scalar pairs.
383             if sized && size.bytes() > 0 {
384                 // All other fields must be ZSTs, and we need them to all start at 0.
385                 let mut zst_offsets =
386                     offsets.iter().enumerate().filter(|&(i, _)| fields[i].is_zst());
387                 if zst_offsets.all(|(_, o)| o.bytes() == 0) {
388                     let mut non_zst_fields =
389                         fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());
390
391                     match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
392                         // We have exactly one non-ZST field.
393                         (Some((i, field)), None, None) => {
394                             // Field fills the struct and it has a scalar or scalar pair ABI.
395                             if offsets[i].bytes() == 0 &&
396                                align.abi() == field.align.abi() &&
397                                size == field.size {
398                                 match field.abi {
399                                     // For plain scalars, or vectors of them, we can't unpack
400                                     // newtypes for `#[repr(C)]`, as that affects C ABIs.
401                                     Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
402                                         abi = field.abi.clone();
403                                     }
404                                     // But scalar pairs are Rust-specific and get
405                                     // treated as aggregates by C ABIs anyway.
406                                     Abi::ScalarPair(..) => {
407                                         abi = field.abi.clone();
408                                     }
409                                     _ => {}
410                                 }
411                             }
412                         }
413
414                         // Two non-ZST fields, and they're both scalars.
415                         (Some((i, &TyLayout {
416                             details: &LayoutDetails { abi: Abi::Scalar(ref a), .. }, ..
417                         })), Some((j, &TyLayout {
418                             details: &LayoutDetails { abi: Abi::Scalar(ref b), .. }, ..
419                         })), None) => {
420                             // Order by the memory placement, not source order.
421                             let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
422                                 ((i, a), (j, b))
423                             } else {
424                                 ((j, b), (i, a))
425                             };
426                             let pair = scalar_pair(a.clone(), b.clone());
427                             let pair_offsets = match pair.fields {
428                                 FieldPlacement::Arbitrary {
429                                     ref offsets,
430                                     ref memory_index
431                                 } => {
432                                     assert_eq!(memory_index, &[0, 1]);
433                                     offsets
434                                 }
435                                 _ => bug!()
436                             };
437                             if offsets[i] == pair_offsets[0] &&
438                                offsets[j] == pair_offsets[1] &&
439                                align == pair.align &&
440                                size == pair.size {
441                                 // We can use `ScalarPair` only when it matches our
442                                 // already computed layout (including `#[repr(C)]`).
443                                 abi = pair.abi;
444                             }
445                         }
446
447                         _ => {}
448                     }
449                 }
450             }
451
452             if sized && fields.iter().any(|f| f.abi.is_uninhabited()) {
453                 abi = Abi::Uninhabited;
454             }
455
456             Ok(LayoutDetails {
457                 variants: Variants::Single { index: 0 },
458                 fields: FieldPlacement::Arbitrary {
459                     offsets,
460                     memory_index
461                 },
462                 abi,
463                 align,
464                 size
465             })
466         };
467         let univariant = |fields: &[TyLayout<'_>], repr: &ReprOptions, kind| {
468             Ok(tcx.intern_layout(univariant_uninterned(fields, repr, kind)?))
469         };
470         debug_assert!(!ty.has_infer_types());
471
472         Ok(match ty.sty {
473             // Basic scalars.
474             ty::Bool => {
475                 tcx.intern_layout(LayoutDetails::scalar(self, Scalar {
476                     value: Int(I8, false),
477                     valid_range: 0..=1
478                 }))
479             }
480             ty::Char => {
481                 tcx.intern_layout(LayoutDetails::scalar(self, Scalar {
482                     value: Int(I32, false),
483                     valid_range: 0..=0x10FFFF
484                 }))
485             }
486             ty::Int(ity) => {
487                 scalar(Int(Integer::from_attr(dl, attr::SignedInt(ity)), true))
488             }
489             ty::Uint(ity) => {
490                 scalar(Int(Integer::from_attr(dl, attr::UnsignedInt(ity)), false))
491             }
492             ty::Float(fty) => scalar(Float(fty)),
493             ty::FnPtr(_) => {
494                 let mut ptr = scalar_unit(Pointer);
495                 ptr.valid_range = 1..=*ptr.valid_range.end();
496                 tcx.intern_layout(LayoutDetails::scalar(self, ptr))
497             }
498
499             // The never type.
500             ty::Never => {
501                 tcx.intern_layout(LayoutDetails {
502                     variants: Variants::Single { index: 0 },
503                     fields: FieldPlacement::Union(0),
504                     abi: Abi::Uninhabited,
505                     align: dl.i8_align,
506                     size: Size::ZERO
507                 })
508             }
509
510             // Potentially-fat pointers.
511             ty::Ref(_, pointee, _) |
512             ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
513                 let mut data_ptr = scalar_unit(Pointer);
514                 if !ty.is_unsafe_ptr() {
515                     data_ptr.valid_range = 1..=*data_ptr.valid_range.end();
516                 }
517
518                 let pointee = tcx.normalize_erasing_regions(param_env, pointee);
519                 if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
520                     return Ok(tcx.intern_layout(LayoutDetails::scalar(self, data_ptr)));
521                 }
522
523                 let unsized_part = tcx.struct_tail(pointee);
524                 let metadata = match unsized_part.sty {
525                     ty::Foreign(..) => {
526                         return Ok(tcx.intern_layout(LayoutDetails::scalar(self, data_ptr)));
527                     }
528                     ty::Slice(_) | ty::Str => {
529                         scalar_unit(Int(dl.ptr_sized_integer(), false))
530                     }
531                     ty::Dynamic(..) => {
532                         let mut vtable = scalar_unit(Pointer);
533                         vtable.valid_range = 1..=*vtable.valid_range.end();
534                         vtable
535                     }
536                     _ => return Err(LayoutError::Unknown(unsized_part))
537                 };
538
539                 // Effectively a (ptr, meta) tuple.
540                 tcx.intern_layout(scalar_pair(data_ptr, metadata))
541             }
542
543             // Arrays and slices.
544             ty::Array(element, mut count) => {
545                 if count.has_projections() {
546                     count = tcx.normalize_erasing_regions(param_env, count);
547                     if count.has_projections() {
548                         return Err(LayoutError::Unknown(ty));
549                     }
550                 }
551
552                 let element = self.layout_of(element)?;
553                 let count = count.unwrap_usize(tcx);
554                 let size = element.size.checked_mul(count, dl)
555                     .ok_or(LayoutError::SizeOverflow(ty))?;
556
557                 tcx.intern_layout(LayoutDetails {
558                     variants: Variants::Single { index: 0 },
559                     fields: FieldPlacement::Array {
560                         stride: element.size,
561                         count
562                     },
563                     abi: Abi::Aggregate { sized: true },
564                     align: element.align,
565                     size
566                 })
567             }
568             ty::Slice(element) => {
569                 let element = self.layout_of(element)?;
570                 tcx.intern_layout(LayoutDetails {
571                     variants: Variants::Single { index: 0 },
572                     fields: FieldPlacement::Array {
573                         stride: element.size,
574                         count: 0
575                     },
576                     abi: Abi::Aggregate { sized: false },
577                     align: element.align,
578                     size: Size::ZERO
579                 })
580             }
581             ty::Str => {
582                 tcx.intern_layout(LayoutDetails {
583                     variants: Variants::Single { index: 0 },
584                     fields: FieldPlacement::Array {
585                         stride: Size::from_bytes(1),
586                         count: 0
587                     },
588                     abi: Abi::Aggregate { sized: false },
589                     align: dl.i8_align,
590                     size: Size::ZERO
591                 })
592             }
593
594             // Odd unit types.
595             ty::FnDef(..) => {
596                 univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?
597             }
598             ty::Dynamic(..) | ty::Foreign(..) => {
599                 let mut unit = univariant_uninterned(&[], &ReprOptions::default(),
600                   StructKind::AlwaysSized)?;
601                 match unit.abi {
602                     Abi::Aggregate { ref mut sized } => *sized = false,
603                     _ => bug!()
604                 }
605                 tcx.intern_layout(unit)
606             }
607
608             // Tuples, generators and closures.
609             ty::Generator(def_id, ref substs, _) => {
610                 let tys = substs.field_tys(def_id, tcx);
611                 univariant(&tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
612                     &ReprOptions::default(),
613                     StructKind::AlwaysSized)?
614             }
615
616             ty::Closure(def_id, ref substs) => {
617                 let tys = substs.upvar_tys(def_id, tcx);
618                 univariant(&tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
619                     &ReprOptions::default(),
620                     StructKind::AlwaysSized)?
621             }
622
623             ty::Tuple(tys) => {
624                 let kind = if tys.len() == 0 {
625                     StructKind::AlwaysSized
626                 } else {
627                     StructKind::MaybeUnsized
628                 };
629
630                 univariant(&tys.iter().map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
631                            &ReprOptions::default(), kind)?
632             }
633
634             // SIMD vector types.
635             ty::Adt(def, ..) if def.repr.simd() => {
636                 let element = self.layout_of(ty.simd_type(tcx))?;
637                 let count = ty.simd_size(tcx) as u64;
638                 assert!(count > 0);
639                 let scalar = match element.abi {
640                     Abi::Scalar(ref scalar) => scalar.clone(),
641                     _ => {
642                         tcx.sess.fatal(&format!("monomorphising SIMD type `{}` with \
643                                                  a non-machine element type `{}`",
644                                                 ty, element.ty));
645                     }
646                 };
647                 let size = element.size.checked_mul(count, dl)
648                     .ok_or(LayoutError::SizeOverflow(ty))?;
649                 let align = dl.vector_align(size);
650                 let size = size.abi_align(align);
651
652                 tcx.intern_layout(LayoutDetails {
653                     variants: Variants::Single { index: 0 },
654                     fields: FieldPlacement::Array {
655                         stride: element.size,
656                         count
657                     },
658                     abi: Abi::Vector {
659                         element: scalar,
660                         count
661                     },
662                     size,
663                     align,
664                 })
665             }
666
667             // ADTs.
668             ty::Adt(def, substs) => {
669                 // Cache the field layouts.
670                 let variants = def.variants.iter().map(|v| {
671                     v.fields.iter().map(|field| {
672                         self.layout_of(field.ty(tcx, substs))
673                     }).collect::<Result<Vec<_>, _>>()
674                 }).collect::<Result<Vec<_>, _>>()?;
675
676                 if def.is_union() {
677                     let packed = def.repr.packed();
678                     if packed && def.repr.align > 0 {
679                         bug!("Union cannot be packed and aligned");
680                     }
681
682                     let pack = {
683                         let pack = def.repr.pack as u64;
684                         Align::from_bytes(pack, pack).unwrap()
685                     };
686
687                     let mut align = if packed {
688                         dl.i8_align
689                     } else {
690                         dl.aggregate_align
691                     };
692
693                     if def.repr.align > 0 {
694                         let repr_align = def.repr.align as u64;
695                         align = align.max(
696                             Align::from_bytes(repr_align, repr_align).unwrap());
697                     }
698
699                     let mut size = Size::ZERO;
700                     for field in &variants[0] {
701                         assert!(!field.is_unsized());
702
703                         if packed {
704                             let field_pack = field.align.min(pack);
705                             align = align.max(field_pack);
706                         } else {
707                             align = align.max(field.align);
708                         }
709                         size = cmp::max(size, field.size);
710                     }
711
712                     return Ok(tcx.intern_layout(LayoutDetails {
713                         variants: Variants::Single { index: 0 },
714                         fields: FieldPlacement::Union(variants[0].len()),
715                         abi: Abi::Aggregate { sized: true },
716                         align,
717                         size: size.abi_align(align)
718                     }));
719                 }
720
721                 // A variant is absent if it's uninhabited and only has ZST fields.
722                 // Present uninhabited variants only require space for their fields,
723                 // but *not* an encoding of the discriminant (e.g. a tag value).
724                 // See issue #49298 for more details on the need to leave space
725                 // for non-ZST uninhabited data (mostly partial initialization).
726                 let absent = |fields: &[TyLayout<'_>]| {
727                     let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
728                     let is_zst = fields.iter().all(|f| f.is_zst());
729                     uninhabited && is_zst
730                 };
731                 let (present_first, present_second) = {
732                     let mut present_variants = (0..variants.len()).filter(|&v| {
733                         !absent(&variants[v])
734                     });
735                     (present_variants.next(), present_variants.next())
736                 };
737                 if present_first.is_none() {
738                     // Uninhabited because it has no variants, or only absent ones.
739                     return tcx.layout_raw(param_env.and(tcx.types.never));
740                 }
741
742                 let is_struct = !def.is_enum() ||
743                     // Only one variant is present.
744                     (present_second.is_none() &&
745                     // Representation optimizations are allowed.
746                     !def.repr.inhibit_enum_layout_opt());
747                 if is_struct {
748                     // Struct, or univariant enum equivalent to a struct.
749                     // (Typechecking will reject discriminant-sizing attrs.)
750
751                     let v = present_first.unwrap();
752                     let kind = if def.is_enum() || variants[v].len() == 0 {
753                         StructKind::AlwaysSized
754                     } else {
755                         let param_env = tcx.param_env(def.did);
756                         let last_field = def.variants[v].fields.last().unwrap();
757                         let always_sized = tcx.type_of(last_field.did)
758                                               .is_sized(tcx.at(DUMMY_SP), param_env);
759                         if !always_sized { StructKind::MaybeUnsized }
760                         else { StructKind::AlwaysSized }
761                     };
762
763                     let mut st = univariant_uninterned(&variants[v], &def.repr, kind)?;
764                     st.variants = Variants::Single { index: v };
765                     let (start, end) = self.tcx.layout_scalar_valid_range(def.did);
766                     match st.abi {
767                         Abi::Scalar(ref mut scalar) |
768                         Abi::ScalarPair(ref mut scalar, _) => {
769                             // the asserts ensure that we are not using the
770                             // `#[rustc_layout_scalar_valid_range(n)]`
771                             // attribute to widen the range of anything as that would probably
772                             // result in UB somewhere
773                             if let Bound::Included(start) = start {
774                                 assert!(*scalar.valid_range.start() <= start);
775                                 scalar.valid_range = start..=*scalar.valid_range.end();
776                             }
777                             if let Bound::Included(end) = end {
778                                 assert!(*scalar.valid_range.end() >= end);
779                                 scalar.valid_range = *scalar.valid_range.start()..=end;
780                             }
781                         }
782                         _ => assert!(
783                             start == Bound::Unbounded && end == Bound::Unbounded,
784                             "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
785                             def,
786                             st,
787                         ),
788                     }
789                     return Ok(tcx.intern_layout(st));
790                 }
791
792                 // The current code for niche-filling relies on variant indices
793                 // instead of actual discriminants, so dataful enums with
794                 // explicit discriminants (RFC #2363) would misbehave.
795                 let no_explicit_discriminants = def.variants.iter().enumerate()
796                     .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i));
797
798                 // Niche-filling enum optimization.
799                 if !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants {
800                     let mut dataful_variant = None;
801                     let mut niche_variants = usize::max_value()..=0;
802
803                     // Find one non-ZST variant.
804                     'variants: for (v, fields) in variants.iter().enumerate() {
805                         if absent(fields) {
806                             continue 'variants;
807                         }
808                         for f in fields {
809                             if !f.is_zst() {
810                                 if dataful_variant.is_none() {
811                                     dataful_variant = Some(v);
812                                     continue 'variants;
813                                 } else {
814                                     dataful_variant = None;
815                                     break 'variants;
816                                 }
817                             }
818                         }
819                         niche_variants = *niche_variants.start().min(&v)..=v;
820                     }
821
822                     if niche_variants.start() > niche_variants.end() {
823                         dataful_variant = None;
824                     }
825
826                     if let Some(i) = dataful_variant {
827                         let count = (niche_variants.end() - niche_variants.start() + 1) as u128;
828                         for (field_index, &field) in variants[i].iter().enumerate() {
829                             let niche = match self.find_niche(field)? {
830                                 Some(niche) => niche,
831                                 _ => continue,
832                             };
833                             let (niche_start, niche_scalar) = match niche.reserve(self, count) {
834                                 Some(pair) => pair,
835                                 None => continue,
836                             };
837
838                             let mut align = dl.aggregate_align;
839                             let st = variants.iter().enumerate().map(|(j, v)| {
840                                 let mut st = univariant_uninterned(v,
841                                     &def.repr, StructKind::AlwaysSized)?;
842                                 st.variants = Variants::Single { index: j };
843
844                                 align = align.max(st.align);
845
846                                 Ok(st)
847                             }).collect::<Result<Vec<_>, _>>()?;
848
849                             let offset = st[i].fields.offset(field_index) + niche.offset;
850                             let size = st[i].size;
851
852                             let mut abi = match st[i].abi {
853                                 Abi::Scalar(_) => Abi::Scalar(niche_scalar.clone()),
854                                 Abi::ScalarPair(ref first, ref second) => {
855                                     // We need to use scalar_unit to reset the
856                                     // valid range to the maximal one for that
857                                     // primitive, because only the niche is
858                                     // guaranteed to be initialised, not the
859                                     // other primitive.
860                                     if offset.bytes() == 0 {
861                                         Abi::ScalarPair(
862                                             niche_scalar.clone(),
863                                             scalar_unit(second.value),
864                                         )
865                                     } else {
866                                         Abi::ScalarPair(
867                                             scalar_unit(first.value),
868                                             niche_scalar.clone(),
869                                         )
870                                     }
871                                 }
872                                 _ => Abi::Aggregate { sized: true },
873                             };
874
875                             if st.iter().all(|v| v.abi.is_uninhabited()) {
876                                 abi = Abi::Uninhabited;
877                             }
878
879                             return Ok(tcx.intern_layout(LayoutDetails {
880                                 variants: Variants::NicheFilling {
881                                     dataful_variant: i,
882                                     niche_variants,
883                                     niche: niche_scalar,
884                                     niche_start,
885                                     variants: st,
886                                 },
887                                 fields: FieldPlacement::Arbitrary {
888                                     offsets: vec![offset],
889                                     memory_index: vec![0]
890                                 },
891                                 abi,
892                                 size,
893                                 align,
894                             }));
895                         }
896                     }
897                 }
898
899                 let (mut min, mut max) = (i128::max_value(), i128::min_value());
900                 let discr_type = def.repr.discr_type();
901                 let bits = Integer::from_attr(self, discr_type).size().bits();
902                 for (i, discr) in def.discriminants(tcx).enumerate() {
903                     if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
904                         continue;
905                     }
906                     let mut x = discr.val as i128;
907                     if discr_type.is_signed() {
908                         // sign extend the raw representation to be an i128
909                         x = (x << (128 - bits)) >> (128 - bits);
910                     }
911                     if x < min { min = x; }
912                     if x > max { max = x; }
913                 }
914                 // We might have no inhabited variants, so pretend there's at least one.
915                 if (min, max) == (i128::max_value(), i128::min_value()) {
916                     min = 0;
917                     max = 0;
918                 }
919                 assert!(min <= max, "discriminant range is {}...{}", min, max);
920                 let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max);
921
922                 let mut align = dl.aggregate_align;
923                 let mut size = Size::ZERO;
924
925                 // We're interested in the smallest alignment, so start large.
926                 let mut start_align = Align::from_bytes(256, 256).unwrap();
927                 assert_eq!(Integer::for_abi_align(dl, start_align), None);
928
929                 // repr(C) on an enum tells us to make a (tag, union) layout,
930                 // so we need to grow the prefix alignment to be at least
931                 // the alignment of the union. (This value is used both for
932                 // determining the alignment of the overall enum, and the
933                 // determining the alignment of the payload after the tag.)
934                 let mut prefix_align = min_ity.align(dl);
935                 if def.repr.c() {
936                     for fields in &variants {
937                         for field in fields {
938                             prefix_align = prefix_align.max(field.align);
939                         }
940                     }
941                 }
942
943                 // Create the set of structs that represent each variant.
944                 let mut layout_variants = variants.iter().enumerate().map(|(i, field_layouts)| {
945                     let mut st = univariant_uninterned(&field_layouts,
946                         &def.repr, StructKind::Prefixed(min_ity.size(), prefix_align))?;
947                     st.variants = Variants::Single { index: i };
948                     // Find the first field we can't move later
949                     // to make room for a larger discriminant.
950                     for field in st.fields.index_by_increasing_offset().map(|j| field_layouts[j]) {
951                         if !field.is_zst() || field.align.abi() != 1 {
952                             start_align = start_align.min(field.align);
953                             break;
954                         }
955                     }
956                     size = cmp::max(size, st.size);
957                     align = align.max(st.align);
958                     Ok(st)
959                 }).collect::<Result<Vec<_>, _>>()?;
960
961                 // Align the maximum variant size to the largest alignment.
962                 size = size.abi_align(align);
963
964                 if size.bytes() >= dl.obj_size_bound() {
965                     return Err(LayoutError::SizeOverflow(ty));
966                 }
967
968                 let typeck_ity = Integer::from_attr(dl, def.repr.discr_type());
969                 if typeck_ity < min_ity {
970                     // It is a bug if Layout decided on a greater discriminant size than typeck for
971                     // some reason at this point (based on values discriminant can take on). Mostly
972                     // because this discriminant will be loaded, and then stored into variable of
973                     // type calculated by typeck. Consider such case (a bug): typeck decided on
974                     // byte-sized discriminant, but layout thinks we need a 16-bit to store all
975                     // discriminant values. That would be a bug, because then, in codegen, in order
976                     // to store this 16-bit discriminant into 8-bit sized temporary some of the
977                     // space necessary to represent would have to be discarded (or layout is wrong
978                     // on thinking it needs 16 bits)
979                     bug!("layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
980                          min_ity, typeck_ity);
981                     // However, it is fine to make discr type however large (as an optimisation)
982                     // after this point – we’ll just truncate the value we load in codegen.
983                 }
984
985                 // Check to see if we should use a different type for the
986                 // discriminant. We can safely use a type with the same size
987                 // as the alignment of the first field of each variant.
988                 // We increase the size of the discriminant to avoid LLVM copying
989                 // padding when it doesn't need to. This normally causes unaligned
990                 // load/stores and excessive memcpy/memset operations. By using a
991                 // bigger integer size, LLVM can be sure about its contents and
992                 // won't be so conservative.
993
994                 // Use the initial field alignment
995                 let mut ity = if def.repr.c() || def.repr.int.is_some() {
996                     min_ity
997                 } else {
998                     Integer::for_abi_align(dl, start_align).unwrap_or(min_ity)
999                 };
1000
1001                 // If the alignment is not larger than the chosen discriminant size,
1002                 // don't use the alignment as the final size.
1003                 if ity <= min_ity {
1004                     ity = min_ity;
1005                 } else {
1006                     // Patch up the variants' first few fields.
1007                     let old_ity_size = min_ity.size();
1008                     let new_ity_size = ity.size();
1009                     for variant in &mut layout_variants {
1010                         match variant.fields {
1011                             FieldPlacement::Arbitrary { ref mut offsets, .. } => {
1012                                 for i in offsets {
1013                                     if *i <= old_ity_size {
1014                                         assert_eq!(*i, old_ity_size);
1015                                         *i = new_ity_size;
1016                                     }
1017                                 }
1018                                 // We might be making the struct larger.
1019                                 if variant.size <= old_ity_size {
1020                                     variant.size = new_ity_size;
1021                                 }
1022                             }
1023                             _ => bug!()
1024                         }
1025                     }
1026                 }
1027
1028                 let tag_mask = !0u128 >> (128 - ity.size().bits());
1029                 let tag = Scalar {
1030                     value: Int(ity, signed),
1031                     valid_range: (min as u128 & tag_mask)..=(max as u128 & tag_mask),
1032                 };
1033                 let mut abi = Abi::Aggregate { sized: true };
1034                 if tag.value.size(dl) == size {
1035                     abi = Abi::Scalar(tag.clone());
1036                 } else {
1037                     // Try to use a ScalarPair for all tagged enums.
1038                     let mut common_prim = None;
1039                     for (field_layouts, layout_variant) in variants.iter().zip(&layout_variants) {
1040                         let offsets = match layout_variant.fields {
1041                             FieldPlacement::Arbitrary { ref offsets, .. } => offsets,
1042                             _ => bug!(),
1043                         };
1044                         let mut fields = field_layouts
1045                             .iter()
1046                             .zip(offsets)
1047                             .filter(|p| !p.0.is_zst());
1048                         let (field, offset) = match (fields.next(), fields.next()) {
1049                             (None, None) => continue,
1050                             (Some(pair), None) => pair,
1051                             _ => {
1052                                 common_prim = None;
1053                                 break;
1054                             }
1055                         };
1056                         let prim = match field.details.abi {
1057                             Abi::Scalar(ref scalar) => scalar.value,
1058                             _ => {
1059                                 common_prim = None;
1060                                 break;
1061                             }
1062                         };
1063                         if let Some(pair) = common_prim {
1064                             // This is pretty conservative. We could go fancier
1065                             // by conflating things like i32 and u32, or even
1066                             // realising that (u8, u8) could just cohabit with
1067                             // u16 or even u32.
1068                             if pair != (prim, offset) {
1069                                 common_prim = None;
1070                                 break;
1071                             }
1072                         } else {
1073                             common_prim = Some((prim, offset));
1074                         }
1075                     }
1076                     if let Some((prim, offset)) = common_prim {
1077                         let pair = scalar_pair(tag.clone(), scalar_unit(prim));
1078                         let pair_offsets = match pair.fields {
1079                             FieldPlacement::Arbitrary {
1080                                 ref offsets,
1081                                 ref memory_index
1082                             } => {
1083                                 assert_eq!(memory_index, &[0, 1]);
1084                                 offsets
1085                             }
1086                             _ => bug!()
1087                         };
1088                         if pair_offsets[0] == Size::ZERO &&
1089                             pair_offsets[1] == *offset &&
1090                             align == pair.align &&
1091                             size == pair.size {
1092                             // We can use `ScalarPair` only when it matches our
1093                             // already computed layout (including `#[repr(C)]`).
1094                             abi = pair.abi;
1095                         }
1096                     }
1097                 }
1098
1099                 if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
1100                     abi = Abi::Uninhabited;
1101                 }
1102
1103                 tcx.intern_layout(LayoutDetails {
1104                     variants: Variants::Tagged {
1105                         tag,
1106                         variants: layout_variants,
1107                     },
1108                     fields: FieldPlacement::Arbitrary {
1109                         offsets: vec![Size::ZERO],
1110                         memory_index: vec![0]
1111                     },
1112                     abi,
1113                     align,
1114                     size
1115                 })
1116             }
1117
1118             // Types with no meaningful known layout.
1119             ty::Projection(_) | ty::Opaque(..) => {
1120                 let normalized = tcx.normalize_erasing_regions(param_env, ty);
1121                 if ty == normalized {
1122                     return Err(LayoutError::Unknown(ty));
1123                 }
1124                 tcx.layout_raw(param_env.and(normalized))?
1125             }
1126
1127             ty::Bound(..) |
1128             ty::UnnormalizedProjection(..) |
1129             ty::GeneratorWitness(..) |
1130             ty::Infer(_) => {
1131                 bug!("LayoutDetails::compute: unexpected type `{}`", ty)
1132             }
1133
1134             ty::Param(_) | ty::Error => {
1135                 return Err(LayoutError::Unknown(ty));
1136             }
1137         })
1138     }
1139
1140     /// This is invoked by the `layout_raw` query to record the final
1141     /// layout of each type.
1142     #[inline]
1143     fn record_layout_for_printing(&self, layout: TyLayout<'tcx>) {
1144         // If we are running with `-Zprint-type-sizes`, record layouts for
1145         // dumping later. Ignore layouts that are done with non-empty
1146         // environments or non-monomorphic layouts, as the user only wants
1147         // to see the stuff resulting from the final codegen session.
1148         if
1149             !self.tcx.sess.opts.debugging_opts.print_type_sizes ||
1150             layout.ty.has_param_types() ||
1151             layout.ty.has_self_ty() ||
1152             !self.param_env.caller_bounds.is_empty()
1153         {
1154             return;
1155         }
1156
1157         self.record_layout_for_printing_outlined(layout)
1158     }
1159
1160     fn record_layout_for_printing_outlined(&self, layout: TyLayout<'tcx>) {
1161         // (delay format until we actually need it)
1162         let record = |kind, packed, opt_discr_size, variants| {
1163             let type_desc = format!("{:?}", layout.ty);
1164             self.tcx.sess.code_stats.borrow_mut().record_type_size(kind,
1165                                                                    type_desc,
1166                                                                    layout.align,
1167                                                                    layout.size,
1168                                                                    packed,
1169                                                                    opt_discr_size,
1170                                                                    variants);
1171         };
1172
1173         let adt_def = match layout.ty.sty {
1174             ty::Adt(ref adt_def, _) => {
1175                 debug!("print-type-size t: `{:?}` process adt", layout.ty);
1176                 adt_def
1177             }
1178
1179             ty::Closure(..) => {
1180                 debug!("print-type-size t: `{:?}` record closure", layout.ty);
1181                 record(DataTypeKind::Closure, false, None, vec![]);
1182                 return;
1183             }
1184
1185             _ => {
1186                 debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
1187                 return;
1188             }
1189         };
1190
1191         let adt_kind = adt_def.adt_kind();
1192         let adt_packed = adt_def.repr.packed();
1193
1194         let build_variant_info = |n: Option<ast::Name>,
1195                                   flds: &[ast::Name],
1196                                   layout: TyLayout<'tcx>| {
1197             let mut min_size = Size::ZERO;
1198             let field_info: Vec<_> = flds.iter().enumerate().map(|(i, &name)| {
1199                 match layout.field(self, i) {
1200                     Err(err) => {
1201                         bug!("no layout found for field {}: `{:?}`", name, err);
1202                     }
1203                     Ok(field_layout) => {
1204                         let offset = layout.fields.offset(i);
1205                         let field_end = offset + field_layout.size;
1206                         if min_size < field_end {
1207                             min_size = field_end;
1208                         }
1209                         session::FieldInfo {
1210                             name: name.to_string(),
1211                             offset: offset.bytes(),
1212                             size: field_layout.size.bytes(),
1213                             align: field_layout.align.abi(),
1214                         }
1215                     }
1216                 }
1217             }).collect();
1218
1219             session::VariantInfo {
1220                 name: n.map(|n|n.to_string()),
1221                 kind: if layout.is_unsized() {
1222                     session::SizeKind::Min
1223                 } else {
1224                     session::SizeKind::Exact
1225                 },
1226                 align: layout.align.abi(),
1227                 size: if min_size.bytes() == 0 {
1228                     layout.size.bytes()
1229                 } else {
1230                     min_size.bytes()
1231                 },
1232                 fields: field_info,
1233             }
1234         };
1235
1236         match layout.variants {
1237             Variants::Single { index } => {
1238                 debug!("print-type-size `{:#?}` variant {}",
1239                        layout, adt_def.variants[index].name);
1240                 if !adt_def.variants.is_empty() {
1241                     let variant_def = &adt_def.variants[index];
1242                     let fields: Vec<_> =
1243                         variant_def.fields.iter().map(|f| f.ident.name).collect();
1244                     record(adt_kind.into(),
1245                            adt_packed,
1246                            None,
1247                            vec![build_variant_info(Some(variant_def.name),
1248                                                    &fields,
1249                                                    layout)]);
1250                 } else {
1251                     // (This case arises for *empty* enums; so give it
1252                     // zero variants.)
1253                     record(adt_kind.into(), adt_packed, None, vec![]);
1254                 }
1255             }
1256
1257             Variants::NicheFilling { .. } |
1258             Variants::Tagged { .. } => {
1259                 debug!("print-type-size `{:#?}` adt general variants def {}",
1260                        layout.ty, adt_def.variants.len());
1261                 let variant_infos: Vec<_> =
1262                     adt_def.variants.iter().enumerate().map(|(i, variant_def)| {
1263                         let fields: Vec<_> =
1264                             variant_def.fields.iter().map(|f| f.ident.name).collect();
1265                         build_variant_info(Some(variant_def.name),
1266                                            &fields,
1267                                            layout.for_variant(self, i))
1268                     })
1269                     .collect();
1270                 record(adt_kind.into(), adt_packed, match layout.variants {
1271                     Variants::Tagged { ref tag, .. } => Some(tag.value.size(self)),
1272                     _ => None
1273                 }, variant_infos);
1274             }
1275         }
1276     }
1277 }
1278
/// Type size "skeleton", i.e. the only information determining a type's size.
/// While this is conservative (aside from constant sizes, only pointers,
/// newtypes thereof, and null-pointer-optimized enums are allowed), it is
/// enough to statically check common usecases of transmute.
#[derive(Copy, Clone, Debug)]
pub enum SizeSkeleton<'tcx> {
    /// Any statically computable Layout.
    Known(Size),

    /// A potentially-fat pointer, whose exact size may depend on the
    /// (possibly still generic) pointee's unsized tail.
    Pointer {
        /// If true, this pointer is never null.
        non_zero: bool,
        /// The type which determines the unsized metadata, if any,
        /// of this pointer. Either a type parameter or a projection
        /// depending on one, with regions erased.
        tail: Ty<'tcx>
    }
}
1298
impl<'a, 'tcx> SizeSkeleton<'tcx> {
    /// Computes a size "skeleton" for `ty`: either the exact size, when a
    /// full layout can be computed, or the fact that the type is a
    /// (possibly non-null) pointer whose unsized tail is a type parameter
    /// or projection. Any other layout failure is propagated as `Err`.
    pub fn compute(ty: Ty<'tcx>,
                   tcx: TyCtxt<'a, 'tcx, 'tcx>,
                   param_env: ty::ParamEnv<'tcx>)
                   -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
        // Inference variables should have been resolved by the caller.
        debug_assert!(!ty.has_infer_types());

        // First try computing a static layout.
        let err = match tcx.layout_of(param_env.and(ty)) {
            Ok(layout) => {
                return Ok(SizeSkeleton::Known(layout.size));
            }
            // Keep the error around: it is returned whenever the skeleton
            // cannot be recovered structurally below.
            Err(err) => err
        };

        match ty.sty {
            // References and raw pointers: recover a pointer skeleton when
            // the pointee's tail is a type parameter or projection.
            ty::Ref(_, pointee, _) |
            ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                // `&T`/`&mut T` are non-null; `*const T`/`*mut T` may be null.
                let non_zero = !ty.is_unsafe_ptr();
                let tail = tcx.struct_tail(pointee);
                match tail.sty {
                    ty::Param(_) | ty::Projection(_) => {
                        debug_assert!(tail.has_param_types() || tail.has_self_ty());
                        Ok(SizeSkeleton::Pointer {
                            non_zero,
                            tail: tcx.erase_regions(&tail)
                        })
                    }
                    _ => {
                        bug!("SizeSkeleton::compute({}): layout errored ({}), yet \
                              tail `{}` is not a type parameter or a projection",
                             ty, err, tail)
                    }
                }
            }

            ty::Adt(def, substs) => {
                // Only newtypes and enums w/ nullable pointer optimization.
                if def.is_union() || def.variants.is_empty() || def.variants.len() > 2 {
                    return Err(err);
                }

                // Get a zero-sized variant or a pointer newtype.
                // Returns `Ok(Some(_))` if variant `i` consists of zero-sized
                // fields plus exactly one pointer-skeleton field, `Ok(None)`
                // if it is entirely zero-sized, and `Err` otherwise.
                let zero_or_ptr_variant = |i: usize| {
                    let fields = def.variants[i].fields.iter().map(|field| {
                        SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env)
                    });
                    let mut ptr = None;
                    for field in fields {
                        let field = field?;
                        match field {
                            SizeSkeleton::Known(size) => {
                                // Any field of known non-zero size disqualifies
                                // the variant.
                                if size.bytes() > 0 {
                                    return Err(err);
                                }
                            }
                            SizeSkeleton::Pointer {..} => {
                                // At most one pointer field is allowed.
                                if ptr.is_some() {
                                    return Err(err);
                                }
                                ptr = Some(field);
                            }
                        }
                    }
                    Ok(ptr)
                };

                let v0 = zero_or_ptr_variant(0)?;
                // Newtype.
                if def.variants.len() == 1 {
                    if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
                        return Ok(SizeSkeleton::Pointer {
                            // A scalar-valid-range restriction on the ADT that
                            // excludes zero also makes the pointer non-null.
                            non_zero: non_zero || match tcx.layout_scalar_valid_range(def.did) {
                                (Bound::Included(start), Bound::Unbounded) => start > 0,
                                (Bound::Included(start), Bound::Included(end)) =>
                                    0 < start && start < end,
                                _ => false,
                            },
                            tail,
                        });
                    } else {
                        return Err(err);
                    }
                }

                let v1 = zero_or_ptr_variant(1)?;
                // Nullable pointer enum optimization: one variant holds a
                // single non-null pointer, the other is entirely zero-sized.
                match (v0, v1) {
                    (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None) |
                    (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
                        // The null value now encodes the other variant, so the
                        // resulting pointer skeleton may be zero.
                        Ok(SizeSkeleton::Pointer {
                            non_zero: false,
                            tail,
                        })
                    }
                    _ => Err(err)
                }
            }

            ty::Projection(_) | ty::Opaque(..) => {
                // Retry after normalization, but only if it made progress;
                // otherwise we would recurse forever.
                let normalized = tcx.normalize_erasing_regions(param_env, ty);
                if ty == normalized {
                    Err(err)
                } else {
                    SizeSkeleton::compute(normalized, tcx, param_env)
                }
            }

            _ => Err(err)
        }
    }

    /// Returns `true` when the two skeletons are guaranteed to describe equal
    /// sizes: either both sizes are known and equal, or both are pointers
    /// with the same (region-erased) tail type.
    pub fn same_size(self, other: SizeSkeleton<'_>) -> bool {
        match (self, other) {
            (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
            (SizeSkeleton::Pointer { tail: a, .. },
             SizeSkeleton::Pointer { tail: b, .. }) => a == b,
            _ => false
        }
    }
}
1420
/// Extends `HasDataLayout` with access to a type context.
pub trait HasTyCtxt<'tcx>: HasDataLayout {
    /// Returns the `TyCtxt`, with both context lifetimes at the global level.
    fn tcx<'a>(&'a self) -> TyCtxt<'a, 'tcx, 'tcx>;
}
1424
impl<'a, 'gcx, 'tcx> HasDataLayout for TyCtxt<'a, 'gcx, 'tcx> {
    // Expose the target data layout stored on the type context.
    fn data_layout(&self) -> &TargetDataLayout {
        &self.data_layout
    }
}
1430
impl<'a, 'gcx, 'tcx> HasTyCtxt<'gcx> for TyCtxt<'a, 'gcx, 'tcx> {
    // A `TyCtxt` is its own type context, shortened to the global lifetime.
    fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'gcx> {
        self.global_tcx()
    }
}
1436
// `LayoutCx` forwards data-layout access to its wrapped context.
impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
    fn data_layout(&self) -> &TargetDataLayout {
        self.tcx.data_layout()
    }
}
1442
// `LayoutCx` forwards `TyCtxt` access to its wrapped context.
impl<'gcx, 'tcx, T: HasTyCtxt<'gcx>> HasTyCtxt<'gcx> for LayoutCx<'tcx, T> {
    fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'gcx> {
        self.tcx.tcx()
    }
}
1448
/// Abstracts over a value that is either a bare `T` or a `Result`
/// wrapping a `T`, so the same layout code can serve both infallible
/// and fallible `LayoutOf` implementations.
pub trait MaybeResult<T> {
    /// Wraps a successfully-computed value.
    fn from_ok(x: T) -> Self;
    /// Maps the success value, leaving any error case untouched.
    fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self;
}
1453
// The identity case: a bare `T` is always "ok".
impl<T> MaybeResult<T> for T {
    fn from_ok(x: T) -> Self {
        x
    }
    fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self {
        f(self)
    }
}
1462
1463 impl<T, E> MaybeResult<T> for Result<T, E> {
1464     fn from_ok(x: T) -> Self {
1465         Ok(x)
1466     }
1467     fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self {
1468         self.map(f)
1469     }
1470 }
1471
// Pins the generic `rustc_target` `TyLayout` to this crate's `Ty<'tcx>`.
pub type TyLayout<'tcx> = ::rustc_target::abi::TyLayout<'tcx, Ty<'tcx>>;
1473
impl<'a, 'tcx> LayoutOf for LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
    type Ty = Ty<'tcx>;
    type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;

    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout {
        // Reveal opaque types and normalize away regions/projections first,
        // so the key passed to the `layout_raw` query is canonical.
        let param_env = self.param_env.with_reveal_all();
        let ty = self.tcx.normalize_erasing_regions(param_env, ty);
        let details = self.tcx.layout_raw(param_env.and(ty))?;
        let layout = TyLayout {
            ty,
            details
        };

        // NB: This recording is normally disabled; when enabled, it
        // can however trigger recursive invocations of `layout_of`.
        // Therefore, we execute it *after* the main query has
        // completed, to avoid problems around recursive structures
        // and the like. (Admittedly, I wasn't able to reproduce a problem
        // here, but it seems like the right thing to do. -nmatsakis)
        self.record_layout_for_printing(layout);

        Ok(layout)
    }
}
1500
impl<'a, 'tcx> LayoutOf for LayoutCx<'tcx, ty::query::TyCtxtAt<'a, 'tcx, 'tcx>> {
    type Ty = Ty<'tcx>;
    type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;

    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout {
        // Reveal opaque types and normalize away regions/projections first,
        // so the key passed to the `layout_raw` query is canonical.
        let param_env = self.param_env.with_reveal_all();
        let ty = self.tcx.normalize_erasing_regions(param_env, ty);
        let details = self.tcx.layout_raw(param_env.and(ty))?;
        let layout = TyLayout {
            ty,
            details
        };

        // NB: This recording is normally disabled; when enabled, it
        // can however trigger recursive invocations of `layout_of`.
        // Therefore, we execute it *after* the main query has
        // completed, to avoid problems around recursive structures
        // and the like. (Admittedly, I wasn't able to reproduce a problem
        // here, but it seems like the right thing to do. -nmatsakis)
        // Deref `TyCtxtAt` into a plain-`TyCtxt` context, presumably
        // because `record_layout_for_printing` lives on that context.
        let cx = LayoutCx {
            tcx: *self.tcx,
            param_env: self.param_env
        };
        cx.record_layout_for_printing(layout);

        Ok(layout)
    }
}
1531
// Helper (inherent) `layout_of` methods to avoid pushing `LayoutCx` to users.
impl TyCtxt<'a, 'tcx, '_> {
    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    #[inline]
    pub fn layout_of(self, param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
                     -> Result<TyLayout<'tcx>, LayoutError<'tcx>> {
        // Delegate to the `LayoutOf` impl on `LayoutCx`, built over the
        // global (lifetime-erased) `TyCtxt`.
        let cx = LayoutCx {
            tcx: self.global_tcx(),
            param_env: param_env_and_ty.param_env
        };
        cx.layout_of(param_env_and_ty.value)
    }
}
1546
impl ty::query::TyCtxtAt<'a, 'tcx, '_> {
    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    #[inline]
    pub fn layout_of(self, param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
                     -> Result<TyLayout<'tcx>, LayoutError<'tcx>> {
        // Rebuild the context over the global `TyCtxt`, re-attaching this
        // `TyCtxtAt`'s span via `.at(self.span)`.
        let cx = LayoutCx {
            tcx: self.global_tcx().at(self.span),
            param_env: param_env_and_ty.param_env
        };
        cx.layout_of(param_env_and_ty.value)
    }
}
1560
impl<'a, 'tcx, C> TyLayoutMethods<'tcx, C> for Ty<'tcx>
    where C: LayoutOf<Ty = Ty<'tcx>> + HasTyCtxt<'tcx>,
          C::TyLayout: MaybeResult<TyLayout<'tcx>>
{
    /// Projects the layout of a single variant out of `this`.
    fn for_variant(this: TyLayout<'tcx>, cx: &C, variant_index: usize) -> TyLayout<'tcx> {
        let details = match this.variants {
            // The layout already describes exactly this variant: reuse it.
            Variants::Single { index } if index == variant_index => this.details,

            Variants::Single { index } => {
                // Deny calling for_variant more than once for non-Single enums.
                // (The value of `map_same` is discarded; only the assertion
                // inside the closure matters here.)
                cx.layout_of(this.ty).map_same(|layout| {
                    assert_eq!(layout.variants, Variants::Single { index });
                    layout
                });

                // The requested variant differs from the layout's single
                // variant, so synthesize a zero-sized, uninhabited layout
                // for it with the right number of (union-placed) fields.
                let fields = match this.ty.sty {
                    ty::Adt(def, _) => def.variants[variant_index].fields.len(),
                    _ => bug!()
                };
                let tcx = cx.tcx();
                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: variant_index },
                    fields: FieldPlacement::Union(fields),
                    abi: Abi::Uninhabited,
                    align: tcx.data_layout.i8_align,
                    size: Size::ZERO
                })
            }

            // Multi-variant layouts store per-variant details directly.
            Variants::NicheFilling { ref variants, .. } |
            Variants::Tagged { ref variants, .. } => {
                &variants[variant_index]
            }
        };

        // Whatever path we took, the result must describe a single variant.
        assert_eq!(details.variants, Variants::Single { index: variant_index });

        TyLayout {
            ty: this.ty,
            details
        }
    }

    /// Computes the layout of field `i` of `this`, by mapping the field
    /// index to the field's type and calling `cx.layout_of` on it.
    fn field(this: TyLayout<'tcx>, cx: &C, i: usize) -> C::TyLayout {
        let tcx = cx.tcx();
        cx.layout_of(match this.ty.sty {
            // Types without fields: asking for any field is a bug.
            ty::Bool |
            ty::Char |
            ty::Int(_) |
            ty::Uint(_) |
            ty::Float(_) |
            ty::FnPtr(_) |
            ty::Never |
            ty::FnDef(..) |
            ty::GeneratorWitness(..) |
            ty::Foreign(..) |
            ty::Dynamic(..) => {
                bug!("TyLayout::field_type({:?}): not applicable", this)
            }

            // Potentially-fat pointers.
            ty::Ref(_, pointee, _) |
            ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                assert!(i < this.fields.count());

                // Reuse the fat *T type as its own thin pointer data field.
                // This provides information about e.g. DST struct pointees
                // (which may have no non-DST form), and will work as long
                // as the `Abi` or `FieldPlacement` is checked by users.
                if i == 0 {
                    let nil = tcx.mk_unit();
                    let ptr_ty = if this.ty.is_unsafe_ptr() {
                        tcx.mk_mut_ptr(nil)
                    } else {
                        tcx.mk_mut_ref(tcx.types.re_static, nil)
                    };
                    // Compute the layout of a thin `()` pointer, then relabel
                    // it with the original fat pointer type.
                    return cx.layout_of(ptr_ty).map_same(|mut ptr_layout| {
                        ptr_layout.ty = this.ty;
                        ptr_layout
                    });
                }

                // Field 1 is the metadata; its type depends on the unsized tail.
                match tcx.struct_tail(pointee).sty {
                    ty::Slice(_) |
                    ty::Str => tcx.types.usize,
                    ty::Dynamic(_, _) => {
                        // Vtable pointer, modeled as a reference to an array
                        // of 3 `usize` entries.
                        tcx.mk_imm_ref(
                            tcx.types.re_static,
                            tcx.mk_array(tcx.types.usize, 3),
                        )
                        /* FIXME use actual fn pointers
                        Warning: naively computing the number of entries in the
                        vtable by counting the methods on the trait + methods on
                        all parent traits does not work, because some methods can
                        be not object safe and thus excluded from the vtable.
                        Increase this counter if you tried to implement this but
                        failed to do it without duplicating a lot of code from
                        other places in the compiler: 2
                        tcx.mk_tup(&[
                            tcx.mk_array(tcx.types.usize, 3),
                            tcx.mk_array(Option<fn()>),
                        ])
                        */
                    }
                    _ => bug!("TyLayout::field_type({:?}): not applicable", this)
                }
            }

            // Arrays and slices.
            ty::Array(element, _) |
            ty::Slice(element) => element,
            ty::Str => tcx.types.u8,

            // Tuples, generators and closures.
            ty::Closure(def_id, ref substs) => {
                substs.upvar_tys(def_id, tcx).nth(i).unwrap()
            }

            ty::Generator(def_id, ref substs, _) => {
                substs.field_tys(def_id, tcx).nth(i).unwrap()
            }

            ty::Tuple(tys) => tys[i],

            // SIMD vector types.
            ty::Adt(def, ..) if def.repr.simd() => {
                // Every lane shares the single SIMD element type.
                this.ty.simd_type(tcx)
            }

            // ADTs.
            ty::Adt(def, substs) => {
                match this.variants {
                    Variants::Single { index } => {
                        def.variants[index].fields[i].ty(tcx, substs)
                    }

                    // Discriminant field for enums (where applicable).
                    Variants::Tagged { tag: ref discr, .. } |
                    Variants::NicheFilling { niche: ref discr, .. } => {
                        assert_eq!(i, 0);
                        // Return a scalar layout for the discriminant itself,
                        // bypassing the `cx.layout_of` call below.
                        let layout = LayoutDetails::scalar(cx, discr.clone());
                        return MaybeResult::from_ok(TyLayout {
                            details: tcx.intern_layout(layout),
                            ty: discr.value.to_ty(tcx)
                        });
                    }
                }
            }

            // Unresolved or erroneous types should never reach layout.
            ty::Projection(_) | ty::UnnormalizedProjection(..) | ty::Bound(..) |
            ty::Opaque(..) | ty::Param(_) | ty::Infer(_) | ty::Error => {
                bug!("TyLayout::field_type: unexpected type `{}`", this.ty)
            }
        })
    }
}
1717
/// A "niche" of unused values found at some offset within a layout,
/// usable for encoding extra data (e.g. enum discriminants) for free.
struct Niche {
    // Byte offset of the niche scalar within the enclosing layout.
    offset: Size,
    // The scalar whose valid range leaves some values unused.
    scalar: Scalar,
    // How many values lie outside the scalar's valid range.
    available: u128,
}
1723
impl Niche {
    /// Tries to reserve `count` values out of this niche. Returns the first
    /// reserved value together with a scalar whose valid range has been
    /// extended to also cover the reserved values, or `None` when fewer
    /// than `count` unused values are available.
    fn reserve<'a, 'tcx>(
        &self,
        cx: &LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>>,
        count: u128,
    ) -> Option<(u128, Scalar)> {
        if count > self.available {
            return None;
        }
        let Scalar { value, valid_range: ref v } = self.scalar;
        let bits = value.size(cx).bits();
        assert!(bits <= 128);
        // Mask used to reduce arithmetic modulo 2^bits.
        let max_value = !0u128 >> (128 - bits);
        // Reserved values start right after the current valid range's end
        // and may wrap around past the maximum representable value.
        let start = v.end().wrapping_add(1) & max_value;
        let end = v.end().wrapping_add(count) & max_value;
        Some((start, Scalar { value, valid_range: *v.start()..=end }))
    }
}
1742
impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
    /// Find the offset of a niche leaf field, starting from
    /// the given type and recursing through aggregates.
    // FIXME(eddyb) traverse already optimized enums.
    fn find_niche(&self, layout: TyLayout<'tcx>) -> Result<Option<Niche>, LayoutError<'tcx>> {
        // Builds a `Niche` for a scalar at `offset`, if its valid range
        // leaves any values unused.
        let scalar_niche = |scalar: &Scalar, offset| {
            let Scalar { value, valid_range: ref v } = *scalar;

            let bits = value.size(self).bits();
            assert!(bits <= 128);
            let max_value = !0u128 >> (128 - bits);

            // Find out how many values are outside the valid range.
            let available = if v.start() <= v.end() {
                // Contiguous valid range: unused values lie below the start
                // and above the end.
                v.start() + (max_value - v.end())
            } else {
                // Wrapping valid range: unused values lie strictly between
                // end and start.
                v.start() - v.end() - 1
            };

            // Give up if there is no niche value available.
            if available == 0 {
                return None;
            }

            Some(Niche { offset, scalar: scalar.clone(), available })
        };

        // Locals variables which live across yields are stored
        // in the generator type as fields. These may be uninitialized
        // so we don't look for niches there.
        if let ty::Generator(..) = layout.ty.sty {
            return Ok(None);
        }

        match layout.abi {
            Abi::Scalar(ref scalar) => {
                return Ok(scalar_niche(scalar, Size::ZERO));
            }
            Abi::ScalarPair(ref a, ref b) => {
                // HACK(nox): We iter on `b` and then `a` because `max_by_key`
                // returns the last maximum.
                // `b`'s offset is `a`'s size rounded up to `b`'s alignment.
                let niche = iter::once((b, a.value.size(self).abi_align(b.value.align(self))))
                    .chain(iter::once((a, Size::ZERO)))
                    .filter_map(|(scalar, offset)| scalar_niche(scalar, offset))
                    .max_by_key(|niche| niche.available);
                return Ok(niche);
            }
            Abi::Vector { ref element, .. } => {
                return Ok(scalar_niche(element, Size::ZERO));
            }
            _ => {}
        }

        // Perhaps one of the fields is non-zero, let's recurse and find out.
        if let FieldPlacement::Union(_) = layout.fields {
            // Only Rust enums have safe-to-inspect fields
            // (a discriminant), other unions are unsafe.
            if let Variants::Single { .. } = layout.variants {
                return Ok(None);
            }
        }
        if let FieldPlacement::Array { .. } = layout.fields {
            // All array elements share one layout, so inspecting the first
            // element (at offset 0) suffices.
            if layout.fields.count() > 0 {
                return self.find_niche(layout.field(self, 0)?);
            } else {
                return Ok(None);
            }
        }
        // Arbitrary placement: recurse into every field and keep the niche
        // with the most available values.
        let mut niche = None;
        let mut available = 0;
        for i in 0..layout.fields.count() {
            if let Some(mut c) = self.find_niche(layout.field(self, i)?)? {
                if c.available > available {
                    available = c.available;
                    // Make the offset relative to the enclosing layout.
                    c.offset += layout.fields.offset(i);
                    niche = Some(c);
                }
            }
        }
        Ok(niche)
    }
}
1825
impl<'a> HashStable<StableHashingContext<'a>> for Variants {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        use ty::layout::Variants::*;
        // Hash the enum discriminant first so different variants never
        // collide merely because their fields hash alike.
        mem::discriminant(self).hash_stable(hcx, hasher);

        match *self {
            Single { index } => {
                index.hash_stable(hcx, hasher);
            }
            Tagged {
                ref tag,
                ref variants,
            } => {
                tag.hash_stable(hcx, hasher);
                variants.hash_stable(hcx, hasher);
            }
            NicheFilling {
                dataful_variant,
                ref niche_variants,
                ref niche,
                niche_start,
                ref variants,
            } => {
                dataful_variant.hash_stable(hcx, hasher);
                // Hash the variant range via its two endpoints.
                niche_variants.start().hash_stable(hcx, hasher);
                niche_variants.end().hash_stable(hcx, hasher);
                niche.hash_stable(hcx, hasher);
                niche_start.hash_stable(hcx, hasher);
                variants.hash_stable(hcx, hasher);
            }
        }
    }
}
1861
impl<'a> HashStable<StableHashingContext<'a>> for FieldPlacement {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        use ty::layout::FieldPlacement::*;
        // Hash the enum discriminant first so different variants never
        // collide merely because their fields hash alike.
        mem::discriminant(self).hash_stable(hcx, hasher);

        match *self {
            Union(count) => {
                count.hash_stable(hcx, hasher);
            }
            Array { count, stride } => {
                count.hash_stable(hcx, hasher);
                stride.hash_stable(hcx, hasher);
            }
            Arbitrary { ref offsets, ref memory_index } => {
                offsets.hash_stable(hcx, hasher);
                memory_index.hash_stable(hcx, hasher);
            }
        }
    }
}
1884
impl<'a> HashStable<StableHashingContext<'a>> for Abi {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        use ty::layout::Abi::*;
        // Hash the enum discriminant first so different variants never
        // collide merely because their fields hash alike.
        mem::discriminant(self).hash_stable(hcx, hasher);

        match *self {
            // `Uninhabited` carries no data beyond its discriminant.
            Uninhabited => {}
            Scalar(ref value) => {
                value.hash_stable(hcx, hasher);
            }
            ScalarPair(ref a, ref b) => {
                a.hash_stable(hcx, hasher);
                b.hash_stable(hcx, hasher);
            }
            Vector { ref element, count } => {
                element.hash_stable(hcx, hasher);
                count.hash_stable(hcx, hasher);
            }
            Aggregate { sized } => {
                sized.hash_stable(hcx, hasher);
            }
        }
    }
}
1911
1912 impl<'a> HashStable<StableHashingContext<'a>> for Scalar {
1913     fn hash_stable<W: StableHasherResult>(&self,
1914                                           hcx: &mut StableHashingContext<'a>,
1915                                           hasher: &mut StableHasher<W>) {
1916         let Scalar { value, ref valid_range } = *self;
1917         value.hash_stable(hcx, hasher);
1918         valid_range.start().hash_stable(hcx, hasher);
1919         valid_range.end().hash_stable(hcx, hasher);
1920     }
1921 }
1922
// Stable-hash impls for layout types whose fields/variants can all be
// hashed directly, generated via `impl_stable_hash_for!`.
impl_stable_hash_for!(struct ::ty::layout::LayoutDetails {
    variants,
    fields,
    abi,
    size,
    align
});

impl_stable_hash_for!(enum ::ty::layout::Integer {
    I8,
    I16,
    I32,
    I64,
    I128
});

impl_stable_hash_for!(enum ::ty::layout::Primitive {
    Int(integer, signed),
    Float(fty),
    Pointer
});
1944
impl<'gcx> HashStable<StableHashingContext<'gcx>> for Align {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'gcx>,
                                          hasher: &mut StableHasher<W>) {
        // Hash both the ABI-mandated and the preferred alignment.
        self.abi().hash_stable(hcx, hasher);
        self.pref().hash_stable(hcx, hasher);
    }
}
1953
impl<'gcx> HashStable<StableHashingContext<'gcx>> for Size {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'gcx>,
                                          hasher: &mut StableHasher<W>) {
        // A `Size` is fully determined by its byte count.
        self.bytes().hash_stable(hcx, hasher);
    }
}
1961
impl<'a, 'gcx> HashStable<StableHashingContext<'a>> for LayoutError<'gcx>
{
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        use ty::layout::LayoutError::*;
        // Hash the enum discriminant first so the two variants never collide.
        mem::discriminant(self).hash_stable(hcx, hasher);

        match *self {
            // Both variants carry just the offending type.
            Unknown(t) |
            SizeOverflow(t) => t.hash_stable(hcx, hasher)
        }
    }
}