rust.git: src/librustc/ty/layout.rs
1 // Copyright 2016 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
4 //
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
10
11 use session::{self, DataTypeKind};
12 use ty::{self, Ty, TyCtxt, TypeFoldable, ReprOptions};
13
14 use syntax::ast::{self, IntTy, UintTy};
15 use syntax::attr;
16 use syntax_pos::DUMMY_SP;
17
18 use std::cmp;
19 use std::fmt;
20 use std::i128;
21 use std::iter;
22 use std::mem;
23
24 use ich::StableHashingContext;
25 use rustc_data_structures::stable_hasher::{HashStable, StableHasher,
26                                            StableHasherResult};
27
28 pub use rustc_target::abi::*;
29
30 pub trait IntegerExt {
31     fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, signed: bool) -> Ty<'tcx>;
32     fn from_attr<C: HasDataLayout>(cx: C, ity: attr::IntType) -> Integer;
33     fn repr_discr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
34                   ty: Ty<'tcx>,
35                   repr: &ReprOptions,
36                   min: i128,
37                   max: i128)
38                   -> (Integer, bool);
39 }
40
41 impl IntegerExt for Integer {
42     fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, signed: bool) -> Ty<'tcx> {
43         match (*self, signed) {
44             (I8, false) => tcx.types.u8,
45             (I16, false) => tcx.types.u16,
46             (I32, false) => tcx.types.u32,
47             (I64, false) => tcx.types.u64,
48             (I128, false) => tcx.types.u128,
49             (I8, true) => tcx.types.i8,
50             (I16, true) => tcx.types.i16,
51             (I32, true) => tcx.types.i32,
52             (I64, true) => tcx.types.i64,
53             (I128, true) => tcx.types.i128,
54         }
55     }
56
57     /// Get the Integer type from an attr::IntType.
58     fn from_attr<C: HasDataLayout>(cx: C, ity: attr::IntType) -> Integer {
59         let dl = cx.data_layout();
60
61         match ity {
62             attr::SignedInt(IntTy::I8) | attr::UnsignedInt(UintTy::U8) => I8,
63             attr::SignedInt(IntTy::I16) | attr::UnsignedInt(UintTy::U16) => I16,
64             attr::SignedInt(IntTy::I32) | attr::UnsignedInt(UintTy::U32) => I32,
65             attr::SignedInt(IntTy::I64) | attr::UnsignedInt(UintTy::U64) => I64,
66             attr::SignedInt(IntTy::I128) | attr::UnsignedInt(UintTy::U128) => I128,
67             attr::SignedInt(IntTy::Isize) | attr::UnsignedInt(UintTy::Usize) => {
68                 dl.ptr_sized_integer()
69             }
70         }
71     }
72
73     /// Find the appropriate Integer type and signedness for the given
74     /// signed discriminant range and #[repr] attribute.
75     /// N.B.: u128 values above i128::MAX will be treated as signed, but
76     /// that shouldn't affect anything, other than maybe debuginfo.
77     fn repr_discr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
78                   ty: Ty<'tcx>,
79                   repr: &ReprOptions,
80                   min: i128,
81                   max: i128)
82                   -> (Integer, bool) {
83         // Theoretically, negative values could be larger in unsigned representation
84         // than the unsigned representation of the signed minimum. However, if there
85         // are any negative values, the only valid unsigned representation is u128
86         // which can fit all i128 values, so the result remains unaffected.
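           //
           // For example: a discriminant range of -1..=100 gives signed_fit == I8
           // (both ends fit in an i8), while a range of 0..=300 gives
           // unsigned_fit == I16; a `#[repr(u8)]` hint on the latter would trip
           // the `bug!` below, since the hinted integer is smaller than the fit.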
87         let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
88         let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));
89
90         let mut min_from_extern = None;
91         let min_default = I8;
92
93         if let Some(ity) = repr.int {
94             let discr = Integer::from_attr(tcx, ity);
95             let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
96             if discr < fit {
97                 bug!("Integer::repr_discr: `#[repr]` hint too small for \
98                   discriminant range of enum `{}`", ty)
99             }
100             return (discr, ity.is_signed());
101         }
102
103         if repr.c() {
104             match &tcx.sess.target.target.arch[..] {
105                 // WARNING: the ARM EABI has two variants; the one corresponding
106                 // to `at_least == I32` appears to be used on Linux and NetBSD,
107                 // but some systems may use the variant corresponding to no
108                 // lower bound.  However, we don't run on those yet...?
109                 "arm" => min_from_extern = Some(I32),
110                 _ => min_from_extern = Some(I32),
111             }
112         }
113
114         let at_least = min_from_extern.unwrap_or(min_default);
115
116         // If there are no negative values, we can use the unsigned fit.
117         if min >= 0 {
118             (cmp::max(unsigned_fit, at_least), false)
119         } else {
120             (cmp::max(signed_fit, at_least), true)
121         }
122     }
123 }
124
125 pub trait PrimitiveExt {
126     fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Ty<'tcx>;
127 }
128
129 impl PrimitiveExt for Primitive {
130     fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Ty<'tcx> {
131         match *self {
132             Int(i, signed) => i.to_ty(tcx, signed),
133             Float(FloatTy::F32) => tcx.types.f32,
134             Float(FloatTy::F64) => tcx.types.f64,
135             Pointer => tcx.mk_mut_ptr(tcx.mk_nil()),
136         }
137     }
138 }
139
140 /// The first half of a fat pointer.
141 ///
142 /// - For a trait object, this is the address of the box.
143 /// - For a slice, this is the base address.
144 pub const FAT_PTR_ADDR: usize = 0;
145
146 /// The second half of a fat pointer.
147 ///
148 /// - For a trait object, this is the address of the vtable.
149 /// - For a slice, this is the length.
150 pub const FAT_PTR_EXTRA: usize = 1;
151
152 #[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable)]
153 pub enum LayoutError<'tcx> {
154     Unknown(Ty<'tcx>),
155     SizeOverflow(Ty<'tcx>)
156 }
157
158 impl<'tcx> fmt::Display for LayoutError<'tcx> {
159     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
160         match *self {
161             LayoutError::Unknown(ty) => {
162                 write!(f, "the type `{:?}` has an unknown layout", ty)
163             }
164             LayoutError::SizeOverflow(ty) => {
165                 write!(f, "the type `{:?}` is too big for the current architecture", ty)
166             }
167         }
168     }
169 }
170
171 fn layout_raw<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
172                         query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
173                         -> Result<&'tcx LayoutDetails, LayoutError<'tcx>>
174 {
175     ty::tls::with_related_context(tcx, move |icx| {
176         let rec_limit = *tcx.sess.recursion_limit.get();
177         let (param_env, ty) = query.into_parts();
178
179         if icx.layout_depth > rec_limit {
180             tcx.sess.fatal(
181                 &format!("overflow representing the type `{}`", ty));
182         }
183
184         // Update the ImplicitCtxt to increase the layout_depth
185         let icx = ty::tls::ImplicitCtxt {
186             layout_depth: icx.layout_depth + 1,
187             ..icx.clone()
188         };
189
190         ty::tls::enter_context(&icx, |_| {
191             let cx = LayoutCx { tcx, param_env };
192             cx.layout_raw_uncached(ty)
193         })
194     })
195 }
196
197 pub fn provide(providers: &mut ty::query::Providers) {
198     *providers = ty::query::Providers {
199         layout_raw,
200         ..*providers
201     };
202 }
203
204 #[derive(Copy, Clone)]
205 pub struct LayoutCx<'tcx, C> {
206     pub tcx: C,
207     pub param_env: ty::ParamEnv<'tcx>
208 }
209
210 impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
211     fn layout_raw_uncached(self, ty: Ty<'tcx>)
212                            -> Result<&'tcx LayoutDetails, LayoutError<'tcx>> {
213         let tcx = self.tcx;
214         let param_env = self.param_env;
215         let dl = self.data_layout();
216         let scalar_unit = |value: Primitive| {
217             let bits = value.size(dl).bits();
218             assert!(bits <= 128);
219             Scalar {
220                 value,
221                 valid_range: 0..=(!0 >> (128 - bits))
222             }
223         };
224         let scalar = |value: Primitive| {
225             tcx.intern_layout(LayoutDetails::scalar(self, scalar_unit(value)))
226         };
227         let scalar_pair = |a: Scalar, b: Scalar| {
228             let align = a.value.align(dl).max(b.value.align(dl)).max(dl.aggregate_align);
229             let b_offset = a.value.size(dl).abi_align(b.value.align(dl));
230             let size = (b_offset + b.value.size(dl)).abi_align(align);
231             LayoutDetails {
232                 variants: Variants::Single { index: 0 },
233                 fields: FieldPlacement::Arbitrary {
234                     offsets: vec![Size::ZERO, b_offset],
235                     memory_index: vec![0, 1]
236                 },
237                 abi: Abi::ScalarPair(a, b),
238                 align,
239                 size
240             }
241         };
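            // For example, on a typical 64-bit data layout scalar_pair(Pointer, Int(I64, false))
            // yields offsets [0, 8], size 16 and 8-byte alignment; this is the shape reused
            // below for fat pointers and for two-field `ScalarPair` aggregates.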
242
243         #[derive(Copy, Clone, Debug)]
244         enum StructKind {
245             /// A tuple, closure, or univariant which cannot be coerced to unsized.
246             AlwaysSized,
247             /// A univariant, the last field of which may be coerced to unsized.
248             MaybeUnsized,
249             /// A univariant, but with a prefix of an arbitrary size & alignment (e.g. enum tag).
250             Prefixed(Size, Align),
251         }
252         let univariant_uninterned = |fields: &[TyLayout], repr: &ReprOptions, kind| {
253             let packed = repr.packed();
254             if packed && repr.align > 0 {
255                 bug!("struct cannot be packed and aligned");
256             }
257
258             let pack = {
259                 let pack = repr.pack as u64;
260                 Align::from_bytes(pack, pack).unwrap()
261             };
262
263             let mut align = if packed {
264                 dl.i8_align
265             } else {
266                 dl.aggregate_align
267             };
268
269             let mut sized = true;
270             let mut offsets = vec![Size::ZERO; fields.len()];
271             let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();
272
273             let mut optimize = !repr.inhibit_struct_field_reordering_opt();
274             if let StructKind::Prefixed(_, align) = kind {
275                 optimize &= align.abi() == 1;
276             }
277
278             if optimize {
279                 let end = if let StructKind::MaybeUnsized = kind {
280                     fields.len() - 1
281                 } else {
282                     fields.len()
283                 };
284                 let optimizing = &mut inverse_memory_index[..end];
285                 let field_align = |f: &TyLayout| {
286                     if packed { f.align.min(pack).abi() } else { f.align.abi() }
287                 };
288                 match kind {
289                     StructKind::AlwaysSized |
290                     StructKind::MaybeUnsized => {
291                         optimizing.sort_by_key(|&x| {
292                             // Place ZSTs first to avoid "interesting offsets",
293                             // especially with only one or two non-ZST fields.
294                             let f = &fields[x as usize];
295                             (!f.is_zst(), cmp::Reverse(field_align(f)))
296                         });
297                     }
298                     StructKind::Prefixed(..) => {
299                         optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
300                     }
301                 }
302             }
303
304             // inverse_memory_index holds field indices by increasing memory offset.
305             // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
306             // We now write field offsets to the corresponding offset slot;
307             // field 5 with offset 0 puts 0 in offsets[5].
308             // At the bottom of this function, we use inverse_memory_index to produce memory_index.
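                // For example, with three fields where field 2 is placed first in memory,
                // then field 0, then field 1: inverse_memory_index is [2, 0, 1], the loop
                // below fills offsets[2], offsets[0] and offsets[1] in that order, and the
                // final memory_index becomes [1, 2, 0].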
309
310             let mut offset = Size::ZERO;
311
312             if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
313                 if packed {
314                     let prefix_align = prefix_align.min(pack);
315                     align = align.max(prefix_align);
316                 } else {
317                     align = align.max(prefix_align);
318                 }
319                 offset = prefix_size.abi_align(prefix_align);
320             }
321
322             for &i in &inverse_memory_index {
323                 let field = fields[i as usize];
324                 if !sized {
325                     bug!("univariant: field #{} of `{}` comes after unsized field",
326                         offsets.len(), ty);
327                 }
328
329                 if field.is_unsized() {
330                     sized = false;
331                 }
332
333                 // Invariant: offset < dl.obj_size_bound() <= 1<<61
334                 if packed {
335                     let field_pack = field.align.min(pack);
336                     offset = offset.abi_align(field_pack);
337                     align = align.max(field_pack);
338                 } else {
340                     offset = offset.abi_align(field.align);
341                     align = align.max(field.align);
342                 }
343
344                 debug!("univariant offset: {:?} field: {:#?}", offset, field);
345                 offsets[i as usize] = offset;
346
347                 offset = offset.checked_add(field.size, dl)
348                     .ok_or(LayoutError::SizeOverflow(ty))?;
349             }
350
351             if repr.align > 0 {
352                 let repr_align = repr.align as u64;
353                 align = align.max(Align::from_bytes(repr_align, repr_align).unwrap());
354                 debug!("univariant repr_align: {:?}", repr_align);
355             }
356
357             debug!("univariant min_size: {:?}", offset);
358             let min_size = offset;
359
360             // As stated above, inverse_memory_index holds field indices by increasing offset.
361             // This makes it an already-sorted view of the offsets vec.
362             // To invert it, consider: if field 5 has offset 0, then inverse_memory_index[0]
363             // is 5 and memory_index[5] should be 0; in general,
364             // memory_index[inverse_memory_index[i]] = i for every position i.
365             // Note: if we didn't optimize, inverse_memory_index is the identity permutation and is already right.
366
367             let mut memory_index;
368             if optimize {
369                 memory_index = vec![0; inverse_memory_index.len()];
370
371                 for i in 0..inverse_memory_index.len() {
372                     memory_index[inverse_memory_index[i] as usize] = i as u32;
373                 }
374             } else {
375                 memory_index = inverse_memory_index;
376             }
377
378             let size = min_size.abi_align(align);
379             let mut abi = Abi::Aggregate { sized };
380
381             // Unpack newtype ABIs and find scalar pairs.
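                // For example, a newtype like `struct Wrapper(u64);` (without `#[repr(C)]`)
                // ends up with the u64's `Abi::Scalar`, and a two-field struct such as
                // `(u32, u32)` becomes an `Abi::ScalarPair`, provided its offsets, size and
                // alignment match the layout already computed above.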
382             if sized && size.bytes() > 0 {
383                 // All other fields must be ZSTs, and we need them to all start at 0.
384                 let mut zst_offsets =
385                     offsets.iter().enumerate().filter(|&(i, _)| fields[i].is_zst());
386                 if zst_offsets.all(|(_, o)| o.bytes() == 0) {
387                     let mut non_zst_fields =
388                         fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());
389
390                     match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
391                         // We have exactly one non-ZST field.
392                         (Some((i, field)), None, None) => {
393                             // Field fills the struct and it has a scalar or scalar pair ABI.
394                             if offsets[i].bytes() == 0 &&
395                                align.abi() == field.align.abi() &&
396                                size == field.size {
397                                 match field.abi {
398                                     // For plain scalars, or vectors of them, we can't unpack
399                                     // newtypes for `#[repr(C)]`, as that affects C ABIs.
400                                     Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
401                                         abi = field.abi.clone();
402                                     }
403                                     // But scalar pairs are Rust-specific and get
404                                     // treated as aggregates by C ABIs anyway.
405                                     Abi::ScalarPair(..) => {
406                                         abi = field.abi.clone();
407                                     }
408                                     _ => {}
409                                 }
410                             }
411                         }
412
413                         // Two non-ZST fields, and they're both scalars.
414                         (Some((i, &TyLayout {
415                             details: &LayoutDetails { abi: Abi::Scalar(ref a), .. }, ..
416                         })), Some((j, &TyLayout {
417                             details: &LayoutDetails { abi: Abi::Scalar(ref b), .. }, ..
418                         })), None) => {
419                             // Order by the memory placement, not source order.
420                             let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
421                                 ((i, a), (j, b))
422                             } else {
423                                 ((j, b), (i, a))
424                             };
425                             let pair = scalar_pair(a.clone(), b.clone());
426                             let pair_offsets = match pair.fields {
427                                 FieldPlacement::Arbitrary {
428                                     ref offsets,
429                                     ref memory_index
430                                 } => {
431                                     assert_eq!(memory_index, &[0, 1]);
432                                     offsets
433                                 }
434                                 _ => bug!()
435                             };
436                             if offsets[i] == pair_offsets[0] &&
437                                offsets[j] == pair_offsets[1] &&
438                                align == pair.align &&
439                                size == pair.size {
440                                 // We can use `ScalarPair` only when it matches our
441                                 // already computed layout (including `#[repr(C)]`).
442                                 abi = pair.abi;
443                             }
444                         }
445
446                         _ => {}
447                     }
448                 }
449             }
450
451             if sized && fields.iter().any(|f| f.abi == Abi::Uninhabited) {
452                 abi = Abi::Uninhabited;
453             }
454
455             Ok(LayoutDetails {
456                 variants: Variants::Single { index: 0 },
457                 fields: FieldPlacement::Arbitrary {
458                     offsets,
459                     memory_index
460                 },
461                 abi,
462                 align,
463                 size
464             })
465         };
466         let univariant = |fields: &[TyLayout], repr: &ReprOptions, kind| {
467             Ok(tcx.intern_layout(univariant_uninterned(fields, repr, kind)?))
468         };
469         debug_assert!(!ty.has_infer_types());
470
471         Ok(match ty.sty {
472             // Basic scalars.
473             ty::Bool => {
474                 tcx.intern_layout(LayoutDetails::scalar(self, Scalar {
475                     value: Int(I8, false),
476                     valid_range: 0..=1
477                 }))
478             }
479             ty::Char => {
480                 tcx.intern_layout(LayoutDetails::scalar(self, Scalar {
481                     value: Int(I32, false),
482                     valid_range: 0..=0x10FFFF
483                 }))
484             }
485             ty::Int(ity) => {
486                 scalar(Int(Integer::from_attr(dl, attr::SignedInt(ity)), true))
487             }
488             ty::Uint(ity) => {
489                 scalar(Int(Integer::from_attr(dl, attr::UnsignedInt(ity)), false))
490             }
491             ty::Float(fty) => scalar(Float(fty)),
492             ty::FnPtr(_) => {
493                 let mut ptr = scalar_unit(Pointer);
494                 ptr.valid_range = 1..=*ptr.valid_range.end();
495                 tcx.intern_layout(LayoutDetails::scalar(self, ptr))
496             }
497
498             // The never type.
499             ty::Never => {
500                 tcx.intern_layout(LayoutDetails {
501                     variants: Variants::Single { index: 0 },
502                     fields: FieldPlacement::Union(0),
503                     abi: Abi::Uninhabited,
504                     align: dl.i8_align,
505                     size: Size::ZERO
506                 })
507             }
508
509             // Potentially-fat pointers.
510             ty::Ref(_, pointee, _) |
511             ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
512                 let mut data_ptr = scalar_unit(Pointer);
513                 if !ty.is_unsafe_ptr() {
514                     data_ptr.valid_range = 1..=*data_ptr.valid_range.end();
515                 }
516
517                 let pointee = tcx.normalize_erasing_regions(param_env, pointee);
518                 if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
519                     return Ok(tcx.intern_layout(LayoutDetails::scalar(self, data_ptr)));
520                 }
521
522                 let unsized_part = tcx.struct_tail(pointee);
523                 let metadata = match unsized_part.sty {
524                     ty::Foreign(..) => {
525                         return Ok(tcx.intern_layout(LayoutDetails::scalar(self, data_ptr)));
526                     }
527                     ty::Slice(_) | ty::Str => {
528                         scalar_unit(Int(dl.ptr_sized_integer(), false))
529                     }
530                     ty::Dynamic(..) => {
531                         let mut vtable = scalar_unit(Pointer);
532                         vtable.valid_range = 1..=*vtable.valid_range.end();
533                         vtable
534                     }
535                     _ => return Err(LayoutError::Unknown(unsized_part))
536                 };
537
538                 // Effectively a (ptr, meta) tuple.
539                 tcx.intern_layout(scalar_pair(data_ptr, metadata))
540             }
541
542             // Arrays and slices.
543             ty::Array(element, mut count) => {
544                 if count.has_projections() {
545                     count = tcx.normalize_erasing_regions(param_env, count);
546                     if count.has_projections() {
547                         return Err(LayoutError::Unknown(ty));
548                     }
549                 }
550
551                 let element = self.layout_of(element)?;
552                 let count = count.unwrap_usize(tcx);
553                 let size = element.size.checked_mul(count, dl)
554                     .ok_or(LayoutError::SizeOverflow(ty))?;
555
556                 tcx.intern_layout(LayoutDetails {
557                     variants: Variants::Single { index: 0 },
558                     fields: FieldPlacement::Array {
559                         stride: element.size,
560                         count
561                     },
562                     abi: Abi::Aggregate { sized: true },
563                     align: element.align,
564                     size
565                 })
566             }
567             ty::Slice(element) => {
568                 let element = self.layout_of(element)?;
569                 tcx.intern_layout(LayoutDetails {
570                     variants: Variants::Single { index: 0 },
571                     fields: FieldPlacement::Array {
572                         stride: element.size,
573                         count: 0
574                     },
575                     abi: Abi::Aggregate { sized: false },
576                     align: element.align,
577                     size: Size::ZERO
578                 })
579             }
580             ty::Str => {
581                 tcx.intern_layout(LayoutDetails {
582                     variants: Variants::Single { index: 0 },
583                     fields: FieldPlacement::Array {
584                         stride: Size::from_bytes(1),
585                         count: 0
586                     },
587                     abi: Abi::Aggregate { sized: false },
588                     align: dl.i8_align,
589                     size: Size::ZERO
590                 })
591             }
592
593             // Odd unit types.
594             ty::FnDef(..) => {
595                 univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?
596             }
597             ty::Dynamic(..) | ty::Foreign(..) => {
598                 let mut unit = univariant_uninterned(&[], &ReprOptions::default(),
599                   StructKind::AlwaysSized)?;
600                 match unit.abi {
601                     Abi::Aggregate { ref mut sized } => *sized = false,
602                     _ => bug!()
603                 }
604                 tcx.intern_layout(unit)
605             }
606
607             // Tuples, generators and closures.
608             ty::Generator(def_id, ref substs, _) => {
609                 let tys = substs.field_tys(def_id, tcx);
610                 univariant(&tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
611                     &ReprOptions::default(),
612                     StructKind::AlwaysSized)?
613             }
614
615             ty::Closure(def_id, ref substs) => {
616                 let tys = substs.upvar_tys(def_id, tcx);
617                 univariant(&tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
618                     &ReprOptions::default(),
619                     StructKind::AlwaysSized)?
620             }
621
622             ty::Tuple(tys) => {
623                 let kind = if tys.len() == 0 {
624                     StructKind::AlwaysSized
625                 } else {
626                     StructKind::MaybeUnsized
627                 };
628
629                 univariant(&tys.iter().map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
630                     &ReprOptions::default(), kind)?
631             }
632
633             // SIMD vector types.
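                // For example, `#[repr(simd)] struct f32x4(f32, f32, f32, f32);` becomes an
                // `Abi::Vector` of four `f32` elements, with its alignment taken from the
                // target's vector alignment for a 16-byte vector (16 bytes on most targets).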
634             ty::Adt(def, ..) if def.repr.simd() => {
635                 let element = self.layout_of(ty.simd_type(tcx))?;
636                 let count = ty.simd_size(tcx) as u64;
637                 assert!(count > 0);
638                 let scalar = match element.abi {
639                     Abi::Scalar(ref scalar) => scalar.clone(),
640                     _ => {
641                         tcx.sess.fatal(&format!("monomorphising SIMD type `{}` with \
642                                                 a non-machine element type `{}`",
643                                                 ty, element.ty));
644                     }
645                 };
646                 let size = element.size.checked_mul(count, dl)
647                     .ok_or(LayoutError::SizeOverflow(ty))?;
648                 let align = dl.vector_align(size);
649                 let size = size.abi_align(align);
650
651                 tcx.intern_layout(LayoutDetails {
652                     variants: Variants::Single { index: 0 },
653                     fields: FieldPlacement::Array {
654                         stride: element.size,
655                         count
656                     },
657                     abi: Abi::Vector {
658                         element: scalar,
659                         count
660                     },
661                     size,
662                     align,
663                 })
664             }
665
666             // ADTs.
667             ty::Adt(def, substs) => {
668                 // Cache the field layouts.
669                 let variants = def.variants.iter().map(|v| {
670                     v.fields.iter().map(|field| {
671                         self.layout_of(field.ty(tcx, substs))
672                     }).collect::<Result<Vec<_>, _>>()
673                 }).collect::<Result<Vec<_>, _>>()?;
674
675                 if def.is_union() {
676                     let packed = def.repr.packed();
677                     if packed && def.repr.align > 0 {
678                         bug!("Union cannot be packed and aligned");
679                     }
680
681                     let pack = {
682                         let pack = def.repr.pack as u64;
683                         Align::from_bytes(pack, pack).unwrap()
684                     };
685
686                     let mut align = if packed {
687                         dl.i8_align
688                     } else {
689                         dl.aggregate_align
690                     };
691
692                     if def.repr.align > 0 {
693                         let repr_align = def.repr.align as u64;
694                         align = align.max(
695                             Align::from_bytes(repr_align, repr_align).unwrap());
696                     }
697
698                     let mut size = Size::ZERO;
699                     for field in &variants[0] {
700                         assert!(!field.is_unsized());
701
702                         if packed {
703                             let field_pack = field.align.min(pack);
704                             align = align.max(field_pack);
705                         } else {
706                             align = align.max(field.align);
707                         }
708                         size = cmp::max(size, field.size);
709                     }
710
711                     return Ok(tcx.intern_layout(LayoutDetails {
712                         variants: Variants::Single { index: 0 },
713                         fields: FieldPlacement::Union(variants[0].len()),
714                         abi: Abi::Aggregate { sized: true },
715                         align,
716                         size: size.abi_align(align)
717                     }));
718                 }
719
720                 // A variant is absent if it's uninhabited and only has ZST fields.
721                 // Present uninhabited variants only require space for their fields,
722                 // but *not* an encoding of the discriminant (e.g. a tag value).
723                 // See issue #49298 for more details on the need to leave space
724                 // for non-ZST uninhabited data (mostly partial initialization).
725                 let absent = |fields: &[TyLayout]| {
726                     let uninhabited = fields.iter().any(|f| f.abi == Abi::Uninhabited);
727                     let is_zst = fields.iter().all(|f| f.is_zst());
728                     uninhabited && is_zst
729                 };
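                    // For example, in `enum E { A(u32), B(!) }` variant `B` is uninhabited and
                    // all of its fields are zero-sized, so it is absent; `E` is then laid out
                    // below like a struct containing only a `u32`, with no tag at all.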
730                 let (present_first, present_second) = {
731                     let mut present_variants = (0..variants.len()).filter(|&v| {
732                         !absent(&variants[v])
733                     });
734                     (present_variants.next(), present_variants.next())
735                 };
736                 if present_first.is_none() {
737                     // Uninhabited because it has no variants, or only absent ones.
738                     return tcx.layout_raw(param_env.and(tcx.types.never));
739                 }
740
741                 let is_struct = !def.is_enum() ||
742                     // Only one variant is present.
743                     (present_second.is_none() &&
744                     // Representation optimizations are allowed.
745                      !def.repr.inhibit_enum_layout_opt());
746                 if is_struct {
747                     // Struct, or univariant enum equivalent to a struct.
748                     // (Typechecking will reject discriminant-sizing attrs.)
749
750                     let v = present_first.unwrap();
751                     let kind = if def.is_enum() || variants[v].len() == 0 {
752                         StructKind::AlwaysSized
753                     } else {
754                         let param_env = tcx.param_env(def.did);
755                         let last_field = def.variants[v].fields.last().unwrap();
756                         let always_sized = tcx.type_of(last_field.did)
757                           .is_sized(tcx.at(DUMMY_SP), param_env);
758                         if !always_sized { StructKind::MaybeUnsized }
759                         else { StructKind::AlwaysSized }
760                     };
761
762                     let mut st = univariant_uninterned(&variants[v], &def.repr, kind)?;
763                     st.variants = Variants::Single { index: v };
764                     // Exclude 0 from the range of a newtype ABI NonZero<T>.
765                     if Some(def.did) == self.tcx.lang_items().non_zero() {
766                         match st.abi {
767                             Abi::Scalar(ref mut scalar) |
768                             Abi::ScalarPair(ref mut scalar, _) => {
769                                 if *scalar.valid_range.start() == 0 {
770                                     scalar.valid_range = 1..=*scalar.valid_range.end();
771                                 }
772                             }
773                             _ => {}
774                         }
775                     }
776                     return Ok(tcx.intern_layout(st));
777                 }
778
779                 // The current code for niche-filling relies on variant indices
780                 // instead of actual discriminants, so dataful enums with
781                 // explicit discriminants (RFC #2363) would misbehave.
782                 let no_explicit_discriminants = def.variants.iter().enumerate()
783                     .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i));
784
785                 // Niche-filling enum optimization.
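                    // For example, `Option<&T>`: `Some` is the dataful variant and the
                    // reference's forbidden value 0 provides the niche, so `None` is encoded
                    // as a null pointer and the whole enum stays pointer-sized.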
786                 if !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants {
787                     let mut dataful_variant = None;
788                     let mut niche_variants = usize::max_value()..=0;
789
790                     // Find one non-ZST variant.
791                     'variants: for (v, fields) in variants.iter().enumerate() {
792                         if absent(fields) {
793                             continue 'variants;
794                         }
795                         for f in fields {
796                             if !f.is_zst() {
797                                 if dataful_variant.is_none() {
798                                     dataful_variant = Some(v);
799                                     continue 'variants;
800                                 } else {
801                                     dataful_variant = None;
802                                     break 'variants;
803                                 }
804                             }
805                         }
806                         niche_variants = *niche_variants.start().min(&v)..=v;
807                     }
808
809                     if niche_variants.start() > niche_variants.end() {
810                         dataful_variant = None;
811                     }
812
813                     if let Some(i) = dataful_variant {
814                         let count = (niche_variants.end() - niche_variants.start() + 1) as u128;
815                         for (field_index, &field) in variants[i].iter().enumerate() {
816                             let niche = match self.find_niche(field)? {
817                                 Some(niche) => niche,
818                                 _ => continue,
819                             };
820                             let (niche_start, niche_scalar) = match niche.reserve(self, count) {
821                                 Some(pair) => pair,
822                                 None => continue,
823                             };
824
825                             let mut align = dl.aggregate_align;
826                             let st = variants.iter().enumerate().map(|(j, v)| {
827                                 let mut st = univariant_uninterned(v,
828                                     &def.repr, StructKind::AlwaysSized)?;
829                                 st.variants = Variants::Single { index: j };
830
831                                 align = align.max(st.align);
832
833                                 Ok(st)
834                             }).collect::<Result<Vec<_>, _>>()?;
835
836                             let offset = st[i].fields.offset(field_index) + niche.offset;
837                             let size = st[i].size;
838
839                             let mut abi = match st[i].abi {
840                                 Abi::Scalar(_) => Abi::Scalar(niche_scalar.clone()),
841                                 Abi::ScalarPair(ref first, ref second) => {
842                                     // We need to use scalar_unit to reset the
843                                     // valid range to the maximal one for that
844                                     // primitive, because only the niche is
845                                     // guaranteed to be initialised, not the
846                                     // other primitive.
847                                     if offset.bytes() == 0 {
848                                         Abi::ScalarPair(
849                                             niche_scalar.clone(),
850                                             scalar_unit(second.value),
851                                         )
852                                     } else {
853                                         Abi::ScalarPair(
854                                             scalar_unit(first.value),
855                                             niche_scalar.clone(),
856                                         )
857                                     }
858                                 }
859                                 _ => Abi::Aggregate { sized: true },
860                             };
861
862                             if st.iter().all(|v| v.abi == Abi::Uninhabited) {
863                                 abi = Abi::Uninhabited;
864                             }
865
866                             return Ok(tcx.intern_layout(LayoutDetails {
867                                 variants: Variants::NicheFilling {
868                                     dataful_variant: i,
869                                     niche_variants,
870                                     niche: niche_scalar,
871                                     niche_start,
872                                     variants: st,
873                                 },
874                                 fields: FieldPlacement::Arbitrary {
875                                     offsets: vec![offset],
876                                     memory_index: vec![0]
877                                 },
878                                 abi,
879                                 size,
880                                 align,
881                             }));
882                         }
883                     }
884                 }
885
886                 let (mut min, mut max) = (i128::max_value(), i128::min_value());
887                 let discr_type = def.repr.discr_type();
888                 let bits = Integer::from_attr(tcx, discr_type).size().bits();
889                 for (i, discr) in def.discriminants(tcx).enumerate() {
890                     if variants[i].iter().any(|f| f.abi == Abi::Uninhabited) {
891                         continue;
892                     }
893                     let mut x = discr.val as i128;
894                     if discr_type.is_signed() {
895                         // sign extend the raw representation to be an i128
896                         x = (x << (128 - bits)) >> (128 - bits);
897                     }
898                     if x < min { min = x; }
899                     if x > max { max = x; }
900                 }
901                 // We might have no inhabited variants, so pretend there's at least one.
902                 if (min, max) == (i128::max_value(), i128::min_value()) {
903                     min = 0;
904                     max = 0;
905                 }
906                 assert!(min <= max, "discriminant range is {}...{}", min, max);
907                 let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max);
908
909                 let mut align = dl.aggregate_align;
910                 let mut size = Size::ZERO;
911
912                 // We're interested in the smallest alignment, so start large.
913                 let mut start_align = Align::from_bytes(256, 256).unwrap();
914                 assert_eq!(Integer::for_abi_align(dl, start_align), None);
915
916                 // repr(C) on an enum tells us to make a (tag, union) layout,
917                 // so we need to grow the prefix alignment to be at least
918                 // the alignment of the union. (This value is used both for
919                 // determining the alignment of the overall enum, and for
920                 // determining the alignment of the payload after the tag.)
921                 let mut prefix_align = min_ity.align(dl);
922                 if def.repr.c() {
923                     for fields in &variants {
924                         for field in fields {
925                             prefix_align = prefix_align.max(field.align);
926                         }
927                     }
928                 }
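                    // For example, for `#[repr(C)] enum E { A(u64) }` on a typical 64-bit
                    // target the tag is at least 32 bits wide, but the prefix alignment grows
                    // to the u64's 8 bytes, so the payload starts at offset 8 rather than 4.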
929
930                 // Create the set of structs that represent each variant.
931                 let mut layout_variants = variants.iter().enumerate().map(|(i, field_layouts)| {
932                     let mut st = univariant_uninterned(&field_layouts,
933                         &def.repr, StructKind::Prefixed(min_ity.size(), prefix_align))?;
934                     st.variants = Variants::Single { index: i };
935                     // Find the first field we can't move later
936                     // to make room for a larger discriminant.
937                     for field in st.fields.index_by_increasing_offset().map(|j| field_layouts[j]) {
938                         if !field.is_zst() || field.align.abi() != 1 {
939                             start_align = start_align.min(field.align);
940                             break;
941                         }
942                     }
943                     size = cmp::max(size, st.size);
944                     align = align.max(st.align);
945                     Ok(st)
946                 }).collect::<Result<Vec<_>, _>>()?;
947
948                 // Align the maximum variant size to the largest alignment.
949                 size = size.abi_align(align);
950
951                 if size.bytes() >= dl.obj_size_bound() {
952                     return Err(LayoutError::SizeOverflow(ty));
953                 }
954
955                 let typeck_ity = Integer::from_attr(dl, def.repr.discr_type());
956                 if typeck_ity < min_ity {
957                     // It is a bug if layout decided on a greater discriminant size than typeck did
958                     // at this point (based on the values the discriminant can take on), mostly
959                     // because this discriminant will be loaded and then stored into a variable of
960                     // the type calculated by typeck. Consider such a case (a bug): typeck decided
961                     // on a byte-sized discriminant, but layout thinks we need 16 bits to store all
962                     // the discriminant values. Then, in codegen, in order to store this 16-bit
963                     // discriminant into an 8-bit sized temporary, some of the space necessary to
964                     // represent it would have to be discarded (or layout would be wrong in
965                     // thinking it needs 16 bits).
966                     bug!("layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
967                          min_ity, typeck_ity);
968                     // However, it is fine to make the discriminant type arbitrarily large (as an
969                     // optimisation) after this point; we'll just truncate the value we load in codegen.
970                 }
971
972                 // Check to see if we should use a different type for the
973                 // discriminant. We can safely use a type with the same size
974                 // as the alignment of the first field of each variant.
975                 // We increase the size of the discriminant to avoid LLVM copying
976                 // padding when it doesn't need to. This normally causes unaligned
977                 // load/stores and excessive memcpy/memset operations. By using a
978                 // bigger integer size, LLVM can be sure about its contents and
979                 // won't be so conservative.
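                    // For example, for `enum E { A(u64), B(u64) }` on a typical 64-bit target
                    // the tag only needs one byte, but the first field of every variant is
                    // 8-byte aligned, so the discriminant is widened to 64 bits and the
                    // payload still starts at offset 8 with no padding after the tag.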
980
981                 // Use the initial field alignment
982                 let mut ity = if def.repr.c() || def.repr.int.is_some() {
983                     min_ity
984                 } else {
985                     Integer::for_abi_align(dl, start_align).unwrap_or(min_ity)
986                 };
987
988                 // If the alignment is not larger than the chosen discriminant size,
989                 // don't use the alignment as the final size.
990                 if ity <= min_ity {
991                     ity = min_ity;
992                 } else {
993                     // Patch up the variants' first few fields.
994                     let old_ity_size = min_ity.size();
995                     let new_ity_size = ity.size();
996                     for variant in &mut layout_variants {
997                         match variant.fields {
998                             FieldPlacement::Arbitrary { ref mut offsets, .. } => {
999                                 for i in offsets {
1000                                     if *i <= old_ity_size {
1001                                         assert_eq!(*i, old_ity_size);
1002                                         *i = new_ity_size;
1003                                     }
1004                                 }
1005                                 // We might be making the struct larger.
1006                                 if variant.size <= old_ity_size {
1007                                     variant.size = new_ity_size;
1008                                 }
1009                             }
1010                             _ => bug!()
1011                         }
1012                     }
1013                 }
1014
1015                 let tag_mask = !0u128 >> (128 - ity.size().bits());
1016                 let tag = Scalar {
1017                     value: Int(ity, signed),
1018                     valid_range: (min as u128 & tag_mask)..=(max as u128 & tag_mask),
1019                 };
1020                 let mut abi = Abi::Aggregate { sized: true };
1021                 if tag.value.size(dl) == size {
1022                     abi = Abi::Scalar(tag.clone());
1023                 } else {
1024                     // Try to use a ScalarPair for all tagged enums.
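                         // For example, in `enum E { A(u32), B(u32) }` every variant has a
                         // single non-ZST `u32` at the same offset, so the whole enum can use
                         // `Abi::ScalarPair(tag, u32)` instead of a plain aggregate.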
1025                     let mut common_prim = None;
1026                     for (field_layouts, layout_variant) in variants.iter().zip(&layout_variants) {
1027                         let offsets = match layout_variant.fields {
1028                             FieldPlacement::Arbitrary { ref offsets, .. } => offsets,
1029                             _ => bug!(),
1030                         };
1031                         let mut fields = field_layouts
1032                             .iter()
1033                             .zip(offsets)
1034                             .filter(|p| !p.0.is_zst());
1035                         let (field, offset) = match (fields.next(), fields.next()) {
1036                             (None, None) => continue,
1037                             (Some(pair), None) => pair,
1038                             _ => {
1039                                 common_prim = None;
1040                                 break;
1041                             }
1042                         };
1043                         let prim = match field.details.abi {
1044                             Abi::Scalar(ref scalar) => scalar.value,
1045                             _ => {
1046                                 common_prim = None;
1047                                 break;
1048                             }
1049                         };
1050                         if let Some(pair) = common_prim {
1051                             // This is pretty conservative. We could go fancier
1052                             // by conflating things like i32 and u32, or even
1053                             // realising that (u8, u8) could just cohabit with
1054                             // u16 or even u32.
1055                             if pair != (prim, offset) {
1056                                 common_prim = None;
1057                                 break;
1058                             }
1059                         } else {
1060                             common_prim = Some((prim, offset));
1061                         }
1062                     }
1063                     if let Some((prim, offset)) = common_prim {
1064                         let pair = scalar_pair(tag.clone(), scalar_unit(prim));
1065                         let pair_offsets = match pair.fields {
1066                             FieldPlacement::Arbitrary {
1067                                 ref offsets,
1068                                 ref memory_index
1069                             } => {
1070                                 assert_eq!(memory_index, &[0, 1]);
1071                                 offsets
1072                             }
1073                             _ => bug!()
1074                         };
1075                         if pair_offsets[0] == Size::ZERO &&
1076                             pair_offsets[1] == *offset &&
1077                             align == pair.align &&
1078                             size == pair.size {
1079                             // We can use `ScalarPair` only when it matches our
1080                             // already computed layout (including `#[repr(C)]`).
1081                             abi = pair.abi;
1082                         }
1083                     }
1084                 }
1085
1086                 if layout_variants.iter().all(|v| v.abi == Abi::Uninhabited) {
1087                     abi = Abi::Uninhabited;
1088                 }
1089
1090                 tcx.intern_layout(LayoutDetails {
1091                     variants: Variants::Tagged {
1092                         tag,
1093                         variants: layout_variants,
1094                     },
1095                     fields: FieldPlacement::Arbitrary {
1096                         offsets: vec![Size::ZERO],
1097                         memory_index: vec![0]
1098                     },
1099                     abi,
1100                     align,
1101                     size
1102                 })
1103             }
1104
1105             // Types with no meaningful known layout.
1106             ty::Projection(_) | ty::Opaque(..) => {
1107                 let normalized = tcx.normalize_erasing_regions(param_env, ty);
1108                 if ty == normalized {
1109                     return Err(LayoutError::Unknown(ty));
1110                 }
1111                 tcx.layout_raw(param_env.and(normalized))?
1112             }
1113             ty::GeneratorWitness(..) | ty::Infer(_) => {
1114                 bug!("LayoutDetails::compute: unexpected type `{}`", ty)
1115             }
1116             ty::Param(_) | ty::Error => {
1117                 return Err(LayoutError::Unknown(ty));
1118             }
1119         })
1120     }
1121
1122     /// This is invoked by the `layout_raw` query to record the final
1123     /// layout of each type.
1124     #[inline]
1125     fn record_layout_for_printing(self, layout: TyLayout<'tcx>) {
1126         // If we are running with `-Zprint-type-sizes`, record layouts for
1127         // dumping later. Ignore layouts computed with non-empty param
1128         // environments, or non-monomorphic layouts, as the user only wants
1129         // to see the sizes resulting from the final codegen session.
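             // (These records are what `-Z print-type-sizes` dumps at the end of the
             // compilation session.)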
1130         if
1131             !self.tcx.sess.opts.debugging_opts.print_type_sizes ||
1132             layout.ty.has_param_types() ||
1133             layout.ty.has_self_ty() ||
1134             !self.param_env.caller_bounds.is_empty()
1135         {
1136             return;
1137         }
1138
1139         self.record_layout_for_printing_outlined(layout)
1140     }
1141
1142     fn record_layout_for_printing_outlined(self, layout: TyLayout<'tcx>) {
1143         // (delay format until we actually need it)
1144         let record = |kind, packed, opt_discr_size, variants| {
1145             let type_desc = format!("{:?}", layout.ty);
1146             self.tcx.sess.code_stats.borrow_mut().record_type_size(kind,
1147                                                                    type_desc,
1148                                                                    layout.align,
1149                                                                    layout.size,
1150                                                                    packed,
1151                                                                    opt_discr_size,
1152                                                                    variants);
1153         };
1154
1155         let adt_def = match layout.ty.sty {
1156             ty::Adt(ref adt_def, _) => {
1157                 debug!("print-type-size t: `{:?}` process adt", layout.ty);
1158                 adt_def
1159             }
1160
1161             ty::Closure(..) => {
1162                 debug!("print-type-size t: `{:?}` record closure", layout.ty);
1163                 record(DataTypeKind::Closure, false, None, vec![]);
1164                 return;
1165             }
1166
1167             _ => {
1168                 debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
1169                 return;
1170             }
1171         };
1172
1173         let adt_kind = adt_def.adt_kind();
1174         let adt_packed = adt_def.repr.packed();
1175
1176         let build_variant_info = |n: Option<ast::Name>,
1177                                   flds: &[ast::Name],
1178                                   layout: TyLayout<'tcx>| {
1179             let mut min_size = Size::ZERO;
1180             let field_info: Vec<_> = flds.iter().enumerate().map(|(i, &name)| {
1181                 match layout.field(self, i) {
1182                     Err(err) => {
1183                         bug!("no layout found for field {}: `{:?}`", name, err);
1184                     }
1185                     Ok(field_layout) => {
1186                         let offset = layout.fields.offset(i);
1187                         let field_end = offset + field_layout.size;
1188                         if min_size < field_end {
1189                             min_size = field_end;
1190                         }
1191                         session::FieldInfo {
1192                             name: name.to_string(),
1193                             offset: offset.bytes(),
1194                             size: field_layout.size.bytes(),
1195                             align: field_layout.align.abi(),
1196                         }
1197                     }
1198                 }
1199             }).collect();
1200
1201             session::VariantInfo {
1202                 name: n.map(|n| n.to_string()),
1203                 kind: if layout.is_unsized() {
1204                     session::SizeKind::Min
1205                 } else {
1206                     session::SizeKind::Exact
1207                 },
1208                 align: layout.align.abi(),
1209                 size: if min_size.bytes() == 0 {
1210                     layout.size.bytes()
1211                 } else {
1212                     min_size.bytes()
1213                 },
1214                 fields: field_info,
1215             }
1216         };
1217
1218         match layout.variants {
1219             Variants::Single { index } => {
1220                 debug!("print-type-size `{:#?}` variant {}",
1221                        layout, adt_def.variants[index].name);
1222                 if !adt_def.variants.is_empty() {
1223                     let variant_def = &adt_def.variants[index];
1224                     let fields: Vec<_> =
1225                         variant_def.fields.iter().map(|f| f.ident.name).collect();
1226                     record(adt_kind.into(),
1227                            adt_packed,
1228                            None,
1229                            vec![build_variant_info(Some(variant_def.name),
1230                                                    &fields,
1231                                                    layout)]);
1232                 } else {
1233                     // (This case arises for *empty* enums; so give it
1234                     // zero variants.)
1235                     record(adt_kind.into(), adt_packed, None, vec![]);
1236                 }
1237             }
1238
1239             Variants::NicheFilling { .. } |
1240             Variants::Tagged { .. } => {
1241                 debug!("print-type-size `{:#?}` adt general variants def {}",
1242                        layout.ty, adt_def.variants.len());
1243                 let variant_infos: Vec<_> =
1244                     adt_def.variants.iter().enumerate().map(|(i, variant_def)| {
1245                         let fields: Vec<_> =
1246                             variant_def.fields.iter().map(|f| f.ident.name).collect();
1247                         build_variant_info(Some(variant_def.name),
1248                                             &fields,
1249                                             layout.for_variant(self, i))
1250                     })
1251                     .collect();
1252                 record(adt_kind.into(), adt_packed, match layout.variants {
1253                     Variants::Tagged { ref tag, .. } => Some(tag.value.size(self)),
1254                     _ => None
1255                 }, variant_infos);
1256             }
1257         }
1258     }
1259 }
1260
1261 /// Type size "skeleton", i.e. the only information determining a type's size.
1262 /// While this is conservative (aside from constant sizes, only pointers,
1263 /// newtypes thereof, and null-pointer-optimized enums are allowed), it is
1264 /// enough to statically check common use cases of transmute.
1265 #[derive(Copy, Clone, Debug)]
1266 pub enum SizeSkeleton<'tcx> {
1267     /// Any statically computable Layout.
1268     Known(Size),
1269
1270     /// A potentially-fat pointer.
1271     Pointer {
1272         /// If true, this pointer is never null.
1273         non_zero: bool,
1274         /// The type which determines the unsized metadata, if any,
1275         /// of this pointer. Either a type parameter or a projection
1276         /// depending on one, with regions erased.
1277         tail: Ty<'tcx>
1278     }
1279 }
1280
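     // Illustrative sketch of the kind of code this is meant to accept (the
     // `erase` function below is hypothetical): both sides reduce to
     // `SizeSkeleton::Pointer { tail: T, .. }`, so the sizes compare equal
     // even though no concrete layout exists for `T`.
     //
     //     fn erase<T>(x: &T) -> Option<&T> {
     //         unsafe { std::mem::transmute(x) }
     //     }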
1281 impl<'a, 'tcx> SizeSkeleton<'tcx> {
1282     pub fn compute(ty: Ty<'tcx>,
1283                    tcx: TyCtxt<'a, 'tcx, 'tcx>,
1284                    param_env: ty::ParamEnv<'tcx>)
1285                    -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
1286         debug_assert!(!ty.has_infer_types());
1287
1288         // First try computing a static layout.
1289         let err = match tcx.layout_of(param_env.and(ty)) {
1290             Ok(layout) => {
1291                 return Ok(SizeSkeleton::Known(layout.size));
1292             }
1293             Err(err) => err
1294         };
1295
1296         match ty.sty {
1297             ty::Ref(_, pointee, _) |
1298             ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
1299                 let non_zero = !ty.is_unsafe_ptr();
1300                 let tail = tcx.struct_tail(pointee);
1301                 match tail.sty {
1302                     ty::Param(_) | ty::Projection(_) => {
1303                         debug_assert!(tail.has_param_types() || tail.has_self_ty());
1304                         Ok(SizeSkeleton::Pointer {
1305                             non_zero,
1306                             tail: tcx.erase_regions(&tail)
1307                         })
1308                     }
1309                     _ => {
1310                         bug!("SizeSkeleton::compute({}): layout errored ({}), yet \
1311                               tail `{}` is not a type parameter or a projection",
1312                              ty, err, tail)
1313                     }
1314                 }
1315             }
1316
1317             ty::Adt(def, substs) => {
1318                 // Only newtypes and enums w/ nullable pointer optimization.
1319                 if def.is_union() || def.variants.is_empty() || def.variants.len() > 2 {
1320                     return Err(err);
1321                 }
1322
1323                 // Get a zero-sized variant or a pointer newtype.
1324                 let zero_or_ptr_variant = |i: usize| {
1325                     let fields = def.variants[i].fields.iter().map(|field| {
1326                         SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env)
1327                     });
1328                     let mut ptr = None;
1329                     for field in fields {
1330                         let field = field?;
1331                         match field {
1332                             SizeSkeleton::Known(size) => {
1333                                 if size.bytes() > 0 {
1334                                     return Err(err);
1335                                 }
1336                             }
1337                             SizeSkeleton::Pointer {..} => {
1338                                 if ptr.is_some() {
1339                                     return Err(err);
1340                                 }
1341                                 ptr = Some(field);
1342                             }
1343                         }
1344                     }
1345                     Ok(ptr)
1346                 };
1347
1348                 let v0 = zero_or_ptr_variant(0)?;
1349                 // Newtype.
1350                 if def.variants.len() == 1 {
1351                     if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
1352                         return Ok(SizeSkeleton::Pointer {
1353                             non_zero: non_zero ||
1354                                 Some(def.did) == tcx.lang_items().non_zero(),
1355                             tail,
1356                         });
1357                     } else {
1358                         return Err(err);
1359                     }
1360                 }
1361
1362                 let v1 = zero_or_ptr_variant(1)?;
1363                 // Nullable pointer enum optimization.
1364                 match (v0, v1) {
1365                     (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None) |
1366                     (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
1367                         Ok(SizeSkeleton::Pointer {
1368                             non_zero: false,
1369                             tail,
1370                         })
1371                     }
1372                     _ => Err(err)
1373                 }
1374             }
1375
1376             ty::Projection(_) | ty::Opaque(..) => {
1377                 let normalized = tcx.normalize_erasing_regions(param_env, ty);
1378                 if ty == normalized {
1379                     Err(err)
1380                 } else {
1381                     SizeSkeleton::compute(normalized, tcx, param_env)
1382                 }
1383             }
1384
1385             _ => Err(err)
1386         }
1387     }
1388
1389     pub fn same_size(self, other: SizeSkeleton) -> bool {
1390         match (self, other) {
1391             (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
1392             (SizeSkeleton::Pointer { tail: a, .. },
1393              SizeSkeleton::Pointer { tail: b, .. }) => a == b,
1394             _ => false
1395         }
1396     }
1397 }
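     // A usage sketch (the `src_ty`/`dst_ty` bindings are placeholders): a
     // caller checking a transmute between two generic types can compare
     // skeletons instead of full layouts, e.g.
     //
     //     let compatible = SizeSkeleton::compute(src_ty, tcx, param_env)?
     //         .same_size(SizeSkeleton::compute(dst_ty, tcx, param_env)?);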
1398
1399 pub trait HasTyCtxt<'tcx>: HasDataLayout {
1400     fn tcx<'a>(&'a self) -> TyCtxt<'a, 'tcx, 'tcx>;
1401 }
1402
1403 impl<'a, 'gcx, 'tcx> HasDataLayout for TyCtxt<'a, 'gcx, 'tcx> {
1404     fn data_layout(&self) -> &TargetDataLayout {
1405         &self.data_layout
1406     }
1407 }
1408
1409 impl<'a, 'gcx, 'tcx> HasTyCtxt<'gcx> for TyCtxt<'a, 'gcx, 'tcx> {
1410     fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'gcx> {
1411         self.global_tcx()
1412     }
1413 }
1414
1415 impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
1416     fn data_layout(&self) -> &TargetDataLayout {
1417         self.tcx.data_layout()
1418     }
1419 }
1420
1421 impl<'gcx, 'tcx, T: HasTyCtxt<'gcx>> HasTyCtxt<'gcx> for LayoutCx<'tcx, T> {
1422     fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'gcx> {
1423         self.tcx.tcx()
1424     }
1425 }
1426
1427 pub trait MaybeResult<T> {
1428     fn from_ok(x: T) -> Self;
1429     fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self;
1430 }
1431
1432 impl<T> MaybeResult<T> for T {
1433     fn from_ok(x: T) -> Self {
1434         x
1435     }
1436     fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self {
1437         f(self)
1438     }
1439 }
1440
1441 impl<T, E> MaybeResult<T> for Result<T, E> {
1442     fn from_ok(x: T) -> Self {
1443         Ok(x)
1444     }
1445     fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self {
1446         self.map(f)
1447     }
1448 }
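     // For illustration of the two impls above (the values are placeholders):
     //
     //     let fallible: Result<u32, ()> = MaybeResult::from_ok(1u32); // Ok(1)
     //     let plain: u32 = MaybeResult::from_ok(1u32);                // 1
     //     assert_eq!(fallible.map_same(|x| x + 1), Ok(2));
     //     assert_eq!(plain.map_same(|x| x + 1), 2);
     //
     // This is what lets `field` and `for_variant` below work uniformly with
     // contexts whose `TyLayout` is infallible or a `Result`.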
1449
1450 pub type TyLayout<'tcx> = ::rustc_target::abi::TyLayout<'tcx, Ty<'tcx>>;
1451
1452 impl<'a, 'tcx> LayoutOf for LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
1453     type Ty = Ty<'tcx>;
1454     type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;
1455
1456     /// Computes the layout of a type. Note that this implicitly
1457     /// executes in "reveal all" mode.
1458     fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
1459         let param_env = self.param_env.with_reveal_all();
1460         let ty = self.tcx.normalize_erasing_regions(param_env, ty);
1461         let details = self.tcx.layout_raw(param_env.and(ty))?;
1462         let layout = TyLayout {
1463             ty,
1464             details
1465         };
1466
1467         // NB: This recording is normally disabled; when enabled, it
1468         // can however trigger recursive invocations of `layout_of`.
1469         // Therefore, we execute it *after* the main query has
1470         // completed, to avoid problems around recursive structures
1471         // and the like. (Admittedly, I wasn't able to reproduce a problem
1472         // here, but it seems like the right thing to do. -nmatsakis)
1473         self.record_layout_for_printing(layout);
1474
1475         Ok(layout)
1476     }
1477 }
1478
1479 impl<'a, 'tcx> LayoutOf for LayoutCx<'tcx, ty::query::TyCtxtAt<'a, 'tcx, 'tcx>> {
1480     type Ty = Ty<'tcx>;
1481     type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;
1482
1483     /// Computes the layout of a type. Note that this implicitly
1484     /// executes in "reveal all" mode.
1485     fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
1486         let param_env = self.param_env.with_reveal_all();
1487         let ty = self.tcx.normalize_erasing_regions(param_env, ty);
1488         let details = self.tcx.layout_raw(param_env.and(ty))?;
1489         let layout = TyLayout {
1490             ty,
1491             details
1492         };
1493
1494         // NB: This recording is normally disabled; when enabled, it
1495         // can however trigger recursive invocations of `layout_of`.
1496         // Therefore, we execute it *after* the main query has
1497         // completed, to avoid problems around recursive structures
1498         // and the like. (Admittedly, I wasn't able to reproduce a problem
1499         // here, but it seems like the right thing to do. -nmatsakis)
1500         let cx = LayoutCx {
1501             tcx: *self.tcx,
1502             param_env: self.param_env
1503         };
1504         cx.record_layout_for_printing(layout);
1505
1506         Ok(layout)
1507     }
1508 }
1509
1510 // Helper (inherent) `layout_of` methods to avoid pushing `LayoutCx` to users.
1511 impl TyCtxt<'a, 'tcx, '_> {
1512     /// Computes the layout of a type. Note that this implicitly
1513     /// executes in "reveal all" mode.
1514     #[inline]
1515     pub fn layout_of(self, param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
1516                      -> Result<TyLayout<'tcx>, LayoutError<'tcx>> {
1517         let cx = LayoutCx {
1518             tcx: self.global_tcx(),
1519             param_env: param_env_and_ty.param_env
1520         };
1521         cx.layout_of(param_env_and_ty.value)
1522     }
1523 }
1524
1525 impl ty::query::TyCtxtAt<'a, 'tcx, '_> {
1526     /// Computes the layout of a type. Note that this implicitly
1527     /// executes in "reveal all" mode.
1528     #[inline]
1529     pub fn layout_of(self, param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
1530                      -> Result<TyLayout<'tcx>, LayoutError<'tcx>> {
1531         let cx = LayoutCx {
1532             tcx: self.global_tcx().at(self.span),
1533             param_env: param_env_and_ty.param_env
1534         };
1535         cx.layout_of(param_env_and_ty.value)
1536     }
1537 }
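     // A usage sketch (the `ty` and `param_env` bindings are placeholders):
     // with these helpers, callers elsewhere in the compiler can query a
     // layout without constructing a `LayoutCx` themselves, e.g.
     //
     //     let layout = tcx.layout_of(param_env.and(ty))?;
     //     let size = layout.size.bytes();
     //     let align = layout.align.abi();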
1538
1539 impl<'a, 'tcx, C> TyLayoutMethods<'tcx, C> for Ty<'tcx>
1540     where C: LayoutOf<Ty = Ty<'tcx>> + HasTyCtxt<'tcx>,
1541           C::TyLayout: MaybeResult<TyLayout<'tcx>>
1542 {
1543     fn for_variant(this: TyLayout<'tcx>, cx: C, variant_index: usize) -> TyLayout<'tcx> {
1544         let details = match this.variants {
1545             Variants::Single { index } if index == variant_index => this.details,
1546
1547             Variants::Single { index } => {
1548                 // Deny calling for_variant more than once for non-Single enums.
1549                 cx.layout_of(this.ty).map_same(|layout| {
1550                     assert_eq!(layout.variants, Variants::Single { index });
1551                     layout
1552                 });
1553
1554                 let fields = match this.ty.sty {
1555                     ty::Adt(def, _) => def.variants[variant_index].fields.len(),
1556                     _ => bug!()
1557                 };
1558                 let tcx = cx.tcx();
1559                 tcx.intern_layout(LayoutDetails {
1560                     variants: Variants::Single { index: variant_index },
1561                     fields: FieldPlacement::Union(fields),
1562                     abi: Abi::Uninhabited,
1563                     align: tcx.data_layout.i8_align,
1564                     size: Size::ZERO
1565                 })
1566             }
1567
1568             Variants::NicheFilling { ref variants, .. } |
1569             Variants::Tagged { ref variants, .. } => {
1570                 &variants[variant_index]
1571             }
1572         };
1573
1574         assert_eq!(details.variants, Variants::Single { index: variant_index });
1575
1576         TyLayout {
1577             ty: this.ty,
1578             details
1579         }
1580     }
1581
1582     fn field(this: TyLayout<'tcx>, cx: C, i: usize) -> C::TyLayout {
1583         let tcx = cx.tcx();
1584         cx.layout_of(match this.ty.sty {
1585             ty::Bool |
1586             ty::Char |
1587             ty::Int(_) |
1588             ty::Uint(_) |
1589             ty::Float(_) |
1590             ty::FnPtr(_) |
1591             ty::Never |
1592             ty::FnDef(..) |
1593             ty::GeneratorWitness(..) |
1594             ty::Foreign(..) |
1595             ty::Dynamic(..) => {
1596                 bug!("TyLayout::field_type({:?}): not applicable", this)
1597             }
1598
1599             // Potentially-fat pointers.
1600             ty::Ref(_, pointee, _) |
1601             ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
1602                 assert!(i < this.fields.count());
1603
1604                 // Reuse the fat *T type as its own thin pointer data field.
1605                 // This provides information about e.g. DST struct pointees
1606                 // (which may have no non-DST form), and will work as long
1607                 // as the `Abi` or `FieldPlacement` is checked by users.
1608                 if i == 0 {
1609                     let nil = tcx.mk_nil();
1610                     let ptr_ty = if this.ty.is_unsafe_ptr() {
1611                         tcx.mk_mut_ptr(nil)
1612                     } else {
1613                         tcx.mk_mut_ref(tcx.types.re_static, nil)
1614                     };
1615                     return cx.layout_of(ptr_ty).map_same(|mut ptr_layout| {
1616                         ptr_layout.ty = this.ty;
1617                         ptr_layout
1618                     });
1619                 }
1620
1621                 match tcx.struct_tail(pointee).sty {
1622                     ty::Slice(_) |
1623                     ty::Str => tcx.types.usize,
1624                     ty::Dynamic(_, _) => {
1625                         tcx.mk_imm_ref(
1626                             tcx.types.re_static,
1627                             tcx.mk_array(tcx.types.usize, 3),
1628                         )
1629                         /* FIXME use actual fn pointers
1630                         Warning: naively computing the number of entries in the
1631                         vtable by counting the methods on the trait + methods on
1632                         all parent traits does not work, because some methods can
1633                         be not object safe and thus excluded from the vtable.
1634                         Increase this counter if you tried to implement this but
1635                         failed to do it without duplicating a lot of code from
1636                         other places in the compiler: 2
1637                         tcx.mk_tup(&[
1638                             tcx.mk_array(tcx.types.usize, 3),
1639                             tcx.mk_array(Option<fn()>),
1640                         ])
1641                         */
1642                     }
1643                     _ => bug!("TyLayout::field_type({:?}): not applicable", this)
1644                 }
1645             }
1646
1647             // Arrays and slices.
1648             ty::Array(element, _) |
1649             ty::Slice(element) => element,
1650             ty::Str => tcx.types.u8,
1651
1652             // Tuples, generators and closures.
1653             ty::Closure(def_id, ref substs) => {
1654                 substs.upvar_tys(def_id, tcx).nth(i).unwrap()
1655             }
1656
1657             ty::Generator(def_id, ref substs, _) => {
1658                 substs.field_tys(def_id, tcx).nth(i).unwrap()
1659             }
1660
1661             ty::Tuple(tys) => tys[i],
1662
1663             // SIMD vector types.
1664             ty::Adt(def, ..) if def.repr.simd() => {
1665                 this.ty.simd_type(tcx)
1666             }
1667
1668             // ADTs.
1669             ty::Adt(def, substs) => {
1670                 match this.variants {
1671                     Variants::Single { index } => {
1672                         def.variants[index].fields[i].ty(tcx, substs)
1673                     }
1674
1675                     // Discriminant field for enums (where applicable).
1676                     Variants::Tagged { tag: ref discr, .. } |
1677                     Variants::NicheFilling { niche: ref discr, .. } => {
1678                         assert_eq!(i, 0);
1679                         let layout = LayoutDetails::scalar(tcx, discr.clone());
1680                         return MaybeResult::from_ok(TyLayout {
1681                             details: tcx.intern_layout(layout),
1682                             ty: discr.value.to_ty(tcx)
1683                         });
1684                     }
1685                 }
1686             }
1687
1688             ty::Projection(_) | ty::Opaque(..) | ty::Param(_) |
1689             ty::Infer(_) | ty::Error => {
1690                 bug!("TyLayout::field_type: unexpected type `{}`", this.ty)
1691             }
1692         })
1693     }
1694 }
1695
1696 struct Niche {
1697     offset: Size,
1698     scalar: Scalar,
1699     available: u128,
1700 }
1701
1702 impl Niche {
1703     fn reserve<'a, 'tcx>(
1704         &self,
1705         cx: LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>>,
1706         count: u128,
1707     ) -> Option<(u128, Scalar)> {
1708         if count > self.available {
1709             return None;
1710         }
1711         let Scalar { value, valid_range: ref v } = self.scalar;
1712         let bits = value.size(cx).bits();
1713         assert!(bits <= 128);
1714         let max_value = !0u128 >> (128 - bits);
1715         let start = v.end().wrapping_add(1) & max_value;
1716         let end = v.end().wrapping_add(count) & max_value;
1717         Some((start, Scalar { value, valid_range: *v.start()..=end }))
1718     }
1719 }
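     // Worked example for `reserve` (using `bool` as the illustration): the
     // scalar is 8 bits with valid_range 0..=1, so `available` is 254 spare
     // values. Reserving `count = 1` for an extra variant gives
     //
     //     start = (1 + 1) & 0xff = 2
     //     end   = (1 + 1) & 0xff = 2
     //
     // i.e. the niche-filling layout of `Option<bool>` can encode `None` as
     // the byte value 2 while keeping the size at one byte, with the widened
     // valid_range 0..=2.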
1720
1721 impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
1722     /// Find the offset of a niche leaf field, starting from
1723     /// the given type and recursing through aggregates.
1724     // FIXME(eddyb) traverse already optimized enums.
1725     fn find_niche(self, layout: TyLayout<'tcx>) -> Result<Option<Niche>, LayoutError<'tcx>> {
1726         let scalar_niche = |scalar: &Scalar, offset| {
1727             let Scalar { value, valid_range: ref v } = *scalar;
1728
1729             let bits = value.size(self).bits();
1730             assert!(bits <= 128);
1731             let max_value = !0u128 >> (128 - bits);
1732
1733             // Find out how many values are outside the valid range.
1734             let available = if v.start() <= v.end() {
1735                 v.start() + (max_value - v.end())
1736             } else {
1737                 v.start() - v.end() - 1
1738             };
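                 // Illustrative arithmetic for the two branches (8-bit scalar
                 // assumed): `bool` (0..=1) gives 0 + (255 - 1) = 254 spare
                 // values, while a wrap-around range such as 2..=0 (valid: 0
                 // and 2..=255) gives 2 - 0 - 1 = 1, i.e. only the value 1 is
                 // available as a niche.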
1739
1740             // Give up if there is no niche value available.
1741             if available == 0 {
1742                 return None;
1743             }
1744
1745             Some(Niche { offset, scalar: scalar.clone(), available })
1746         };
1747
1748         // Local variables which live across yields are stored
1749         // in the generator type as fields. These may be uninitialized,
1750         // so we don't look for niches there.
1751         if let ty::Generator(..) = layout.ty.sty {
1752             return Ok(None);
1753         }
1754
1755         match layout.abi {
1756             Abi::Scalar(ref scalar) => {
1757                 return Ok(scalar_niche(scalar, Size::ZERO));
1758             }
1759             Abi::ScalarPair(ref a, ref b) => {
1760                 // HACK(nox): We iterate on `b` and then `a` because `max_by_key`
1761                 // returns the last maximum.
1762                 let niche = iter::once((b, a.value.size(self).abi_align(b.value.align(self))))
1763                     .chain(iter::once((a, Size::ZERO)))
1764                     .filter_map(|(scalar, offset)| scalar_niche(scalar, offset))
1765                     .max_by_key(|niche| niche.available);
1766                 return Ok(niche);
1767             }
1768             Abi::Vector { ref element, .. } => {
1769                 return Ok(scalar_niche(element, Size::ZERO));
1770             }
1771             _ => {}
1772         }
1773
1774         // Perhaps one of the fields is non-zero; let's recurse and find out.
1775         if let FieldPlacement::Union(_) = layout.fields {
1776             // Only Rust enums have safe-to-inspect fields
1777             // (a discriminant); other unions are unsafe.
1778             if let Variants::Single { .. } = layout.variants {
1779                 return Ok(None);
1780             }
1781         }
1782         if let FieldPlacement::Array { .. } = layout.fields {
1783             if layout.fields.count() > 0 {
1784                 return self.find_niche(layout.field(self, 0)?);
1785             } else {
1786                 return Ok(None);
1787             }
1788         }
1789         let mut niche = None;
1790         let mut available = 0;
1791         for i in 0..layout.fields.count() {
1792             if let Some(mut c) = self.find_niche(layout.field(self, i)?)? {
1793                 if c.available > available {
1794                     available = c.available;
1795                     c.offset += layout.fields.offset(i);
1796                     niche = Some(c);
1797                 }
1798             }
1799         }
1800         Ok(niche)
1801     }
1802 }
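     // For a sense of what this search enables (illustrative; the first size
     // equality is guaranteed, the second is an implementation detail):
     //
     //     assert_eq!(std::mem::size_of::<Option<&u8>>(),
     //                std::mem::size_of::<&u8>());
     //     assert_eq!(std::mem::size_of::<Option<Option<bool>>>(), 1);
     //
     // `&u8` contributes the forbidden value 0 as its niche, and `bool` leaves
     // 254 spare values for nested `Option`s to reserve.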
1803
1804 impl<'a> HashStable<StableHashingContext<'a>> for Variants {
1805     fn hash_stable<W: StableHasherResult>(&self,
1806                                           hcx: &mut StableHashingContext<'a>,
1807                                           hasher: &mut StableHasher<W>) {
1808         use ty::layout::Variants::*;
1809         mem::discriminant(self).hash_stable(hcx, hasher);
1810
1811         match *self {
1812             Single { index } => {
1813                 index.hash_stable(hcx, hasher);
1814             }
1815             Tagged {
1816                 ref tag,
1817                 ref variants,
1818             } => {
1819                 tag.hash_stable(hcx, hasher);
1820                 variants.hash_stable(hcx, hasher);
1821             }
1822             NicheFilling {
1823                 dataful_variant,
1824                 ref niche_variants,
1825                 ref niche,
1826                 niche_start,
1827                 ref variants,
1828             } => {
1829                 dataful_variant.hash_stable(hcx, hasher);
1830                 niche_variants.start().hash_stable(hcx, hasher);
1831                 niche_variants.end().hash_stable(hcx, hasher);
1832                 niche.hash_stable(hcx, hasher);
1833                 niche_start.hash_stable(hcx, hasher);
1834                 variants.hash_stable(hcx, hasher);
1835             }
1836         }
1837     }
1838 }
1839
1840 impl<'a> HashStable<StableHashingContext<'a>> for FieldPlacement {
1841     fn hash_stable<W: StableHasherResult>(&self,
1842                                           hcx: &mut StableHashingContext<'a>,
1843                                           hasher: &mut StableHasher<W>) {
1844         use ty::layout::FieldPlacement::*;
1845         mem::discriminant(self).hash_stable(hcx, hasher);
1846
1847         match *self {
1848             Union(count) => {
1849                 count.hash_stable(hcx, hasher);
1850             }
1851             Array { count, stride } => {
1852                 count.hash_stable(hcx, hasher);
1853                 stride.hash_stable(hcx, hasher);
1854             }
1855             Arbitrary { ref offsets, ref memory_index } => {
1856                 offsets.hash_stable(hcx, hasher);
1857                 memory_index.hash_stable(hcx, hasher);
1858             }
1859         }
1860     }
1861 }
1862
1863 impl<'a> HashStable<StableHashingContext<'a>> for Abi {
1864     fn hash_stable<W: StableHasherResult>(&self,
1865                                           hcx: &mut StableHashingContext<'a>,
1866                                           hasher: &mut StableHasher<W>) {
1867         use ty::layout::Abi::*;
1868         mem::discriminant(self).hash_stable(hcx, hasher);
1869
1870         match *self {
1871             Uninhabited => {}
1872             Scalar(ref value) => {
1873                 value.hash_stable(hcx, hasher);
1874             }
1875             ScalarPair(ref a, ref b) => {
1876                 a.hash_stable(hcx, hasher);
1877                 b.hash_stable(hcx, hasher);
1878             }
1879             Vector { ref element, count } => {
1880                 element.hash_stable(hcx, hasher);
1881                 count.hash_stable(hcx, hasher);
1882             }
1883             Aggregate { sized } => {
1884                 sized.hash_stable(hcx, hasher);
1885             }
1886         }
1887     }
1888 }
1889
1890 impl<'a> HashStable<StableHashingContext<'a>> for Scalar {
1891     fn hash_stable<W: StableHasherResult>(&self,
1892                                           hcx: &mut StableHashingContext<'a>,
1893                                           hasher: &mut StableHasher<W>) {
1894         let Scalar { value, ref valid_range } = *self;
1895         value.hash_stable(hcx, hasher);
1896         valid_range.start().hash_stable(hcx, hasher);
1897         valid_range.end().hash_stable(hcx, hasher);
1898     }
1899 }
1900
1901 impl_stable_hash_for!(struct ::ty::layout::LayoutDetails {
1902     variants,
1903     fields,
1904     abi,
1905     size,
1906     align
1907 });
1908
1909 impl_stable_hash_for!(enum ::ty::layout::Integer {
1910     I8,
1911     I16,
1912     I32,
1913     I64,
1914     I128
1915 });
1916
1917 impl_stable_hash_for!(enum ::ty::layout::Primitive {
1918     Int(integer, signed),
1919     Float(fty),
1920     Pointer
1921 });
1922
1923 impl<'gcx> HashStable<StableHashingContext<'gcx>> for Align {
1924     fn hash_stable<W: StableHasherResult>(&self,
1925                                           hcx: &mut StableHashingContext<'gcx>,
1926                                           hasher: &mut StableHasher<W>) {
1927         self.abi().hash_stable(hcx, hasher);
1928         self.pref().hash_stable(hcx, hasher);
1929     }
1930 }
1931
1932 impl<'gcx> HashStable<StableHashingContext<'gcx>> for Size {
1933     fn hash_stable<W: StableHasherResult>(&self,
1934                                           hcx: &mut StableHashingContext<'gcx>,
1935                                           hasher: &mut StableHasher<W>) {
1936         self.bytes().hash_stable(hcx, hasher);
1937     }
1938 }
1939
1940 impl<'a, 'gcx> HashStable<StableHashingContext<'a>> for LayoutError<'gcx>
1941 {
1942     fn hash_stable<W: StableHasherResult>(&self,
1943                                           hcx: &mut StableHashingContext<'a>,
1944                                           hasher: &mut StableHasher<W>) {
1945         use ty::layout::LayoutError::*;
1946         mem::discriminant(self).hash_stable(hcx, hasher);
1947
1948         match *self {
1949             Unknown(t) |
1950             SizeOverflow(t) => t.hash_stable(hcx, hasher)
1951         }
1952     }
1953 }