]> git.lizzy.rs Git - rust.git/blob - src/librustc/ty/layout.rs
don't elide lifetimes in paths in librustc/
[rust.git] / src / librustc / ty / layout.rs
1 // Copyright 2016 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
4 //
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
10
11 use session::{self, DataTypeKind};
12 use ty::{self, Ty, TyCtxt, TypeFoldable, ReprOptions};
13
14 use syntax::ast::{self, IntTy, UintTy};
15 use syntax::attr;
16 use syntax_pos::DUMMY_SP;
17
18 use std::cmp;
19 use std::fmt;
20 use std::i128;
21 use std::iter;
22 use std::mem;
23 use std::ops::Bound;
24
25 use ich::StableHashingContext;
26 use rustc_data_structures::stable_hasher::{HashStable, StableHasher,
27                                            StableHasherResult};
28
29 pub use rustc_target::abi::*;
30
/// Extension methods for `Integer`, connecting the target-independent
/// integer widths to `rustc`'s interned types and `#[repr]` handling.
pub trait IntegerExt {
    /// Returns the interned integer type with this width and the given
    /// signedness (e.g. `I8` + `false` -> `u8`).
    fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, signed: bool) -> Ty<'tcx>;
    /// Maps an `attr::IntType` (as written in a `#[repr(..)]` attribute)
    /// to an `Integer`, resolving `isize`/`usize` through the target's
    /// data layout.
    fn from_attr<C: HasDataLayout>(cx: C, ity: attr::IntType) -> Integer;
    /// Chooses the integer width and signedness used to represent the
    /// discriminant range `min..=max` of `ty`, honoring any `#[repr]` hint.
    fn repr_discr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                  ty: Ty<'tcx>,
                  repr: &ReprOptions,
                  min: i128,
                  max: i128)
                  -> (Integer, bool);
}
41
42 impl IntegerExt for Integer {
43     fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, signed: bool) -> Ty<'tcx> {
44         match (*self, signed) {
45             (I8, false) => tcx.types.u8,
46             (I16, false) => tcx.types.u16,
47             (I32, false) => tcx.types.u32,
48             (I64, false) => tcx.types.u64,
49             (I128, false) => tcx.types.u128,
50             (I8, true) => tcx.types.i8,
51             (I16, true) => tcx.types.i16,
52             (I32, true) => tcx.types.i32,
53             (I64, true) => tcx.types.i64,
54             (I128, true) => tcx.types.i128,
55         }
56     }
57
58     /// Get the Integer type from an attr::IntType.
59     fn from_attr<C: HasDataLayout>(cx: C, ity: attr::IntType) -> Integer {
60         let dl = cx.data_layout();
61
62         match ity {
63             attr::SignedInt(IntTy::I8) | attr::UnsignedInt(UintTy::U8) => I8,
64             attr::SignedInt(IntTy::I16) | attr::UnsignedInt(UintTy::U16) => I16,
65             attr::SignedInt(IntTy::I32) | attr::UnsignedInt(UintTy::U32) => I32,
66             attr::SignedInt(IntTy::I64) | attr::UnsignedInt(UintTy::U64) => I64,
67             attr::SignedInt(IntTy::I128) | attr::UnsignedInt(UintTy::U128) => I128,
68             attr::SignedInt(IntTy::Isize) | attr::UnsignedInt(UintTy::Usize) => {
69                 dl.ptr_sized_integer()
70             }
71         }
72     }
73
74     /// Find the appropriate Integer type and signedness for the given
75     /// signed discriminant range and #[repr] attribute.
76     /// N.B.: u128 values above i128::MAX will be treated as signed, but
77     /// that shouldn't affect anything, other than maybe debuginfo.
78     fn repr_discr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
79                   ty: Ty<'tcx>,
80                   repr: &ReprOptions,
81                   min: i128,
82                   max: i128)
83                   -> (Integer, bool) {
84         // Theoretically, negative values could be larger in unsigned representation
85         // than the unsigned representation of the signed minimum. However, if there
86         // are any negative values, the only valid unsigned representation is u128
87         // which can fit all i128 values, so the result remains unaffected.
88         let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
89         let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));
90
91         let mut min_from_extern = None;
92         let min_default = I8;
93
94         if let Some(ity) = repr.int {
95             let discr = Integer::from_attr(tcx, ity);
96             let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
97             if discr < fit {
98                 bug!("Integer::repr_discr: `#[repr]` hint too small for \
99                   discriminant range of enum `{}", ty)
100             }
101             return (discr, ity.is_signed());
102         }
103
104         if repr.c() {
105             match &tcx.sess.target.target.arch[..] {
106                 // WARNING: the ARM EABI has two variants; the one corresponding
107                 // to `at_least == I32` appears to be used on Linux and NetBSD,
108                 // but some systems may use the variant corresponding to no
109                 // lower bound.  However, we don't run on those yet...?
110                 "arm" => min_from_extern = Some(I32),
111                 _ => min_from_extern = Some(I32),
112             }
113         }
114
115         let at_least = min_from_extern.unwrap_or(min_default);
116
117         // If there are no negative values, we can use the unsigned fit.
118         if min >= 0 {
119             (cmp::max(unsigned_fit, at_least), false)
120         } else {
121             (cmp::max(signed_fit, at_least), true)
122         }
123     }
124 }
125
/// Extension methods for `Primitive`, mapping ABI primitives back to
/// `rustc`'s interned types.
pub trait PrimitiveExt {
    /// Returns the interned type corresponding to this ABI primitive
    /// (see the impl on `Primitive` for the exact mapping).
    fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Ty<'tcx>;
}
129
130 impl PrimitiveExt for Primitive {
131     fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Ty<'tcx> {
132         match *self {
133             Int(i, signed) => i.to_ty(tcx, signed),
134             Float(FloatTy::F32) => tcx.types.f32,
135             Float(FloatTy::F64) => tcx.types.f64,
136             Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
137         }
138     }
139 }
140
/// The first half of a fat pointer.
///
/// - For a trait object, this is the address of the box.
/// - For a slice, this is the base address.
pub const FAT_PTR_ADDR: usize = 0;

/// The second half of a fat pointer.
///
/// - For a trait object, this is the address of the vtable.
/// - For a slice, this is the length.
pub const FAT_PTR_EXTRA: usize = 1;
152
/// Errors that can arise while computing the layout of a type.
#[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable)]
pub enum LayoutError<'tcx> {
    /// The layout of the type could not be determined
    /// (reported as "has an unknown layout").
    Unknown(Ty<'tcx>),
    /// The computed size overflowed
    /// (reported as "too big for the current architecture").
    SizeOverflow(Ty<'tcx>)
}
158
159 impl<'tcx> fmt::Display for LayoutError<'tcx> {
160     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
161         match *self {
162             LayoutError::Unknown(ty) => {
163                 write!(f, "the type `{:?}` has an unknown layout", ty)
164             }
165             LayoutError::SizeOverflow(ty) => {
166                 write!(f, "the type `{:?}` is too big for the current architecture", ty)
167             }
168         }
169     }
170 }
171
/// Query provider computing the layout of `ty` under `param_env`,
/// tracking recursion depth in the thread-local `ImplicitCtxt` so that
/// infinitely recursive types abort with a fatal error instead of
/// overflowing the stack.
fn layout_raw<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                        query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
                        -> Result<&'tcx LayoutDetails, LayoutError<'tcx>>
{
    ty::tls::with_related_context(tcx, move |icx| {
        let rec_limit = *tcx.sess.recursion_limit.get();
        let (param_env, ty) = query.into_parts();

        // Fatal (non-recoverable) error once nested layout computations
        // exceed the session's recursion limit.
        if icx.layout_depth > rec_limit {
            tcx.sess.fatal(
                &format!("overflow representing the type `{}`", ty));
        }

        // Update the ImplicitCtxt to increase the layout_depth
        let icx = ty::tls::ImplicitCtxt {
            layout_depth: icx.layout_depth + 1,
            ..icx.clone()
        };

        // Re-enter the TLS context with the bumped depth so nested layout
        // queries observe it, then do the actual (uncached) computation.
        ty::tls::enter_context(&icx, |_| {
            let cx = LayoutCx { tcx, param_env };
            cx.layout_raw_uncached(ty)
        })
    })
}
197
198 pub fn provide(providers: &mut ty::query::Providers<'_>) {
199     *providers = ty::query::Providers {
200         layout_raw,
201         ..*providers
202     };
203 }
204
/// Context required to compute layouts: a type context (generic over `C`;
/// instantiated with `TyCtxt` in the impl below) plus the parameter
/// environment the computation runs under.
#[derive(Copy, Clone)]
pub struct LayoutCx<'tcx, C> {
    /// The type context (or data-layout provider) used for interning
    /// and queries.
    pub tcx: C,
    /// Parameter environment used e.g. for normalization and
    /// sizedness checks.
    pub param_env: ty::ParamEnv<'tcx>
}
210
211 impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
212     fn layout_raw_uncached(self, ty: Ty<'tcx>)
213                            -> Result<&'tcx LayoutDetails, LayoutError<'tcx>> {
214         let tcx = self.tcx;
215         let param_env = self.param_env;
216         let dl = self.data_layout();
217         let scalar_unit = |value: Primitive| {
218             let bits = value.size(dl).bits();
219             assert!(bits <= 128);
220             Scalar {
221                 value,
222                 valid_range: 0..=(!0 >> (128 - bits))
223             }
224         };
225         let scalar = |value: Primitive| {
226             tcx.intern_layout(LayoutDetails::scalar(self, scalar_unit(value)))
227         };
228         let scalar_pair = |a: Scalar, b: Scalar| {
229             let align = a.value.align(dl).max(b.value.align(dl)).max(dl.aggregate_align);
230             let b_offset = a.value.size(dl).abi_align(b.value.align(dl));
231             let size = (b_offset + b.value.size(dl)).abi_align(align);
232             LayoutDetails {
233                 variants: Variants::Single { index: 0 },
234                 fields: FieldPlacement::Arbitrary {
235                     offsets: vec![Size::ZERO, b_offset],
236                     memory_index: vec![0, 1]
237                 },
238                 abi: Abi::ScalarPair(a, b),
239                 align,
240                 size
241             }
242         };
243
244         #[derive(Copy, Clone, Debug)]
245         enum StructKind {
246             /// A tuple, closure, or univariant which cannot be coerced to unsized.
247             AlwaysSized,
248             /// A univariant, the last field of which may be coerced to unsized.
249             MaybeUnsized,
250             /// A univariant, but with a prefix of an arbitrary size & alignment (e.g. enum tag).
251             Prefixed(Size, Align),
252         }
253         let univariant_uninterned = |fields: &[TyLayout<'_>], repr: &ReprOptions, kind| {
254             let packed = repr.packed();
255             if packed && repr.align > 0 {
256                 bug!("struct cannot be packed and aligned");
257             }
258
259             let pack = {
260                 let pack = repr.pack as u64;
261                 Align::from_bytes(pack, pack).unwrap()
262             };
263
264             let mut align = if packed {
265                 dl.i8_align
266             } else {
267                 dl.aggregate_align
268             };
269
270             let mut sized = true;
271             let mut offsets = vec![Size::ZERO; fields.len()];
272             let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();
273
274             let mut optimize = !repr.inhibit_struct_field_reordering_opt();
275             if let StructKind::Prefixed(_, align) = kind {
276                 optimize &= align.abi() == 1;
277             }
278
279             if optimize {
280                 let end = if let StructKind::MaybeUnsized = kind {
281                     fields.len() - 1
282                 } else {
283                     fields.len()
284                 };
285                 let optimizing = &mut inverse_memory_index[..end];
286                 let field_align = |f: &TyLayout<'_>| {
287                     if packed { f.align.min(pack).abi() } else { f.align.abi() }
288                 };
289                 match kind {
290                     StructKind::AlwaysSized |
291                     StructKind::MaybeUnsized => {
292                         optimizing.sort_by_key(|&x| {
293                             // Place ZSTs first to avoid "interesting offsets",
294                             // especially with only one or two non-ZST fields.
295                             let f = &fields[x as usize];
296                             (!f.is_zst(), cmp::Reverse(field_align(f)))
297                         });
298                     }
299                     StructKind::Prefixed(..) => {
300                         optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
301                     }
302                 }
303             }
304
305             // inverse_memory_index holds field indices by increasing memory offset.
306             // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
307             // We now write field offsets to the corresponding offset slot;
308             // field 5 with offset 0 puts 0 in offsets[5].
309             // At the bottom of this function, we use inverse_memory_index to produce memory_index.
310
311             let mut offset = Size::ZERO;
312
313             if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
314                 if packed {
315                     let prefix_align = prefix_align.min(pack);
316                     align = align.max(prefix_align);
317                 } else {
318                     align = align.max(prefix_align);
319                 }
320                 offset = prefix_size.abi_align(prefix_align);
321             }
322
323             for &i in &inverse_memory_index {
324                 let field = fields[i as usize];
325                 if !sized {
326                     bug!("univariant: field #{} of `{}` comes after unsized field",
327                         offsets.len(), ty);
328                 }
329
330                 if field.is_unsized() {
331                     sized = false;
332                 }
333
334                 // Invariant: offset < dl.obj_size_bound() <= 1<<61
335                 if packed {
336                     let field_pack = field.align.min(pack);
337                     offset = offset.abi_align(field_pack);
338                     align = align.max(field_pack);
339                 }
340                 else {
341                     offset = offset.abi_align(field.align);
342                     align = align.max(field.align);
343                 }
344
345                 debug!("univariant offset: {:?} field: {:#?}", offset, field);
346                 offsets[i as usize] = offset;
347
348                 offset = offset.checked_add(field.size, dl)
349                     .ok_or(LayoutError::SizeOverflow(ty))?;
350             }
351
352             if repr.align > 0 {
353                 let repr_align = repr.align as u64;
354                 align = align.max(Align::from_bytes(repr_align, repr_align).unwrap());
355                 debug!("univariant repr_align: {:?}", repr_align);
356             }
357
358             debug!("univariant min_size: {:?}", offset);
359             let min_size = offset;
360
361             // As stated above, inverse_memory_index holds field indices by increasing offset.
362             // This makes it an already-sorted view of the offsets vec.
363             // To invert it, consider:
364             // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
365             // Field 5 would be the first element, so memory_index is i:
366             // Note: if we didn't optimize, it's already right.
367
368             let mut memory_index;
369             if optimize {
370                 memory_index = vec![0; inverse_memory_index.len()];
371
372                 for i in 0..inverse_memory_index.len() {
373                     memory_index[inverse_memory_index[i] as usize]  = i as u32;
374                 }
375             } else {
376                 memory_index = inverse_memory_index;
377             }
378
379             let size = min_size.abi_align(align);
380             let mut abi = Abi::Aggregate { sized };
381
382             // Unpack newtype ABIs and find scalar pairs.
383             if sized && size.bytes() > 0 {
384                 // All other fields must be ZSTs, and we need them to all start at 0.
385                 let mut zst_offsets =
386                     offsets.iter().enumerate().filter(|&(i, _)| fields[i].is_zst());
387                 if zst_offsets.all(|(_, o)| o.bytes() == 0) {
388                     let mut non_zst_fields =
389                         fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());
390
391                     match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
392                         // We have exactly one non-ZST field.
393                         (Some((i, field)), None, None) => {
394                             // Field fills the struct and it has a scalar or scalar pair ABI.
395                             if offsets[i].bytes() == 0 &&
396                                align.abi() == field.align.abi() &&
397                                size == field.size {
398                                 match field.abi {
399                                     // For plain scalars, or vectors of them, we can't unpack
400                                     // newtypes for `#[repr(C)]`, as that affects C ABIs.
401                                     Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
402                                         abi = field.abi.clone();
403                                     }
404                                     // But scalar pairs are Rust-specific and get
405                                     // treated as aggregates by C ABIs anyway.
406                                     Abi::ScalarPair(..) => {
407                                         abi = field.abi.clone();
408                                     }
409                                     _ => {}
410                                 }
411                             }
412                         }
413
414                         // Two non-ZST fields, and they're both scalars.
415                         (Some((i, &TyLayout {
416                             details: &LayoutDetails { abi: Abi::Scalar(ref a), .. }, ..
417                         })), Some((j, &TyLayout {
418                             details: &LayoutDetails { abi: Abi::Scalar(ref b), .. }, ..
419                         })), None) => {
420                             // Order by the memory placement, not source order.
421                             let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
422                                 ((i, a), (j, b))
423                             } else {
424                                 ((j, b), (i, a))
425                             };
426                             let pair = scalar_pair(a.clone(), b.clone());
427                             let pair_offsets = match pair.fields {
428                                 FieldPlacement::Arbitrary {
429                                     ref offsets,
430                                     ref memory_index
431                                 } => {
432                                     assert_eq!(memory_index, &[0, 1]);
433                                     offsets
434                                 }
435                                 _ => bug!()
436                             };
437                             if offsets[i] == pair_offsets[0] &&
438                                offsets[j] == pair_offsets[1] &&
439                                align == pair.align &&
440                                size == pair.size {
441                                 // We can use `ScalarPair` only when it matches our
442                                 // already computed layout (including `#[repr(C)]`).
443                                 abi = pair.abi;
444                             }
445                         }
446
447                         _ => {}
448                     }
449                 }
450             }
451
452             if sized && fields.iter().any(|f| f.abi == Abi::Uninhabited) {
453                 abi = Abi::Uninhabited;
454             }
455
456             Ok(LayoutDetails {
457                 variants: Variants::Single { index: 0 },
458                 fields: FieldPlacement::Arbitrary {
459                     offsets,
460                     memory_index
461                 },
462                 abi,
463                 align,
464                 size
465             })
466         };
467         let univariant = |fields: &[TyLayout<'_>], repr: &ReprOptions, kind| {
468             Ok(tcx.intern_layout(univariant_uninterned(fields, repr, kind)?))
469         };
470         debug_assert!(!ty.has_infer_types());
471
472         Ok(match ty.sty {
473             // Basic scalars.
474             ty::Bool => {
475                 tcx.intern_layout(LayoutDetails::scalar(self, Scalar {
476                     value: Int(I8, false),
477                     valid_range: 0..=1
478                 }))
479             }
480             ty::Char => {
481                 tcx.intern_layout(LayoutDetails::scalar(self, Scalar {
482                     value: Int(I32, false),
483                     valid_range: 0..=0x10FFFF
484                 }))
485             }
486             ty::Int(ity) => {
487                 scalar(Int(Integer::from_attr(dl, attr::SignedInt(ity)), true))
488             }
489             ty::Uint(ity) => {
490                 scalar(Int(Integer::from_attr(dl, attr::UnsignedInt(ity)), false))
491             }
492             ty::Float(fty) => scalar(Float(fty)),
493             ty::FnPtr(_) => {
494                 let mut ptr = scalar_unit(Pointer);
495                 ptr.valid_range = 1..=*ptr.valid_range.end();
496                 tcx.intern_layout(LayoutDetails::scalar(self, ptr))
497             }
498
499             // The never type.
500             ty::Never => {
501                 tcx.intern_layout(LayoutDetails {
502                     variants: Variants::Single { index: 0 },
503                     fields: FieldPlacement::Union(0),
504                     abi: Abi::Uninhabited,
505                     align: dl.i8_align,
506                     size: Size::ZERO
507                 })
508             }
509
510             // Potentially-fat pointers.
511             ty::Ref(_, pointee, _) |
512             ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
513                 let mut data_ptr = scalar_unit(Pointer);
514                 if !ty.is_unsafe_ptr() {
515                     data_ptr.valid_range = 1..=*data_ptr.valid_range.end();
516                 }
517
518                 let pointee = tcx.normalize_erasing_regions(param_env, pointee);
519                 if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
520                     return Ok(tcx.intern_layout(LayoutDetails::scalar(self, data_ptr)));
521                 }
522
523                 let unsized_part = tcx.struct_tail(pointee);
524                 let metadata = match unsized_part.sty {
525                     ty::Foreign(..) => {
526                         return Ok(tcx.intern_layout(LayoutDetails::scalar(self, data_ptr)));
527                     }
528                     ty::Slice(_) | ty::Str => {
529                         scalar_unit(Int(dl.ptr_sized_integer(), false))
530                     }
531                     ty::Dynamic(..) => {
532                         let mut vtable = scalar_unit(Pointer);
533                         vtable.valid_range = 1..=*vtable.valid_range.end();
534                         vtable
535                     }
536                     _ => return Err(LayoutError::Unknown(unsized_part))
537                 };
538
539                 // Effectively a (ptr, meta) tuple.
540                 tcx.intern_layout(scalar_pair(data_ptr, metadata))
541             }
542
543             // Arrays and slices.
544             ty::Array(element, mut count) => {
545                 if count.has_projections() {
546                     count = tcx.normalize_erasing_regions(param_env, count);
547                     if count.has_projections() {
548                         return Err(LayoutError::Unknown(ty));
549                     }
550                 }
551
552                 let element = self.layout_of(element)?;
553                 let count = count.unwrap_usize(tcx);
554                 let size = element.size.checked_mul(count, dl)
555                     .ok_or(LayoutError::SizeOverflow(ty))?;
556
557                 tcx.intern_layout(LayoutDetails {
558                     variants: Variants::Single { index: 0 },
559                     fields: FieldPlacement::Array {
560                         stride: element.size,
561                         count
562                     },
563                     abi: Abi::Aggregate { sized: true },
564                     align: element.align,
565                     size
566                 })
567             }
568             ty::Slice(element) => {
569                 let element = self.layout_of(element)?;
570                 tcx.intern_layout(LayoutDetails {
571                     variants: Variants::Single { index: 0 },
572                     fields: FieldPlacement::Array {
573                         stride: element.size,
574                         count: 0
575                     },
576                     abi: Abi::Aggregate { sized: false },
577                     align: element.align,
578                     size: Size::ZERO
579                 })
580             }
581             ty::Str => {
582                 tcx.intern_layout(LayoutDetails {
583                     variants: Variants::Single { index: 0 },
584                     fields: FieldPlacement::Array {
585                         stride: Size::from_bytes(1),
586                         count: 0
587                     },
588                     abi: Abi::Aggregate { sized: false },
589                     align: dl.i8_align,
590                     size: Size::ZERO
591                 })
592             }
593
594             // Odd unit types.
595             ty::FnDef(..) => {
596                 univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?
597             }
598             ty::Dynamic(..) | ty::Foreign(..) => {
599                 let mut unit = univariant_uninterned(&[], &ReprOptions::default(),
600                   StructKind::AlwaysSized)?;
601                 match unit.abi {
602                     Abi::Aggregate { ref mut sized } => *sized = false,
603                     _ => bug!()
604                 }
605                 tcx.intern_layout(unit)
606             }
607
608             // Tuples, generators and closures.
609             ty::Generator(def_id, ref substs, _) => {
610                 let tys = substs.field_tys(def_id, tcx);
611                 univariant(&tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
612                     &ReprOptions::default(),
613                     StructKind::AlwaysSized)?
614             }
615
616             ty::Closure(def_id, ref substs) => {
617                 let tys = substs.upvar_tys(def_id, tcx);
618                 univariant(&tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
619                     &ReprOptions::default(),
620                     StructKind::AlwaysSized)?
621             }
622
623             ty::Tuple(tys) => {
624                 let kind = if tys.len() == 0 {
625                     StructKind::AlwaysSized
626                 } else {
627                     StructKind::MaybeUnsized
628                 };
629
630                 univariant(&tys.iter().map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
631                     &ReprOptions::default(), kind)?
632             }
633
634             // SIMD vector types.
635             ty::Adt(def, ..) if def.repr.simd() => {
636                 let element = self.layout_of(ty.simd_type(tcx))?;
637                 let count = ty.simd_size(tcx) as u64;
638                 assert!(count > 0);
639                 let scalar = match element.abi {
640                     Abi::Scalar(ref scalar) => scalar.clone(),
641                     _ => {
642                         tcx.sess.fatal(&format!("monomorphising SIMD type `{}` with \
643                                                 a non-machine element type `{}`",
644                                                 ty, element.ty));
645                     }
646                 };
647                 let size = element.size.checked_mul(count, dl)
648                     .ok_or(LayoutError::SizeOverflow(ty))?;
649                 let align = dl.vector_align(size);
650                 let size = size.abi_align(align);
651
652                 tcx.intern_layout(LayoutDetails {
653                     variants: Variants::Single { index: 0 },
654                     fields: FieldPlacement::Array {
655                         stride: element.size,
656                         count
657                     },
658                     abi: Abi::Vector {
659                         element: scalar,
660                         count
661                     },
662                     size,
663                     align,
664                 })
665             }
666
667             // ADTs.
668             ty::Adt(def, substs) => {
669                 // Cache the field layouts.
670                 let variants = def.variants.iter().map(|v| {
671                     v.fields.iter().map(|field| {
672                         self.layout_of(field.ty(tcx, substs))
673                     }).collect::<Result<Vec<_>, _>>()
674                 }).collect::<Result<Vec<_>, _>>()?;
675
676                 if def.is_union() {
677                     let packed = def.repr.packed();
678                     if packed && def.repr.align > 0 {
679                         bug!("Union cannot be packed and aligned");
680                     }
681
682                     let pack = {
683                         let pack = def.repr.pack as u64;
684                         Align::from_bytes(pack, pack).unwrap()
685                     };
686
687                     let mut align = if packed {
688                         dl.i8_align
689                     } else {
690                         dl.aggregate_align
691                     };
692
693                     if def.repr.align > 0 {
694                         let repr_align = def.repr.align as u64;
695                         align = align.max(
696                             Align::from_bytes(repr_align, repr_align).unwrap());
697                     }
698
699                     let mut size = Size::ZERO;
700                     for field in &variants[0] {
701                         assert!(!field.is_unsized());
702
703                         if packed {
704                             let field_pack = field.align.min(pack);
705                             align = align.max(field_pack);
706                         } else {
707                             align = align.max(field.align);
708                         }
709                         size = cmp::max(size, field.size);
710                     }
711
712                     return Ok(tcx.intern_layout(LayoutDetails {
713                         variants: Variants::Single { index: 0 },
714                         fields: FieldPlacement::Union(variants[0].len()),
715                         abi: Abi::Aggregate { sized: true },
716                         align,
717                         size: size.abi_align(align)
718                     }));
719                 }
720
721                 // A variant is absent if it's uninhabited and only has ZST fields.
722                 // Present uninhabited variants only require space for their fields,
723                 // but *not* an encoding of the discriminant (e.g. a tag value).
724                 // See issue #49298 for more details on the need to leave space
725                 // for non-ZST uninhabited data (mostly partial initialization).
726                 let absent = |fields: &[TyLayout<'_>]| {
727                     let uninhabited = fields.iter().any(|f| f.abi == Abi::Uninhabited);
728                     let is_zst = fields.iter().all(|f| f.is_zst());
729                     uninhabited && is_zst
730                 };
731                 let (present_first, present_second) = {
732                     let mut present_variants = (0..variants.len()).filter(|&v| {
733                         !absent(&variants[v])
734                     });
735                     (present_variants.next(), present_variants.next())
736                 };
737                 if present_first.is_none() {
738                     // Uninhabited because it has no variants, or only absent ones.
739                     return tcx.layout_raw(param_env.and(tcx.types.never));
740                 }
741
742                 let is_struct = !def.is_enum() ||
743                     // Only one variant is present.
744                     (present_second.is_none() &&
745                     // Representation optimizations are allowed.
746                      !def.repr.inhibit_enum_layout_opt());
747                 if is_struct {
748                     // Struct, or univariant enum equivalent to a struct.
749                     // (Typechecking will reject discriminant-sizing attrs.)
750
751                     let v = present_first.unwrap();
752                     let kind = if def.is_enum() || variants[v].len() == 0 {
753                         StructKind::AlwaysSized
754                     } else {
755                         let param_env = tcx.param_env(def.did);
756                         let last_field = def.variants[v].fields.last().unwrap();
757                         let always_sized = tcx.type_of(last_field.did)
758                           .is_sized(tcx.at(DUMMY_SP), param_env);
759                         if !always_sized { StructKind::MaybeUnsized }
760                         else { StructKind::AlwaysSized }
761                     };
762
763                     let mut st = univariant_uninterned(&variants[v], &def.repr, kind)?;
764                     st.variants = Variants::Single { index: v };
765                     let (start, end) = self.tcx.layout_scalar_valid_range(def.did);
766                     match st.abi {
767                         Abi::Scalar(ref mut scalar) |
768                         Abi::ScalarPair(ref mut scalar, _) => {
769                             // the asserts ensure that we are not using the
770                             // `#[rustc_layout_scalar_valid_range(n)]`
771                             // attribute to widen the range of anything as that would probably
772                             // result in UB somewhere
773                             if let Bound::Included(start) = start {
774                                 assert!(*scalar.valid_range.start() <= start);
775                                 scalar.valid_range = start..=*scalar.valid_range.end();
776                             }
777                             if let Bound::Included(end) = end {
778                                 assert!(*scalar.valid_range.end() >= end);
779                                 scalar.valid_range = *scalar.valid_range.start()..=end;
780                             }
781                         }
782                         _ => assert!(
783                             start == Bound::Unbounded && end == Bound::Unbounded,
784                             "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
785                             def,
786                             st,
787                         ),
788                     }
789                     return Ok(tcx.intern_layout(st));
790                 }
791
792                 // The current code for niche-filling relies on variant indices
793                 // instead of actual discriminants, so dataful enums with
794                 // explicit discriminants (RFC #2363) would misbehave.
795                 let no_explicit_discriminants = def.variants.iter().enumerate()
796                     .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i));
797
798                 // Niche-filling enum optimization.
799                 if !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants {
800                     let mut dataful_variant = None;
801                     let mut niche_variants = usize::max_value()..=0;
802
803                     // Find one non-ZST variant.
804                     'variants: for (v, fields) in variants.iter().enumerate() {
805                         if absent(fields) {
806                             continue 'variants;
807                         }
808                         for f in fields {
809                             if !f.is_zst() {
810                                 if dataful_variant.is_none() {
811                                     dataful_variant = Some(v);
812                                     continue 'variants;
813                                 } else {
814                                     dataful_variant = None;
815                                     break 'variants;
816                                 }
817                             }
818                         }
819                         niche_variants = *niche_variants.start().min(&v)..=v;
820                     }
821
822                     if niche_variants.start() > niche_variants.end() {
823                         dataful_variant = None;
824                     }
825
826                     if let Some(i) = dataful_variant {
827                         let count = (niche_variants.end() - niche_variants.start() + 1) as u128;
828                         for (field_index, &field) in variants[i].iter().enumerate() {
829                             let niche = match self.find_niche(field)? {
830                                 Some(niche) => niche,
831                                 _ => continue,
832                             };
833                             let (niche_start, niche_scalar) = match niche.reserve(self, count) {
834                                 Some(pair) => pair,
835                                 None => continue,
836                             };
837
838                             let mut align = dl.aggregate_align;
839                             let st = variants.iter().enumerate().map(|(j, v)| {
840                                 let mut st = univariant_uninterned(v,
841                                     &def.repr, StructKind::AlwaysSized)?;
842                                 st.variants = Variants::Single { index: j };
843
844                                 align = align.max(st.align);
845
846                                 Ok(st)
847                             }).collect::<Result<Vec<_>, _>>()?;
848
849                             let offset = st[i].fields.offset(field_index) + niche.offset;
850                             let size = st[i].size;
851
852                             let mut abi = match st[i].abi {
853                                 Abi::Scalar(_) => Abi::Scalar(niche_scalar.clone()),
854                                 Abi::ScalarPair(ref first, ref second) => {
855                                     // We need to use scalar_unit to reset the
856                                     // valid range to the maximal one for that
857                                     // primitive, because only the niche is
858                                     // guaranteed to be initialised, not the
859                                     // other primitive.
860                                     if offset.bytes() == 0 {
861                                         Abi::ScalarPair(
862                                             niche_scalar.clone(),
863                                             scalar_unit(second.value),
864                                         )
865                                     } else {
866                                         Abi::ScalarPair(
867                                             scalar_unit(first.value),
868                                             niche_scalar.clone(),
869                                         )
870                                     }
871                                 }
872                                 _ => Abi::Aggregate { sized: true },
873                             };
874
875                             if st.iter().all(|v| v.abi == Abi::Uninhabited) {
876                                 abi = Abi::Uninhabited;
877                             }
878
879                             return Ok(tcx.intern_layout(LayoutDetails {
880                                 variants: Variants::NicheFilling {
881                                     dataful_variant: i,
882                                     niche_variants,
883                                     niche: niche_scalar,
884                                     niche_start,
885                                     variants: st,
886                                 },
887                                 fields: FieldPlacement::Arbitrary {
888                                     offsets: vec![offset],
889                                     memory_index: vec![0]
890                                 },
891                                 abi,
892                                 size,
893                                 align,
894                             }));
895                         }
896                     }
897                 }
898
899                 let (mut min, mut max) = (i128::max_value(), i128::min_value());
900                 let discr_type = def.repr.discr_type();
901                 let bits = Integer::from_attr(tcx, discr_type).size().bits();
902                 for (i, discr) in def.discriminants(tcx).enumerate() {
903                     if variants[i].iter().any(|f| f.abi == Abi::Uninhabited) {
904                         continue;
905                     }
906                     let mut x = discr.val as i128;
907                     if discr_type.is_signed() {
908                         // sign extend the raw representation to be an i128
909                         x = (x << (128 - bits)) >> (128 - bits);
910                     }
911                     if x < min { min = x; }
912                     if x > max { max = x; }
913                 }
914                 // We might have no inhabited variants, so pretend there's at least one.
915                 if (min, max) == (i128::max_value(), i128::min_value()) {
916                     min = 0;
917                     max = 0;
918                 }
919                 assert!(min <= max, "discriminant range is {}...{}", min, max);
920                 let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max);
921
922                 let mut align = dl.aggregate_align;
923                 let mut size = Size::ZERO;
924
925                 // We're interested in the smallest alignment, so start large.
926                 let mut start_align = Align::from_bytes(256, 256).unwrap();
927                 assert_eq!(Integer::for_abi_align(dl, start_align), None);
928
929                 // repr(C) on an enum tells us to make a (tag, union) layout,
930                 // so we need to grow the prefix alignment to be at least
931                 // the alignment of the union. (This value is used both for
932                 // determining the alignment of the overall enum, and the
933                 // determining the alignment of the payload after the tag.)
934                 let mut prefix_align = min_ity.align(dl);
935                 if def.repr.c() {
936                     for fields in &variants {
937                         for field in fields {
938                             prefix_align = prefix_align.max(field.align);
939                         }
940                     }
941                 }
942
943                 // Create the set of structs that represent each variant.
944                 let mut layout_variants = variants.iter().enumerate().map(|(i, field_layouts)| {
945                     let mut st = univariant_uninterned(&field_layouts,
946                         &def.repr, StructKind::Prefixed(min_ity.size(), prefix_align))?;
947                     st.variants = Variants::Single { index: i };
948                     // Find the first field we can't move later
949                     // to make room for a larger discriminant.
950                     for field in st.fields.index_by_increasing_offset().map(|j| field_layouts[j]) {
951                         if !field.is_zst() || field.align.abi() != 1 {
952                             start_align = start_align.min(field.align);
953                             break;
954                         }
955                     }
956                     size = cmp::max(size, st.size);
957                     align = align.max(st.align);
958                     Ok(st)
959                 }).collect::<Result<Vec<_>, _>>()?;
960
961                 // Align the maximum variant size to the largest alignment.
962                 size = size.abi_align(align);
963
964                 if size.bytes() >= dl.obj_size_bound() {
965                     return Err(LayoutError::SizeOverflow(ty));
966                 }
967
968                 let typeck_ity = Integer::from_attr(dl, def.repr.discr_type());
969                 if typeck_ity < min_ity {
970                     // It is a bug if Layout decided on a greater discriminant size than typeck for
971                     // some reason at this point (based on values discriminant can take on). Mostly
972                     // because this discriminant will be loaded, and then stored into variable of
973                     // type calculated by typeck. Consider such case (a bug): typeck decided on
974                     // byte-sized discriminant, but layout thinks we need a 16-bit to store all
975                     // discriminant values. That would be a bug, because then, in codegen, in order
976                     // to store this 16-bit discriminant into 8-bit sized temporary some of the
977                     // space necessary to represent would have to be discarded (or layout is wrong
978                     // on thinking it needs 16 bits)
979                     bug!("layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
980                          min_ity, typeck_ity);
981                     // However, it is fine to make discr type however large (as an optimisation)
982                     // after this point – we’ll just truncate the value we load in codegen.
983                 }
984
985                 // Check to see if we should use a different type for the
986                 // discriminant. We can safely use a type with the same size
987                 // as the alignment of the first field of each variant.
988                 // We increase the size of the discriminant to avoid LLVM copying
989                 // padding when it doesn't need to. This normally causes unaligned
990                 // load/stores and excessive memcpy/memset operations. By using a
991                 // bigger integer size, LLVM can be sure about its contents and
992                 // won't be so conservative.
993
994                 // Use the initial field alignment
995                 let mut ity = if def.repr.c() || def.repr.int.is_some() {
996                     min_ity
997                 } else {
998                     Integer::for_abi_align(dl, start_align).unwrap_or(min_ity)
999                 };
1000
1001                 // If the alignment is not larger than the chosen discriminant size,
1002                 // don't use the alignment as the final size.
1003                 if ity <= min_ity {
1004                     ity = min_ity;
1005                 } else {
1006                     // Patch up the variants' first few fields.
1007                     let old_ity_size = min_ity.size();
1008                     let new_ity_size = ity.size();
1009                     for variant in &mut layout_variants {
1010                         match variant.fields {
1011                             FieldPlacement::Arbitrary { ref mut offsets, .. } => {
1012                                 for i in offsets {
1013                                     if *i <= old_ity_size {
1014                                         assert_eq!(*i, old_ity_size);
1015                                         *i = new_ity_size;
1016                                     }
1017                                 }
1018                                 // We might be making the struct larger.
1019                                 if variant.size <= old_ity_size {
1020                                     variant.size = new_ity_size;
1021                                 }
1022                             }
1023                             _ => bug!()
1024                         }
1025                     }
1026                 }
1027
1028                 let tag_mask = !0u128 >> (128 - ity.size().bits());
1029                 let tag = Scalar {
1030                     value: Int(ity, signed),
1031                     valid_range: (min as u128 & tag_mask)..=(max as u128 & tag_mask),
1032                 };
1033                 let mut abi = Abi::Aggregate { sized: true };
1034                 if tag.value.size(dl) == size {
1035                     abi = Abi::Scalar(tag.clone());
1036                 } else {
1037                     // Try to use a ScalarPair for all tagged enums.
1038                     let mut common_prim = None;
1039                     for (field_layouts, layout_variant) in variants.iter().zip(&layout_variants) {
1040                         let offsets = match layout_variant.fields {
1041                             FieldPlacement::Arbitrary { ref offsets, .. } => offsets,
1042                             _ => bug!(),
1043                         };
1044                         let mut fields = field_layouts
1045                             .iter()
1046                             .zip(offsets)
1047                             .filter(|p| !p.0.is_zst());
1048                         let (field, offset) = match (fields.next(), fields.next()) {
1049                             (None, None) => continue,
1050                             (Some(pair), None) => pair,
1051                             _ => {
1052                                 common_prim = None;
1053                                 break;
1054                             }
1055                         };
1056                         let prim = match field.details.abi {
1057                             Abi::Scalar(ref scalar) => scalar.value,
1058                             _ => {
1059                                 common_prim = None;
1060                                 break;
1061                             }
1062                         };
1063                         if let Some(pair) = common_prim {
1064                             // This is pretty conservative. We could go fancier
1065                             // by conflating things like i32 and u32, or even
1066                             // realising that (u8, u8) could just cohabit with
1067                             // u16 or even u32.
1068                             if pair != (prim, offset) {
1069                                 common_prim = None;
1070                                 break;
1071                             }
1072                         } else {
1073                             common_prim = Some((prim, offset));
1074                         }
1075                     }
1076                     if let Some((prim, offset)) = common_prim {
1077                         let pair = scalar_pair(tag.clone(), scalar_unit(prim));
1078                         let pair_offsets = match pair.fields {
1079                             FieldPlacement::Arbitrary {
1080                                 ref offsets,
1081                                 ref memory_index
1082                             } => {
1083                                 assert_eq!(memory_index, &[0, 1]);
1084                                 offsets
1085                             }
1086                             _ => bug!()
1087                         };
1088                         if pair_offsets[0] == Size::ZERO &&
1089                             pair_offsets[1] == *offset &&
1090                             align == pair.align &&
1091                             size == pair.size {
1092                             // We can use `ScalarPair` only when it matches our
1093                             // already computed layout (including `#[repr(C)]`).
1094                             abi = pair.abi;
1095                         }
1096                     }
1097                 }
1098
1099                 if layout_variants.iter().all(|v| v.abi == Abi::Uninhabited) {
1100                     abi = Abi::Uninhabited;
1101                 }
1102
1103                 tcx.intern_layout(LayoutDetails {
1104                     variants: Variants::Tagged {
1105                         tag,
1106                         variants: layout_variants,
1107                     },
1108                     fields: FieldPlacement::Arbitrary {
1109                         offsets: vec![Size::ZERO],
1110                         memory_index: vec![0]
1111                     },
1112                     abi,
1113                     align,
1114                     size
1115                 })
1116             }
1117
1118             // Types with no meaningful known layout.
1119             ty::Projection(_) | ty::Opaque(..) => {
1120                 let normalized = tcx.normalize_erasing_regions(param_env, ty);
1121                 if ty == normalized {
1122                     return Err(LayoutError::Unknown(ty));
1123                 }
1124                 tcx.layout_raw(param_env.and(normalized))?
1125             }
1126             ty::GeneratorWitness(..) | ty::Infer(_) => {
1127                 bug!("LayoutDetails::compute: unexpected type `{}`", ty)
1128             }
1129             ty::Param(_) | ty::Error => {
1130                 return Err(LayoutError::Unknown(ty));
1131             }
1132         })
1133     }
1134
1135     /// This is invoked by the `layout_raw` query to record the final
1136     /// layout of each type.
1137     #[inline]
1138     fn record_layout_for_printing(self, layout: TyLayout<'tcx>) {
1139         // If we are running with `-Zprint-type-sizes`, record layouts for
1140         // dumping later. Ignore layouts that are done with non-empty
1141         // environments or non-monomorphic layouts, as the user only wants
1142         // to see the stuff resulting from the final codegen session.
1143         if
1144             !self.tcx.sess.opts.debugging_opts.print_type_sizes ||
1145             layout.ty.has_param_types() ||
1146             layout.ty.has_self_ty() ||
1147             !self.param_env.caller_bounds.is_empty()
1148         {
1149             return;
1150         }
1151
1152         self.record_layout_for_printing_outlined(layout)
1153     }
1154
    /// Records size/alignment details for `layout` into the session's
    /// `code_stats`, for later dumping by `-Zprint-type-sizes`.
    /// Only nominal ADTs and closures are recorded; every other kind of
    /// type is skipped.
    fn record_layout_for_printing_outlined(self, layout: TyLayout<'tcx>) {
        // (delay format until we actually need it)
        let record = |kind, packed, opt_discr_size, variants| {
            let type_desc = format!("{:?}", layout.ty);
            self.tcx.sess.code_stats.borrow_mut().record_type_size(kind,
                                                                   type_desc,
                                                                   layout.align,
                                                                   layout.size,
                                                                   packed,
                                                                   opt_discr_size,
                                                                   variants);
        };

        // Only ADTs get the full per-variant treatment below; closures are
        // recorded as opaque (no variant info), and anything else is skipped.
        let adt_def = match layout.ty.sty {
            ty::Adt(ref adt_def, _) => {
                debug!("print-type-size t: `{:?}` process adt", layout.ty);
                adt_def
            }

            ty::Closure(..) => {
                debug!("print-type-size t: `{:?}` record closure", layout.ty);
                record(DataTypeKind::Closure, false, None, vec![]);
                return;
            }

            _ => {
                debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
                return;
            }
        };

        let adt_kind = adt_def.adt_kind();
        let adt_packed = adt_def.repr.packed();

        // Builds a `session::VariantInfo` for one variant: per-field name,
        // offset, size and alignment, plus the minimum size implied by the
        // fields (the end of the furthest-reaching field).
        let build_variant_info = |n: Option<ast::Name>,
                                  flds: &[ast::Name],
                                  layout: TyLayout<'tcx>| {
            let mut min_size = Size::ZERO;
            let field_info: Vec<_> = flds.iter().enumerate().map(|(i, &name)| {
                match layout.field(self, i) {
                    Err(err) => {
                        bug!("no layout found for field {}: `{:?}`", name, err);
                    }
                    Ok(field_layout) => {
                        let offset = layout.fields.offset(i);
                        let field_end = offset + field_layout.size;
                        // Track the furthest field end seen so far; this is
                        // the variant's minimum payload size.
                        if min_size < field_end {
                            min_size = field_end;
                        }
                        session::FieldInfo {
                            name: name.to_string(),
                            offset: offset.bytes(),
                            size: field_layout.size.bytes(),
                            align: field_layout.align.abi(),
                        }
                    }
                }
            }).collect();

            session::VariantInfo {
                name: n.map(|n|n.to_string()),
                // An unsized variant only has a statically-known *minimum*
                // size; sized variants have an exact one.
                kind: if layout.is_unsized() {
                    session::SizeKind::Min
                } else {
                    session::SizeKind::Exact
                },
                align: layout.align.abi(),
                // A zero `min_size` means no fields contributed; fall back
                // to the layout's overall size in that case.
                size: if min_size.bytes() == 0 {
                    layout.size.bytes()
                } else {
                    min_size.bytes()
                },
                fields: field_info,
            }
        };

        match layout.variants {
            // Structs, unions, and univariant enums: record the single
            // (present) variant, or zero variants for empty enums.
            Variants::Single { index } => {
                debug!("print-type-size `{:#?}` variant {}",
                       layout, adt_def.variants[index].name);
                if !adt_def.variants.is_empty() {
                    let variant_def = &adt_def.variants[index];
                    let fields: Vec<_> =
                        variant_def.fields.iter().map(|f| f.ident.name).collect();
                    record(adt_kind.into(),
                           adt_packed,
                           None,
                           vec![build_variant_info(Some(variant_def.name),
                                                   &fields,
                                                   layout)]);
                } else {
                    // (This case arises for *empty* enums; so give it
                    // zero variants.)
                    record(adt_kind.into(), adt_packed, None, vec![]);
                }
            }

            // Multi-variant enums: record every variant, and (for tagged
            // layouts only) the size of the discriminant tag.
            Variants::NicheFilling { .. } |
            Variants::Tagged { .. } => {
                debug!("print-type-size `{:#?}` adt general variants def {}",
                       layout.ty, adt_def.variants.len());
                let variant_infos: Vec<_> =
                    adt_def.variants.iter().enumerate().map(|(i, variant_def)| {
                        let fields: Vec<_> =
                            variant_def.fields.iter().map(|f| f.ident.name).collect();
                        build_variant_info(Some(variant_def.name),
                                            &fields,
                                            layout.for_variant(self, i))
                    })
                    .collect();
                record(adt_kind.into(), adt_packed, match layout.variants {
                    // Niche-filled enums have no separate tag, hence `None`.
                    Variants::Tagged { ref tag, .. } => Some(tag.value.size(self)),
                    _ => None
                }, variant_infos);
            }
        }
    }
1272 }
1273
/// Type size "skeleton", i.e. the only information determining a type's size.
/// While this is conservative, (aside from constant sizes, only pointers,
/// newtypes thereof and null pointer optimized enums are allowed), it is
/// enough to statically check common usecases of transmute.
#[derive(Copy, Clone, Debug)]
pub enum SizeSkeleton<'tcx> {
    /// Any statically computable Layout.
    Known(Size),

    /// A potentially-fat pointer.
    Pointer {
        /// If true, this pointer is never null.
        non_zero: bool,
        /// The type which determines the unsized metadata, if any,
        /// of this pointer. Either a type parameter or a projection
        /// depending on one, with regions erased.
        tail: Ty<'tcx>
    }
}
1293
1294 impl<'a, 'tcx> SizeSkeleton<'tcx> {
1295     pub fn compute(ty: Ty<'tcx>,
1296                    tcx: TyCtxt<'a, 'tcx, 'tcx>,
1297                    param_env: ty::ParamEnv<'tcx>)
1298                    -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
1299         debug_assert!(!ty.has_infer_types());
1300
1301         // First try computing a static layout.
1302         let err = match tcx.layout_of(param_env.and(ty)) {
1303             Ok(layout) => {
1304                 return Ok(SizeSkeleton::Known(layout.size));
1305             }
1306             Err(err) => err
1307         };
1308
1309         match ty.sty {
1310             ty::Ref(_, pointee, _) |
1311             ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
1312                 let non_zero = !ty.is_unsafe_ptr();
1313                 let tail = tcx.struct_tail(pointee);
1314                 match tail.sty {
1315                     ty::Param(_) | ty::Projection(_) => {
1316                         debug_assert!(tail.has_param_types() || tail.has_self_ty());
1317                         Ok(SizeSkeleton::Pointer {
1318                             non_zero,
1319                             tail: tcx.erase_regions(&tail)
1320                         })
1321                     }
1322                     _ => {
1323                         bug!("SizeSkeleton::compute({}): layout errored ({}), yet \
1324                               tail `{}` is not a type parameter or a projection",
1325                              ty, err, tail)
1326                     }
1327                 }
1328             }
1329
1330             ty::Adt(def, substs) => {
1331                 // Only newtypes and enums w/ nullable pointer optimization.
1332                 if def.is_union() || def.variants.is_empty() || def.variants.len() > 2 {
1333                     return Err(err);
1334                 }
1335
1336                 // Get a zero-sized variant or a pointer newtype.
1337                 let zero_or_ptr_variant = |i: usize| {
1338                     let fields = def.variants[i].fields.iter().map(|field| {
1339                         SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env)
1340                     });
1341                     let mut ptr = None;
1342                     for field in fields {
1343                         let field = field?;
1344                         match field {
1345                             SizeSkeleton::Known(size) => {
1346                                 if size.bytes() > 0 {
1347                                     return Err(err);
1348                                 }
1349                             }
1350                             SizeSkeleton::Pointer {..} => {
1351                                 if ptr.is_some() {
1352                                     return Err(err);
1353                                 }
1354                                 ptr = Some(field);
1355                             }
1356                         }
1357                     }
1358                     Ok(ptr)
1359                 };
1360
1361                 let v0 = zero_or_ptr_variant(0)?;
1362                 // Newtype.
1363                 if def.variants.len() == 1 {
1364                     if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
1365                         return Ok(SizeSkeleton::Pointer {
1366                             non_zero: non_zero || match tcx.layout_scalar_valid_range(def.did) {
1367                                 (Bound::Included(start), Bound::Unbounded) => start > 0,
1368                                 (Bound::Included(start), Bound::Included(end)) =>
1369                                     0 < start && start < end,
1370                                 _ => false,
1371                             },
1372                             tail,
1373                         });
1374                     } else {
1375                         return Err(err);
1376                     }
1377                 }
1378
1379                 let v1 = zero_or_ptr_variant(1)?;
1380                 // Nullable pointer enum optimization.
1381                 match (v0, v1) {
1382                     (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None) |
1383                     (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
1384                         Ok(SizeSkeleton::Pointer {
1385                             non_zero: false,
1386                             tail,
1387                         })
1388                     }
1389                     _ => Err(err)
1390                 }
1391             }
1392
1393             ty::Projection(_) | ty::Opaque(..) => {
1394                 let normalized = tcx.normalize_erasing_regions(param_env, ty);
1395                 if ty == normalized {
1396                     Err(err)
1397                 } else {
1398                     SizeSkeleton::compute(normalized, tcx, param_env)
1399                 }
1400             }
1401
1402             _ => Err(err)
1403         }
1404     }
1405
1406     pub fn same_size(self, other: SizeSkeleton<'_>) -> bool {
1407         match (self, other) {
1408             (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
1409             (SizeSkeleton::Pointer { tail: a, .. },
1410              SizeSkeleton::Pointer { tail: b, .. }) => a == b,
1411             _ => false
1412         }
1413     }
1414 }
1415
/// Contexts that can hand out a (global) `TyCtxt`, in addition to a
/// target data layout (via the `HasDataLayout` supertrait).
pub trait HasTyCtxt<'tcx>: HasDataLayout {
    /// Returns a `TyCtxt` whose local lifetime is tied to `self`.
    fn tcx<'a>(&'a self) -> TyCtxt<'a, 'tcx, 'tcx>;
}
1419
impl<'a, 'gcx, 'tcx> HasDataLayout for TyCtxt<'a, 'gcx, 'tcx> {
    // The target's data layout is stored directly on the `TyCtxt`.
    fn data_layout(&self) -> &TargetDataLayout {
        &self.data_layout
    }
}
1425
impl<'a, 'gcx, 'tcx> HasTyCtxt<'gcx> for TyCtxt<'a, 'gcx, 'tcx> {
    // Hands out the global context — note the return type replaces the
    // local `'tcx` with `'gcx`.
    fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'gcx> {
        self.global_tcx()
    }
}
1431
impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
    // Delegate to the wrapped context.
    fn data_layout(&self) -> &TargetDataLayout {
        self.tcx.data_layout()
    }
}
1437
impl<'gcx, 'tcx, T: HasTyCtxt<'gcx>> HasTyCtxt<'gcx> for LayoutCx<'tcx, T> {
    // Delegate to the wrapped context.
    fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'gcx> {
        self.tcx.tcx()
    }
}
1443
/// Abstracts over plain `T` and `Result<T, E>`, so generic layout code
/// can be written once for both infallible and fallible `LayoutOf`
/// implementations.
pub trait MaybeResult<T> {
    /// Wraps a success value (identity for `T`, `Ok` for `Result`).
    fn from_ok(x: T) -> Self;
    /// Applies `f` to the success value, if there is one.
    fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self;
}
1448
// Infallible case: `T` itself plays the role of the "ok" value.
impl<T> MaybeResult<T> for T {
    fn from_ok(x: T) -> Self {
        x
    }
    fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self {
        f(self)
    }
}
1457
// Fallible case: `Err` values pass through `map_same` untouched.
impl<T, E> MaybeResult<T> for Result<T, E> {
    fn from_ok(x: T) -> Self {
        Ok(x)
    }
    fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self {
        self.map(f)
    }
}
1466
/// `rustc_target`'s generic `TyLayout`, specialized to this crate's `Ty`.
pub type TyLayout<'tcx> = ::rustc_target::abi::TyLayout<'tcx, Ty<'tcx>>;
1468
impl<'a, 'tcx> LayoutOf for LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
    type Ty = Ty<'tcx>;
    type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;

    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
        // Normalize first so that the `layout_raw` query key is canonical.
        let param_env = self.param_env.with_reveal_all();
        let ty = self.tcx.normalize_erasing_regions(param_env, ty);
        let details = self.tcx.layout_raw(param_env.and(ty))?;
        let layout = TyLayout {
            ty,
            details
        };

        // NB: This recording is normally disabled; when enabled, it
        // can however trigger recursive invocations of `layout_of`.
        // Therefore, we execute it *after* the main query has
        // completed, to avoid problems around recursive structures
        // and the like. (Admittedly, I wasn't able to reproduce a problem
        // here, but it seems like the right thing to do. -nmatsakis)
        self.record_layout_for_printing(layout);

        Ok(layout)
    }
}
1495
// Same as the `TyCtxt`-based impl above, but the span carried by
// `TyCtxtAt` is used when issuing the `layout_raw` query.
impl<'a, 'tcx> LayoutOf for LayoutCx<'tcx, ty::query::TyCtxtAt<'a, 'tcx, 'tcx>> {
    type Ty = Ty<'tcx>;
    type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;

    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
        let param_env = self.param_env.with_reveal_all();
        let ty = self.tcx.normalize_erasing_regions(param_env, ty);
        let details = self.tcx.layout_raw(param_env.and(ty))?;
        let layout = TyLayout {
            ty,
            details
        };

        // NB: This recording is normally disabled; when enabled, it
        // can however trigger recursive invocations of `layout_of`.
        // Therefore, we execute it *after* the main query has
        // completed, to avoid problems around recursive structures
        // and the like. (Admittedly, I wasn't able to reproduce a problem
        // here, but it seems like the right thing to do. -nmatsakis)
        //
        // Recording needs a plain-`TyCtxt` context, so peel the span off
        // the `TyCtxtAt` wrapper via `*self.tcx`.
        let cx = LayoutCx {
            tcx: *self.tcx,
            param_env: self.param_env
        };
        cx.record_layout_for_printing(layout);

        Ok(layout)
    }
}
1526
1527 // Helper (inherent) `layout_of` methods to avoid pushing `LayoutCx` to users.
1528 impl TyCtxt<'a, 'tcx, '_> {
1529     /// Computes the layout of a type. Note that this implicitly
1530     /// executes in "reveal all" mode.
1531     #[inline]
1532     pub fn layout_of(self, param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
1533                      -> Result<TyLayout<'tcx>, LayoutError<'tcx>> {
1534         let cx = LayoutCx {
1535             tcx: self.global_tcx(),
1536             param_env: param_env_and_ty.param_env
1537         };
1538         cx.layout_of(param_env_and_ty.value)
1539     }
1540 }
1541
1542 impl ty::query::TyCtxtAt<'a, 'tcx, '_> {
1543     /// Computes the layout of a type. Note that this implicitly
1544     /// executes in "reveal all" mode.
1545     #[inline]
1546     pub fn layout_of(self, param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
1547                      -> Result<TyLayout<'tcx>, LayoutError<'tcx>> {
1548         let cx = LayoutCx {
1549             tcx: self.global_tcx().at(self.span),
1550             param_env: param_env_and_ty.param_env
1551         };
1552         cx.layout_of(param_env_and_ty.value)
1553     }
1554 }
1555
impl<'a, 'tcx, C> TyLayoutMethods<'tcx, C> for Ty<'tcx>
    where C: LayoutOf<Ty = Ty<'tcx>> + HasTyCtxt<'tcx>,
          C::TyLayout: MaybeResult<TyLayout<'tcx>>
{
    /// Projects `this` to the layout of variant `variant_index`.
    /// For `NicheFilling`/`Tagged` layouts this indexes the precomputed
    /// per-variant layouts; for a `Single` layout with a *different*
    /// index it synthesizes an uninhabited, zero-sized stand-in.
    fn for_variant(this: TyLayout<'tcx>, cx: C, variant_index: usize) -> TyLayout<'tcx> {
        let details = match this.variants {
            Variants::Single { index } if index == variant_index => this.details,

            Variants::Single { index } => {
                // Deny calling for_variant more than once for non-Single enums.
                // NOTE(review): with a fallible `C::TyLayout` (`Result`), the
                // closure only runs on `Ok` and the value is discarded, so
                // this is purely a sanity assertion.
                cx.layout_of(this.ty).map_same(|layout| {
                    assert_eq!(layout.variants, Variants::Single { index });
                    layout
                });

                let fields = match this.ty.sty {
                    ty::Adt(def, _) => def.variants[variant_index].fields.len(),
                    _ => bug!()
                };
                let tcx = cx.tcx();
                // Synthesize an uninhabited, zero-sized layout for the
                // requested (absent) variant.
                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: variant_index },
                    fields: FieldPlacement::Union(fields),
                    abi: Abi::Uninhabited,
                    align: tcx.data_layout.i8_align,
                    size: Size::ZERO
                })
            }

            Variants::NicheFilling { ref variants, .. } |
            Variants::Tagged { ref variants, .. } => {
                &variants[variant_index]
            }
        };

        // Whatever path was taken, the result must describe exactly the
        // requested variant.
        assert_eq!(details.variants, Variants::Single { index: variant_index });

        TyLayout {
            ty: this.ty,
            details
        }
    }

    /// Computes the layout of the `i`-th field of `this`.
    /// Calls `bug!` for types that have no fields at all.
    fn field(this: TyLayout<'tcx>, cx: C, i: usize) -> C::TyLayout {
        let tcx = cx.tcx();
        cx.layout_of(match this.ty.sty {
            // Types with no fields.
            ty::Bool |
            ty::Char |
            ty::Int(_) |
            ty::Uint(_) |
            ty::Float(_) |
            ty::FnPtr(_) |
            ty::Never |
            ty::FnDef(..) |
            ty::GeneratorWitness(..) |
            ty::Foreign(..) |
            ty::Dynamic(..) => {
                bug!("TyLayout::field_type({:?}): not applicable", this)
            }

            // Potentially-fat pointers.
            ty::Ref(_, pointee, _) |
            ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                assert!(i < this.fields.count());

                // Reuse the fat *T type as its own thin pointer data field.
                // This provides information about e.g. DST struct pointees
                // (which may have no non-DST form), and will work as long
                // as the `Abi` or `FieldPlacement` is checked by users.
                if i == 0 {
                    let nil = tcx.mk_unit();
                    let ptr_ty = if this.ty.is_unsafe_ptr() {
                        tcx.mk_mut_ptr(nil)
                    } else {
                        tcx.mk_mut_ref(tcx.types.re_static, nil)
                    };
                    return cx.layout_of(ptr_ty).map_same(|mut ptr_layout| {
                        ptr_layout.ty = this.ty;
                        ptr_layout
                    });
                }

                // Field 1 is the metadata of a fat pointer.
                match tcx.struct_tail(pointee).sty {
                    ty::Slice(_) |
                    ty::Str => tcx.types.usize,
                    ty::Dynamic(_, _) => {
                        // The vtable pointer is conservatively modeled as a
                        // reference to a 3-`usize` array; see the FIXME below
                        // for the actual fn-pointer contents.
                        tcx.mk_imm_ref(
                            tcx.types.re_static,
                            tcx.mk_array(tcx.types.usize, 3),
                        )
                        /* FIXME use actual fn pointers
                        Warning: naively computing the number of entries in the
                        vtable by counting the methods on the trait + methods on
                        all parent traits does not work, because some methods can
                        be not object safe and thus excluded from the vtable.
                        Increase this counter if you tried to implement this but
                        failed to do it without duplicating a lot of code from
                        other places in the compiler: 2
                        tcx.mk_tup(&[
                            tcx.mk_array(tcx.types.usize, 3),
                            tcx.mk_array(Option<fn()>),
                        ])
                        */
                    }
                    _ => bug!("TyLayout::field_type({:?}): not applicable", this)
                }
            }

            // Arrays and slices.
            ty::Array(element, _) |
            ty::Slice(element) => element,
            ty::Str => tcx.types.u8,

            // Tuples, generators and closures.
            ty::Closure(def_id, ref substs) => {
                substs.upvar_tys(def_id, tcx).nth(i).unwrap()
            }

            ty::Generator(def_id, ref substs, _) => {
                substs.field_tys(def_id, tcx).nth(i).unwrap()
            }

            ty::Tuple(tys) => tys[i],

            // SIMD vector types.
            ty::Adt(def, ..) if def.repr.simd() => {
                this.ty.simd_type(tcx)
            }

            // ADTs.
            ty::Adt(def, substs) => {
                match this.variants {
                    Variants::Single { index } => {
                        def.variants[index].fields[i].ty(tcx, substs)
                    }

                    // Discriminant field for enums (where applicable).
                    Variants::Tagged { tag: ref discr, .. } |
                    Variants::NicheFilling { niche: ref discr, .. } => {
                        assert_eq!(i, 0);
                        let layout = LayoutDetails::scalar(tcx, discr.clone());
                        // Early return: the discriminant's layout is built
                        // here directly rather than via `cx.layout_of`.
                        return MaybeResult::from_ok(TyLayout {
                            details: tcx.intern_layout(layout),
                            ty: discr.value.to_ty(tcx)
                        });
                    }
                }
            }

            ty::Projection(_) | ty::Opaque(..) | ty::Param(_) |
            ty::Infer(_) | ty::Error => {
                bug!("TyLayout::field_type: unexpected type `{}`", this.ty)
            }
        })
    }
}
1712
/// A "niche": a scalar field (at `offset` within a layout) whose valid
/// range leaves `available` values unused, into which an enum
/// discriminant can potentially be encoded.
struct Niche {
    // Byte offset of the scalar within the enclosing layout.
    offset: Size,
    // The scalar primitive together with its valid range.
    scalar: Scalar,
    // How many values fall outside the valid range.
    available: u128,
}
1718
impl Niche {
    /// Attempts to reserve `count` consecutive values from this niche.
    /// Returns the first reserved value and a widened `Scalar` whose
    /// valid range is extended past the old end to cover the reserved
    /// values, or `None` if fewer than `count` values are available.
    fn reserve<'a, 'tcx>(
        &self,
        cx: LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>>,
        count: u128,
    ) -> Option<(u128, Scalar)> {
        if count > self.available {
            return None;
        }
        let Scalar { value, valid_range: ref v } = self.scalar;
        let bits = value.size(cx).bits();
        assert!(bits <= 128);
        // Mask selecting only the bits representable in this scalar.
        let max_value = !0u128 >> (128 - bits);
        // Reserved values start just past the current valid end,
        // wrapping around within the scalar's bit width.
        let start = v.end().wrapping_add(1) & max_value;
        let end = v.end().wrapping_add(count) & max_value;
        Some((start, Scalar { value, valid_range: *v.start()..=end }))
    }
}
1737
impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
    /// Find the offset of a niche leaf field, starting from
    /// the given type and recursing through aggregates.
    // FIXME(eddyb) traverse already optimized enums.
    fn find_niche(self, layout: TyLayout<'tcx>) -> Result<Option<Niche>, LayoutError<'tcx>> {
        // Turn a scalar (located at `offset`) into a `Niche`, if its
        // valid range leaves at least one value unused.
        let scalar_niche = |scalar: &Scalar, offset| {
            let Scalar { value, valid_range: ref v } = *scalar;

            let bits = value.size(self).bits();
            assert!(bits <= 128);
            let max_value = !0u128 >> (128 - bits);

            // Find out how many values are outside the valid range.
            let available = if v.start() <= v.end() {
                // Contiguous range: values below the start plus values
                // above the end.
                v.start() + (max_value - v.end())
            } else {
                // Wrapping range: the gap between end and start.
                v.start() - v.end() - 1
            };

            // Give up if there is no niche value available.
            if available == 0 {
                return None;
            }

            Some(Niche { offset, scalar: scalar.clone(), available })
        };

        // Locals variables which live across yields are stored
        // in the generator type as fields. These may be uninitialized
        // so we don't look for niches there.
        if let ty::Generator(..) = layout.ty.sty {
            return Ok(None);
        }

        match layout.abi {
            Abi::Scalar(ref scalar) => {
                return Ok(scalar_niche(scalar, Size::ZERO));
            }
            Abi::ScalarPair(ref a, ref b) => {
                // HACK(nox): We iter on `b` and then `a` because `max_by_key`
                // returns the last maximum.
                let niche = iter::once((b, a.value.size(self).abi_align(b.value.align(self))))
                    .chain(iter::once((a, Size::ZERO)))
                    .filter_map(|(scalar, offset)| scalar_niche(scalar, offset))
                    .max_by_key(|niche| niche.available);
                return Ok(niche);
            }
            Abi::Vector { ref element, .. } => {
                return Ok(scalar_niche(element, Size::ZERO));
            }
            _ => {}
        }

        // Perhaps one of the fields is non-zero, let's recurse and find out.
        if let FieldPlacement::Union(_) = layout.fields {
            // Only Rust enums have safe-to-inspect fields
            // (a discriminant), other unions are unsafe.
            if let Variants::Single { .. } = layout.variants {
                return Ok(None);
            }
        }
        if let FieldPlacement::Array { .. } = layout.fields {
            // Array elements share one layout, so inspecting the first
            // element is sufficient.
            if layout.fields.count() > 0 {
                return self.find_niche(layout.field(self, 0)?);
            } else {
                return Ok(None);
            }
        }
        // Arbitrary placement: pick the field with the largest niche.
        let mut niche = None;
        let mut available = 0;
        for i in 0..layout.fields.count() {
            if let Some(mut c) = self.find_niche(layout.field(self, i)?)? {
                if c.available > available {
                    available = c.available;
                    // Translate the field-relative offset to be relative
                    // to the enclosing layout.
                    c.offset += layout.fields.offset(i);
                    niche = Some(c);
                }
            }
        }
        Ok(niche)
    }
}
1820
// NOTE: the exact order in which components are hashed is part of the
// stable-hash contract; do not reorder.
impl<'a> HashStable<StableHashingContext<'a>> for Variants {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        use ty::layout::Variants::*;
        // Hash the variant kind first so differently-shaped values with
        // coincidentally equal fields hash differently.
        mem::discriminant(self).hash_stable(hcx, hasher);

        match *self {
            Single { index } => {
                index.hash_stable(hcx, hasher);
            }
            Tagged {
                ref tag,
                ref variants,
            } => {
                tag.hash_stable(hcx, hasher);
                variants.hash_stable(hcx, hasher);
            }
            NicheFilling {
                dataful_variant,
                ref niche_variants,
                ref niche,
                niche_start,
                ref variants,
            } => {
                dataful_variant.hash_stable(hcx, hasher);
                // The range is hashed as its two endpoints.
                niche_variants.start().hash_stable(hcx, hasher);
                niche_variants.end().hash_stable(hcx, hasher);
                niche.hash_stable(hcx, hasher);
                niche_start.hash_stable(hcx, hasher);
                variants.hash_stable(hcx, hasher);
            }
        }
    }
}
1856
// Stable hashing for `FieldPlacement`: discriminant first, then each
// variant's payload fields in declaration order.
impl<'a> HashStable<StableHashingContext<'a>> for FieldPlacement {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        use ty::layout::FieldPlacement::*;
        mem::discriminant(self).hash_stable(hcx, hasher);

        match *self {
            Union(count) => {
                count.hash_stable(hcx, hasher);
            }
            Array { count, stride } => {
                count.hash_stable(hcx, hasher);
                stride.hash_stable(hcx, hasher);
            }
            Arbitrary { ref offsets, ref memory_index } => {
                offsets.hash_stable(hcx, hasher);
                memory_index.hash_stable(hcx, hasher);
            }
        }
    }
}
1879
// Stable hashing for `Abi`: discriminant first, then each variant's
// payload fields in declaration order.
impl<'a> HashStable<StableHashingContext<'a>> for Abi {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        use ty::layout::Abi::*;
        mem::discriminant(self).hash_stable(hcx, hasher);

        match *self {
            Uninhabited => {}
            Scalar(ref value) => {
                value.hash_stable(hcx, hasher);
            }
            ScalarPair(ref a, ref b) => {
                a.hash_stable(hcx, hasher);
                b.hash_stable(hcx, hasher);
            }
            Vector { ref element, count } => {
                element.hash_stable(hcx, hasher);
                count.hash_stable(hcx, hasher);
            }
            Aggregate { sized } => {
                sized.hash_stable(hcx, hasher);
            }
        }
    }
}
1906
impl<'a> HashStable<StableHashingContext<'a>> for Scalar {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        // Destructure fully so adding a field forces this impl to be
        // revisited; the range is hashed as its two endpoints.
        let Scalar { value, ref valid_range } = *self;
        value.hash_stable(hcx, hasher);
        valid_range.start().hash_stable(hcx, hasher);
        valid_range.end().hash_stable(hcx, hasher);
    }
}
1917
// Stable hashing for `LayoutDetails`: the listed fields, in this order.
impl_stable_hash_for!(struct ::ty::layout::LayoutDetails {
    variants,
    fields,
    abi,
    size,
    align
});
1925
// Stable hashing for the `Integer` size enum (fieldless variants).
impl_stable_hash_for!(enum ::ty::layout::Integer {
    I8,
    I16,
    I32,
    I64,
    I128
});
1933
// Stable hashing for `Primitive`, including each variant's payload.
impl_stable_hash_for!(enum ::ty::layout::Primitive {
    Int(integer, signed),
    Float(fty),
    Pointer
});
1939
impl<'gcx> HashStable<StableHashingContext<'gcx>> for Align {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'gcx>,
                                          hasher: &mut StableHasher<W>) {
        // Hash both the ABI-mandated and the preferred alignment.
        self.abi().hash_stable(hcx, hasher);
        self.pref().hash_stable(hcx, hasher);
    }
}
1948
impl<'gcx> HashStable<StableHashingContext<'gcx>> for Size {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'gcx>,
                                          hasher: &mut StableHasher<W>) {
        // A `Size` is fully determined by its byte count.
        self.bytes().hash_stable(hcx, hasher);
    }
}
1956
impl<'a, 'gcx> HashStable<StableHashingContext<'a>> for LayoutError<'gcx>
{
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        use ty::layout::LayoutError::*;
        mem::discriminant(self).hash_stable(hcx, hasher);

        // Both variants carry the offending type as their only payload.
        match *self {
            Unknown(t) |
            SizeOverflow(t) => t.hash_stable(hcx, hasher)
        }
    }
}