src/librustc/ty/layout.rs
1 // Copyright 2016 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
4 //
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
10
11 use session::{self, DataTypeKind};
12 use ty::{self, Ty, TyCtxt, TypeFoldable, ReprOptions};
13
14 use syntax::ast::{self, IntTy, UintTy};
15 use syntax::attr;
16 use syntax_pos::DUMMY_SP;
17
18 use std::cmp;
19 use std::fmt;
20 use std::i128;
21 use std::iter;
22 use std::mem;
23
24 use ich::StableHashingContext;
25 use rustc_data_structures::stable_hasher::{HashStable, StableHasher,
26                                            StableHasherResult};
27
28 pub use rustc_target::abi::*;
29
30 pub trait IntegerExt {
31     fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, signed: bool) -> Ty<'tcx>;
32     fn from_attr<C: HasDataLayout>(cx: C, ity: attr::IntType) -> Integer;
33     fn repr_discr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
34                   ty: Ty<'tcx>,
35                   repr: &ReprOptions,
36                   min: i128,
37                   max: i128)
38                   -> (Integer, bool);
39 }
40
41 impl IntegerExt for Integer {
42     fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, signed: bool) -> Ty<'tcx> {
43         match (*self, signed) {
44             (I8, false) => tcx.types.u8,
45             (I16, false) => tcx.types.u16,
46             (I32, false) => tcx.types.u32,
47             (I64, false) => tcx.types.u64,
48             (I128, false) => tcx.types.u128,
49             (I8, true) => tcx.types.i8,
50             (I16, true) => tcx.types.i16,
51             (I32, true) => tcx.types.i32,
52             (I64, true) => tcx.types.i64,
53             (I128, true) => tcx.types.i128,
54         }
55     }
56
57     /// Get the Integer type from an attr::IntType.
58     fn from_attr<C: HasDataLayout>(cx: C, ity: attr::IntType) -> Integer {
59         let dl = cx.data_layout();
60
61         match ity {
62             attr::SignedInt(IntTy::I8) | attr::UnsignedInt(UintTy::U8) => I8,
63             attr::SignedInt(IntTy::I16) | attr::UnsignedInt(UintTy::U16) => I16,
64             attr::SignedInt(IntTy::I32) | attr::UnsignedInt(UintTy::U32) => I32,
65             attr::SignedInt(IntTy::I64) | attr::UnsignedInt(UintTy::U64) => I64,
66             attr::SignedInt(IntTy::I128) | attr::UnsignedInt(UintTy::U128) => I128,
67             attr::SignedInt(IntTy::Isize) | attr::UnsignedInt(UintTy::Usize) => {
68                 dl.ptr_sized_integer()
69             }
70         }
71     }
72
73     /// Find the appropriate Integer type and signedness for the given
74     /// signed discriminant range and #[repr] attribute.
75     /// N.B.: u128 values above i128::MAX will be treated as signed, but
76     /// that shouldn't affect anything, other than maybe debuginfo.
77     fn repr_discr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
78                   ty: Ty<'tcx>,
79                   repr: &ReprOptions,
80                   min: i128,
81                   max: i128)
82                   -> (Integer, bool) {
83         // Theoretically, negative values could be larger in unsigned representation
84         // than the unsigned representation of the signed minimum. However, if there
85         // are any negative values, the only valid unsigned representation is u128
86         // which can fit all i128 values, so the result remains unaffected.
87         let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
88         let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));
89
90         let mut min_from_extern = None;
91         let min_default = I8;
92
93         if let Some(ity) = repr.int {
94             let discr = Integer::from_attr(tcx, ity);
95             let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
96             if discr < fit {
97                 bug!("Integer::repr_discr: `#[repr]` hint too small for \
98                   discriminant range of enum `{}`", ty)
99             }
100             return (discr, ity.is_signed());
101         }
102
103         if repr.c() {
104             match &tcx.sess.target.target.arch[..] {
105                 // WARNING: the ARM EABI has two variants; the one corresponding
106                 // to `at_least == I32` appears to be used on Linux and NetBSD,
107                 // but some systems may use the variant corresponding to no
108                 // lower bound.  However, we don't run on those yet...?
109                 "arm" => min_from_extern = Some(I32),
110                 _ => min_from_extern = Some(I32),
111             }
112         }
113
114         let at_least = min_from_extern.unwrap_or(min_default);
115
116         // If there are no negative values, we can use the unsigned fit.
117         if min >= 0 {
118             (cmp::max(unsigned_fit, at_least), false)
119         } else {
120             (cmp::max(signed_fit, at_least), true)
121         }
122     }
123 }
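// Editorial sketch (not part of the original source): a worked example of how
// `repr_discr` picks a discriminant type. For an enum whose discriminants span
// `min = -1, max = 100` with no `#[repr]` hint:
//
//     let unsigned_fit = Integer::fit_unsigned(u128::MAX); // -1 as u128 wraps around, so I128
//     let signed_fit = cmp::max(Integer::fit_signed(-1), Integer::fit_signed(100)); // I8
//     // min < 0, so the signed fit wins and the result is (I8, true).
//
// Adding `#[repr(C)]` raises the lower bound via `min_from_extern`, giving (I32, true).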
124
125 pub trait PrimitiveExt {
126     fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Ty<'tcx>;
127 }
128
129 impl PrimitiveExt for Primitive {
130     fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Ty<'tcx> {
131         match *self {
132             Int(i, signed) => i.to_ty(tcx, signed),
133             Float(FloatTy::F32) => tcx.types.f32,
134             Float(FloatTy::F64) => tcx.types.f64,
135             Pointer => tcx.mk_mut_ptr(tcx.mk_nil()),
136         }
137     }
138 }
139
140 /// The first half of a fat pointer.
141 ///
142 /// - For a trait object, this is the address of the box.
143 /// - For a slice, this is the base address.
144 pub const FAT_PTR_ADDR: usize = 0;
145
146 /// The second half of a fat pointer.
147 ///
148 /// - For a trait object, this is the address of the vtable.
149 /// - For a slice, this is the length.
150 pub const FAT_PTR_EXTRA: usize = 1;
151
152 #[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable)]
153 pub enum LayoutError<'tcx> {
154     Unknown(Ty<'tcx>),
155     SizeOverflow(Ty<'tcx>)
156 }
157
158 impl<'tcx> fmt::Display for LayoutError<'tcx> {
159     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
160         match *self {
161             LayoutError::Unknown(ty) => {
162                 write!(f, "the type `{:?}` has an unknown layout", ty)
163             }
164             LayoutError::SizeOverflow(ty) => {
165                 write!(f, "the type `{:?}` is too big for the current architecture", ty)
166             }
167         }
168     }
169 }
170
171 fn layout_raw<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
172                         query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
173                         -> Result<&'tcx LayoutDetails, LayoutError<'tcx>>
174 {
175     ty::tls::with_related_context(tcx, move |icx| {
176         let rec_limit = *tcx.sess.recursion_limit.get();
177         let (param_env, ty) = query.into_parts();
178
179         if icx.layout_depth > rec_limit {
180             tcx.sess.fatal(
181                 &format!("overflow representing the type `{}`", ty));
182         }
183
184         // Update the ImplicitCtxt to increase the layout_depth
185         let icx = ty::tls::ImplicitCtxt {
186             layout_depth: icx.layout_depth + 1,
187             ..icx.clone()
188         };
189
190         ty::tls::enter_context(&icx, |_| {
191             let cx = LayoutCx { tcx, param_env };
192             cx.layout_raw_uncached(ty)
193         })
194     })
195 }
196
197 pub fn provide(providers: &mut ty::query::Providers) {
198     *providers = ty::query::Providers {
199         layout_raw,
200         ..*providers
201     };
202 }
203
204 #[derive(Copy, Clone)]
205 pub struct LayoutCx<'tcx, C> {
206     pub tcx: C,
207     pub param_env: ty::ParamEnv<'tcx>
208 }
209
210 impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
211     fn layout_raw_uncached(self, ty: Ty<'tcx>)
212                            -> Result<&'tcx LayoutDetails, LayoutError<'tcx>> {
213         let tcx = self.tcx;
214         let param_env = self.param_env;
215         let dl = self.data_layout();
216         let scalar_unit = |value: Primitive| {
217             let bits = value.size(dl).bits();
218             assert!(bits <= 128);
219             Scalar {
220                 value,
221                 valid_range: 0..=(!0 >> (128 - bits))
222             }
223         };
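        // Editorial note (a sketch, not from the original source): `scalar_unit` builds a
        // scalar whose valid range covers every bit pattern; e.g. for `Int(I8, false)` the
        // mask `!0 >> (128 - 8)` is 0xff, giving `valid_range: 0..=255`.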
224         let scalar = |value: Primitive| {
225             tcx.intern_layout(LayoutDetails::scalar(self, scalar_unit(value)))
226         };
227         let scalar_pair = |a: Scalar, b: Scalar| {
228             let align = a.value.align(dl).max(b.value.align(dl)).max(dl.aggregate_align);
229             let b_offset = a.value.size(dl).abi_align(b.value.align(dl));
230             let size = (b_offset + b.value.size(dl)).abi_align(align);
231             LayoutDetails {
232                 variants: Variants::Single { index: 0 },
233                 fields: FieldPlacement::Arbitrary {
234                     offsets: vec![Size::ZERO, b_offset],
235                     memory_index: vec![0, 1]
236                 },
237                 abi: Abi::ScalarPair(a, b),
238                 align,
239                 size
240             }
241         };
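        // Editorial sketch (not in the original source): on a typical 64-bit target,
        // `scalar_pair(scalar_unit(Pointer), scalar_unit(Int(I64, false)))` - the shape used
        // below for `&[T]` fat pointers - yields `b_offset = 8`, `size = 16`, `align = 8`,
        // and reports the result as `Abi::ScalarPair` rather than a memory aggregate.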
242
243         #[derive(Copy, Clone, Debug)]
244         enum StructKind {
245             /// A tuple, closure, or univariant which cannot be coerced to unsized.
246             AlwaysSized,
247             /// A univariant, the last field of which may be coerced to unsized.
248             MaybeUnsized,
249             /// A univariant, but with a prefix of an arbitrary size & alignment (e.g. enum tag).
250             Prefixed(Size, Align),
251         }
252         let univariant_uninterned = |fields: &[TyLayout], repr: &ReprOptions, kind| {
253             let packed = repr.packed();
254             if packed && repr.align > 0 {
255                 bug!("struct cannot be packed and aligned");
256             }
257
258             let pack = {
259                 let pack = repr.pack as u64;
260                 Align::from_bytes(pack, pack).unwrap()
261             };
262
263             let mut align = if packed {
264                 dl.i8_align
265             } else {
266                 dl.aggregate_align
267             };
268
269             let mut sized = true;
270             let mut offsets = vec![Size::ZERO; fields.len()];
271             let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();
272
273             let mut optimize = !repr.inhibit_struct_field_reordering_opt();
274             if let StructKind::Prefixed(_, align) = kind {
275                 optimize &= align.abi() == 1;
276             }
277
278             if optimize {
279                 let end = if let StructKind::MaybeUnsized = kind {
280                     fields.len() - 1
281                 } else {
282                     fields.len()
283                 };
284                 let optimizing = &mut inverse_memory_index[..end];
285                 let field_align = |f: &TyLayout| {
286                     if packed { f.align.min(pack).abi() } else { f.align.abi() }
287                 };
288                 match kind {
289                     StructKind::AlwaysSized |
290                     StructKind::MaybeUnsized => {
291                         optimizing.sort_by_key(|&x| {
292                             // Place ZSTs first to avoid "interesting offsets",
293                             // especially with only one or two non-ZST fields.
294                             let f = &fields[x as usize];
295                             (!f.is_zst(), cmp::Reverse(field_align(f)))
296                         });
297                     }
298                     StructKind::Prefixed(..) => {
299                         optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
300                     }
301                 }
302             }
303
304             // inverse_memory_index holds field indices by increasing memory offset.
305             // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
306             // We now write field offsets to the corresponding offset slot;
307             // field 5 with offset 0 puts 0 in offsets[5].
308             // At the bottom of this function, we use inverse_memory_index to produce memory_index.
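            // Editorial sketch (not from the original source): for
            // `struct S { a: u8, b: u8, c: u32 }` with the default repr, the sort above
            // orders the fields as c, a, b, so inverse_memory_index = [2, 0, 1]. The loop
            // below assigns offsets = [4, 5, 0] (indexed by source field order), and the
            // inversion at the bottom yields memory_index = [1, 2, 0], with size 8, align 4.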
309
310             let mut offset = Size::ZERO;
311
312             if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
313                 if packed {
314                     let prefix_align = prefix_align.min(pack);
315                     align = align.max(prefix_align);
316                 } else {
317                     align = align.max(prefix_align);
318                 }
319                 offset = prefix_size.abi_align(prefix_align);
320             }
321
322             for &i in &inverse_memory_index {
323                 let field = fields[i as usize];
324                 if !sized {
325                     bug!("univariant: field #{} of `{}` comes after unsized field",
326                         offsets.len(), ty);
327                 }
328
329                 if field.is_unsized() {
330                     sized = false;
331                 }
332
333                 // Invariant: offset < dl.obj_size_bound() <= 1<<61
334                 if packed {
335                     let field_pack = field.align.min(pack);
336                     offset = offset.abi_align(field_pack);
337                     align = align.max(field_pack);
338                 }
339                 else {
340                     offset = offset.abi_align(field.align);
341                     align = align.max(field.align);
342                 }
343
344                 debug!("univariant offset: {:?} field: {:#?}", offset, field);
345                 offsets[i as usize] = offset;
346
347                 offset = offset.checked_add(field.size, dl)
348                     .ok_or(LayoutError::SizeOverflow(ty))?;
349             }
350
351             if repr.align > 0 {
352                 let repr_align = repr.align as u64;
353                 align = align.max(Align::from_bytes(repr_align, repr_align).unwrap());
354                 debug!("univariant repr_align: {:?}", repr_align);
355             }
356
357             debug!("univariant min_size: {:?}", offset);
358             let min_size = offset;
359
360             // As stated above, inverse_memory_index holds field indices by increasing offset.
361             // This makes it an already-sorted view of the offsets vec.
362             // To invert it, consider:
363             // If field 5 has offset 0, inverse_memory_index[0] is 5, and memory_index[5] should be 0.
364             // Field 5 comes first in memory, so for each i, memory_index[inverse_memory_index[i]] = i.
365             // Note: if we didn't optimize, it's already right.
366
367             let mut memory_index;
368             if optimize {
369                 memory_index = vec![0; inverse_memory_index.len()];
370
371                 for i in 0..inverse_memory_index.len() {
372                     memory_index[inverse_memory_index[i] as usize] = i as u32;
373                 }
374             } else {
375                 memory_index = inverse_memory_index;
376             }
377
378             let size = min_size.abi_align(align);
379             let mut abi = Abi::Aggregate { sized };
380
381             // Unpack newtype ABIs and find scalar pairs.
382             if sized && size.bytes() > 0 {
383                 // All other fields must be ZSTs, and we need them to all start at 0.
384                 let mut zst_offsets =
385                     offsets.iter().enumerate().filter(|&(i, _)| fields[i].is_zst());
386                 if zst_offsets.all(|(_, o)| o.bytes() == 0) {
387                     let mut non_zst_fields =
388                         fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());
389
390                     match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
391                         // We have exactly one non-ZST field.
392                         (Some((i, field)), None, None) => {
393                             // Field fills the struct and it has a scalar or scalar pair ABI.
394                             if offsets[i].bytes() == 0 &&
395                                align.abi() == field.align.abi() &&
396                                size == field.size {
397                                 match field.abi {
398                                     // For plain scalars, or vectors of them, we can't unpack
399                                     // newtypes for `#[repr(C)]`, as that affects C ABIs.
400                                     Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
401                                         abi = field.abi.clone();
402                                     }
403                                     // But scalar pairs are Rust-specific and get
404                                     // treated as aggregates by C ABIs anyway.
405                                     Abi::ScalarPair(..) => {
406                                         abi = field.abi.clone();
407                                     }
408                                     _ => {}
409                                 }
410                             }
411                         }
412
413                         // Two non-ZST fields, and they're both scalars.
414                         (Some((i, &TyLayout {
415                             details: &LayoutDetails { abi: Abi::Scalar(ref a), .. }, ..
416                         })), Some((j, &TyLayout {
417                             details: &LayoutDetails { abi: Abi::Scalar(ref b), .. }, ..
418                         })), None) => {
419                             // Order by the memory placement, not source order.
420                             let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
421                                 ((i, a), (j, b))
422                             } else {
423                                 ((j, b), (i, a))
424                             };
425                             let pair = scalar_pair(a.clone(), b.clone());
426                             let pair_offsets = match pair.fields {
427                                 FieldPlacement::Arbitrary {
428                                     ref offsets,
429                                     ref memory_index
430                                 } => {
431                                     assert_eq!(memory_index, &[0, 1]);
432                                     offsets
433                                 }
434                                 _ => bug!()
435                             };
436                             if offsets[i] == pair_offsets[0] &&
437                                offsets[j] == pair_offsets[1] &&
438                                align == pair.align &&
439                                size == pair.size {
440                                 // We can use `ScalarPair` only when it matches our
441                                 // already computed layout (including `#[repr(C)]`).
442                                 abi = pair.abi;
443                             }
444                         }
445
446                         _ => {}
447                     }
448                 }
449             }
450
451             if sized && fields.iter().any(|f| f.abi == Abi::Uninhabited) {
452                 abi = Abi::Uninhabited;
453             }
454
455             Ok(LayoutDetails {
456                 variants: Variants::Single { index: 0 },
457                 fields: FieldPlacement::Arbitrary {
458                     offsets,
459                     memory_index
460                 },
461                 abi,
462                 align,
463                 size
464             })
465         };
466         let univariant = |fields: &[TyLayout], repr: &ReprOptions, kind| {
467             Ok(tcx.intern_layout(univariant_uninterned(fields, repr, kind)?))
468         };
469         debug_assert!(!ty.has_infer_types());
470
471         Ok(match ty.sty {
472             // Basic scalars.
473             ty::TyBool => {
474                 tcx.intern_layout(LayoutDetails::scalar(self, Scalar {
475                     value: Int(I8, false),
476                     valid_range: 0..=1
477                 }))
478             }
479             ty::TyChar => {
480                 tcx.intern_layout(LayoutDetails::scalar(self, Scalar {
481                     value: Int(I32, false),
482                     valid_range: 0..=0x10FFFF
483                 }))
484             }
485             ty::TyInt(ity) => {
486                 scalar(Int(Integer::from_attr(dl, attr::SignedInt(ity)), true))
487             }
488             ty::TyUint(ity) => {
489                 scalar(Int(Integer::from_attr(dl, attr::UnsignedInt(ity)), false))
490             }
491             ty::TyFloat(fty) => scalar(Float(fty)),
492             ty::TyFnPtr(_) => {
493                 let mut ptr = scalar_unit(Pointer);
494                 ptr.valid_range = 1..=*ptr.valid_range.end();
495                 tcx.intern_layout(LayoutDetails::scalar(self, ptr))
496             }
497
498             // The never type.
499             ty::TyNever => {
500                 tcx.intern_layout(LayoutDetails {
501                     variants: Variants::Single { index: 0 },
502                     fields: FieldPlacement::Union(0),
503                     abi: Abi::Uninhabited,
504                     align: dl.i8_align,
505                     size: Size::ZERO
506                 })
507             }
508
509             // Potentially-fat pointers.
510             ty::TyRef(_, pointee, _) |
511             ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
512                 let mut data_ptr = scalar_unit(Pointer);
513                 if !ty.is_unsafe_ptr() {
514                     data_ptr.valid_range = 1..=*data_ptr.valid_range.end();
515                 }
516
517                 let pointee = tcx.normalize_erasing_regions(param_env, pointee);
518                 if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
519                     return Ok(tcx.intern_layout(LayoutDetails::scalar(self, data_ptr)));
520                 }
521
522                 let unsized_part = tcx.struct_tail(pointee);
523                 let metadata = match unsized_part.sty {
524                     ty::TyForeign(..) => {
525                         return Ok(tcx.intern_layout(LayoutDetails::scalar(self, data_ptr)));
526                     }
527                     ty::TySlice(_) | ty::TyStr => {
528                         scalar_unit(Int(dl.ptr_sized_integer(), false))
529                     }
530                     ty::TyDynamic(..) => {
531                         let mut vtable = scalar_unit(Pointer);
532                         vtable.valid_range = 1..=*vtable.valid_range.end();
533                         vtable
534                     }
535                     _ => return Err(LayoutError::Unknown(unsized_part))
536                 };
537
538                 // Effectively a (ptr, meta) tuple.
539                 tcx.intern_layout(scalar_pair(data_ptr, metadata))
540             }
541
542             // Arrays and slices.
543             ty::TyArray(element, mut count) => {
544                 if count.has_projections() {
545                     count = tcx.normalize_erasing_regions(param_env, count);
546                     if count.has_projections() {
547                         return Err(LayoutError::Unknown(ty));
548                     }
549                 }
550
551                 let element = self.layout_of(element)?;
552                 let count = count.unwrap_usize(tcx);
553                 let size = element.size.checked_mul(count, dl)
554                     .ok_or(LayoutError::SizeOverflow(ty))?;
555
556                 tcx.intern_layout(LayoutDetails {
557                     variants: Variants::Single { index: 0 },
558                     fields: FieldPlacement::Array {
559                         stride: element.size,
560                         count
561                     },
562                     abi: Abi::Aggregate { sized: true },
563                     align: element.align,
564                     size
565                 })
566             }
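            // Editorial note (a sketch, not from the original source): e.g. `[u16; 3]` gets
            // `FieldPlacement::Array { stride: 2, count: 3 }`, size `2 * 3 = 6`, and the
            // element's alignment of 2.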
567             ty::TySlice(element) => {
568                 let element = self.layout_of(element)?;
569                 tcx.intern_layout(LayoutDetails {
570                     variants: Variants::Single { index: 0 },
571                     fields: FieldPlacement::Array {
572                         stride: element.size,
573                         count: 0
574                     },
575                     abi: Abi::Aggregate { sized: false },
576                     align: element.align,
577                     size: Size::ZERO
578                 })
579             }
580             ty::TyStr => {
581                 tcx.intern_layout(LayoutDetails {
582                     variants: Variants::Single { index: 0 },
583                     fields: FieldPlacement::Array {
584                         stride: Size::from_bytes(1),
585                         count: 0
586                     },
587                     abi: Abi::Aggregate { sized: false },
588                     align: dl.i8_align,
589                     size: Size::ZERO
590                 })
591             }
592
593             // Odd unit types.
594             ty::TyFnDef(..) => {
595                 univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?
596             }
597             ty::TyDynamic(..) | ty::TyForeign(..) => {
598                 let mut unit = univariant_uninterned(&[], &ReprOptions::default(),
599                   StructKind::AlwaysSized)?;
600                 match unit.abi {
601                     Abi::Aggregate { ref mut sized } => *sized = false,
602                     _ => bug!()
603                 }
604                 tcx.intern_layout(unit)
605             }
606
607             // Tuples, generators and closures.
608             ty::TyGenerator(def_id, ref substs, _) => {
609                 let tys = substs.field_tys(def_id, tcx);
610                 univariant(&tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
611                     &ReprOptions::default(),
612                     StructKind::AlwaysSized)?
613             }
614
615             ty::TyClosure(def_id, ref substs) => {
616                 let tys = substs.upvar_tys(def_id, tcx);
617                 univariant(&tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
618                     &ReprOptions::default(),
619                     StructKind::AlwaysSized)?
620             }
621
622             ty::TyTuple(tys) => {
623                 let kind = if tys.len() == 0 {
624                     StructKind::AlwaysSized
625                 } else {
626                     StructKind::MaybeUnsized
627                 };
628
629                 univariant(&tys.iter().map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
630                     &ReprOptions::default(), kind)?
631             }
632
633             // SIMD vector types.
634             ty::TyAdt(def, ..) if def.repr.simd() => {
635                 let element = self.layout_of(ty.simd_type(tcx))?;
636                 let count = ty.simd_size(tcx) as u64;
637                 assert!(count > 0);
638                 let scalar = match element.abi {
639                     Abi::Scalar(ref scalar) => scalar.clone(),
640                     _ => {
641                         tcx.sess.fatal(&format!("monomorphising SIMD type `{}` with \
642                                                 a non-machine element type `{}`",
643                                                 ty, element.ty));
644                     }
645                 };
646                 let size = element.size.checked_mul(count, dl)
647                     .ok_or(LayoutError::SizeOverflow(ty))?;
648                 let align = dl.vector_align(size);
649                 let size = size.abi_align(align);
650
651                 tcx.intern_layout(LayoutDetails {
652                     variants: Variants::Single { index: 0 },
653                     fields: FieldPlacement::Array {
654                         stride: element.size,
655                         count
656                     },
657                     abi: Abi::Vector {
658                         element: scalar,
659                         count
660                     },
661                     size,
662                     align,
663                 })
664             }
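            // Editorial sketch (not from the original source): for a hypothetical
            // `#[repr(simd)] struct F32x4(f32, f32, f32, f32)`, the element is a 4-byte
            // scalar, count = 4, so size = 16 and the ABI is `Abi::Vector`; the final
            // alignment comes from `dl.vector_align(16)` (16 on most 64-bit targets).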
665
666             // ADTs.
667             ty::TyAdt(def, substs) => {
668                 // Cache the field layouts.
669                 let variants = def.variants.iter().map(|v| {
670                     v.fields.iter().map(|field| {
671                         self.layout_of(field.ty(tcx, substs))
672                     }).collect::<Result<Vec<_>, _>>()
673                 }).collect::<Result<Vec<_>, _>>()?;
674
675                 if def.is_union() {
676                     let packed = def.repr.packed();
677                     if packed && def.repr.align > 0 {
678                         bug!("Union cannot be packed and aligned");
679                     }
680
681                     let pack = {
682                         let pack = def.repr.pack as u64;
683                         Align::from_bytes(pack, pack).unwrap()
684                     };
685
686                     let mut align = if packed {
687                         dl.i8_align
688                     } else {
689                         dl.aggregate_align
690                     };
691
692                     if def.repr.align > 0 {
693                         let repr_align = def.repr.align as u64;
694                         align = align.max(
695                             Align::from_bytes(repr_align, repr_align).unwrap());
696                     }
697
698                     let mut size = Size::ZERO;
699                     for field in &variants[0] {
700                         assert!(!field.is_unsized());
701
702                         if packed {
703                             let field_pack = field.align.min(pack);
704                             align = align.max(field_pack);
705                         } else {
706                             align = align.max(field.align);
707                         }
708                         size = cmp::max(size, field.size);
709                     }
710
711                     return Ok(tcx.intern_layout(LayoutDetails {
712                         variants: Variants::Single { index: 0 },
713                         fields: FieldPlacement::Union(variants[0].len()),
714                         abi: Abi::Aggregate { sized: true },
715                         align,
716                         size: size.abi_align(align)
717                     }));
718                 }
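                // Editorial sketch (not from the original source): for
                // `union U { a: u8, b: u32 }`, the loop above takes size = max(1, 4) = 4 and
                // align = 4, so the union is laid out as 4 bytes with
                // `FieldPlacement::Union(2)` - every field starts at offset 0.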
719
720                 // A variant is absent if it's uninhabited and only has ZST fields.
721                 // Present uninhabited variants only require space for their fields,
722                 // but *not* an encoding of the discriminant (e.g. a tag value).
723                 // See issue #49298 for more details on the need to leave space
724                 // for non-ZST uninhabited data (mostly partial initialization).
725                 let absent = |fields: &[TyLayout]| {
726                     let uninhabited = fields.iter().any(|f| f.abi == Abi::Uninhabited);
727                     let is_zst = fields.iter().all(|f| f.is_zst());
728                     uninhabited && is_zst
729                 };
730                 let (present_first, present_second) = {
731                     let mut present_variants = (0..variants.len()).filter(|&v| {
732                         !absent(&variants[v])
733                     });
734                     (present_variants.next(), present_variants.next())
735                 };
736                 if present_first.is_none() {
737                     // Uninhabited because it has no variants, or only absent ones.
738                     return tcx.layout_raw(param_env.and(tcx.types.never));
739                 }
740
741                 let is_struct = !def.is_enum() ||
742                     // Only one variant is present.
743                     (present_second.is_none() &&
744                     // Representation optimizations are allowed.
745                      !def.repr.inhibit_enum_layout_opt());
746                 if is_struct {
747                     // Struct, or univariant enum equivalent to a struct.
748                     // (Typechecking will reject discriminant-sizing attrs.)
749
750                     let v = present_first.unwrap();
751                     let kind = if def.is_enum() || variants[v].len() == 0 {
752                         StructKind::AlwaysSized
753                     } else {
754                         let param_env = tcx.param_env(def.did);
755                         let last_field = def.variants[v].fields.last().unwrap();
756                         let always_sized = tcx.type_of(last_field.did)
757                           .is_sized(tcx.at(DUMMY_SP), param_env);
758                         if !always_sized { StructKind::MaybeUnsized }
759                         else { StructKind::AlwaysSized }
760                     };
761
762                     let mut st = univariant_uninterned(&variants[v], &def.repr, kind)?;
763                     st.variants = Variants::Single { index: v };
764                     // Exclude 0 from the range of a newtype ABI NonZero<T>.
765                     if Some(def.did) == self.tcx.lang_items().non_zero() {
766                         match st.abi {
767                             Abi::Scalar(ref mut scalar) |
768                             Abi::ScalarPair(ref mut scalar, _) => {
769                                 if *scalar.valid_range.start() == 0 {
770                                     scalar.valid_range = 1..=*scalar.valid_range.end();
771                                 }
772                             }
773                             _ => {}
774                         }
775                     }
776                     return Ok(tcx.intern_layout(st));
777                 }
778
779                 // The current code for niche-filling relies on variant indices
780                 // instead of actual discriminants, so dataful enums with
781                 // explicit discriminants (RFC #2363) would misbehave.
782                 let no_explicit_discriminants = def.variants.iter().enumerate()
783                     .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i));
784
785                 // Niche-filling enum optimization.
786                 if !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants {
787                     let mut dataful_variant = None;
788                     let mut niche_variants = usize::max_value()..=0;
789
790                     // Find one non-ZST variant.
791                     'variants: for (v, fields) in variants.iter().enumerate() {
792                         if absent(fields) {
793                             continue 'variants;
794                         }
795                         for f in fields {
796                             if !f.is_zst() {
797                                 if dataful_variant.is_none() {
798                                     dataful_variant = Some(v);
799                                     continue 'variants;
800                                 } else {
801                                     dataful_variant = None;
802                                     break 'variants;
803                                 }
804                             }
805                         }
806                         niche_variants = *niche_variants.start().min(&v)..=v;
807                     }
808
809                     if niche_variants.start() > niche_variants.end() {
810                         dataful_variant = None;
811                     }
812
813                     if let Some(i) = dataful_variant {
814                         let count = (niche_variants.end() - niche_variants.start() + 1) as u128;
815                         for (field_index, &field) in variants[i].iter().enumerate() {
816                             let niche = match self.find_niche(field)? {
817                                 Some(niche) => niche,
818                                 _ => continue,
819                             };
820                             let (niche_start, niche_scalar) = match niche.reserve(self, count) {
821                                 Some(pair) => pair,
822                                 None => continue,
823                             };
824
825                             let mut align = dl.aggregate_align;
826                             let st = variants.iter().enumerate().map(|(j, v)| {
827                                 let mut st = univariant_uninterned(v,
828                                     &def.repr, StructKind::AlwaysSized)?;
829                                 st.variants = Variants::Single { index: j };
830
831                                 align = align.max(st.align);
832
833                                 Ok(st)
834                             }).collect::<Result<Vec<_>, _>>()?;
835
836                             let offset = st[i].fields.offset(field_index) + niche.offset;
837                             let size = st[i].size;
838
839                             let mut abi = match st[i].abi {
840                                 Abi::Scalar(_) => Abi::Scalar(niche_scalar.clone()),
841                                 Abi::ScalarPair(ref first, ref second) => {
842                                     // We need to use scalar_unit to reset the
843                                     // valid range to the maximal one for that
844                                     // primitive, because only the niche is
845                                     // guaranteed to be initialised, not the
846                                     // other primitive.
847                                     if offset.bytes() == 0 {
848                                         Abi::ScalarPair(
849                                             niche_scalar.clone(),
850                                             scalar_unit(second.value),
851                                         )
852                                     } else {
853                                         Abi::ScalarPair(
854                                             scalar_unit(first.value),
855                                             niche_scalar.clone(),
856                                         )
857                                     }
858                                 }
859                                 _ => Abi::Aggregate { sized: true },
860                             };
861
862                             if st.iter().all(|v| v.abi == Abi::Uninhabited) {
863                                 abi = Abi::Uninhabited;
864                             }
865
866                             return Ok(tcx.intern_layout(LayoutDetails {
867                                 variants: Variants::NicheFilling {
868                                     dataful_variant: i,
869                                     niche_variants,
870                                     niche: niche_scalar,
871                                     niche_start,
872                                     variants: st,
873                                 },
874                                 fields: FieldPlacement::Arbitrary {
875                                     offsets: vec![offset],
876                                     memory_index: vec![0]
877                                 },
878                                 abi,
879                                 size,
880                                 align,
881                             }));
882                         }
883                     }
884                 }
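                // Editorial sketch (not from the original source): the classic outcome of the
                // niche-filling path above is `Option<&T>`: the dataful variant is `Some`, the
                // reference's niche is the forbidden value 0, so `None` is encoded as a null
                // pointer and `size_of::<Option<&T>>() == size_of::<&T>()`, with
                // `Variants::NicheFilling` recording which variant owns the niche.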
885
886                 let (mut min, mut max) = (i128::max_value(), i128::min_value());
887                 let discr_type = def.repr.discr_type();
888                 let bits = Integer::from_attr(tcx, discr_type).size().bits();
889                 for (i, discr) in def.discriminants(tcx).enumerate() {
890                     if variants[i].iter().any(|f| f.abi == Abi::Uninhabited) {
891                         continue;
892                     }
893                     let mut x = discr.val as i128;
894                     if discr_type.is_signed() {
895                         // sign extend the raw representation to be an i128
896                         x = (x << (128 - bits)) >> (128 - bits);
897                     }
898                     if x < min { min = x; }
899                     if x > max { max = x; }
900                 }
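                // Editorial note (a sketch, not from the original source): the sign extension
                // above turns a raw `discr.val` of 0xFF for an `i8` discriminant (bits = 8)
                // into -1, since `(0xFF << 120) >> 120` as an arithmetic shift on i128 is -1.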
901                 // We might have no inhabited variants, so pretend there's at least one.
902                 if (min, max) == (i128::max_value(), i128::min_value()) {
903                     min = 0;
904                     max = 0;
905                 }
906                 assert!(min <= max, "discriminant range is {}...{}", min, max);
907                 let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max);
908
909                 let mut align = dl.aggregate_align;
910                 let mut size = Size::ZERO;
911
912                 // We're interested in the smallest alignment, so start large.
913                 let mut start_align = Align::from_bytes(256, 256).unwrap();
914                 assert_eq!(Integer::for_abi_align(dl, start_align), None);
915
916                 // repr(C) on an enum tells us to make a (tag, union) layout,
917                 // so we need to grow the prefix alignment to be at least
918                 // the alignment of the union. (This value is used both for
919                 // determining the alignment of the overall enum, and for
920                 // determining the alignment of the payload after the tag.)
921                 let mut prefix_align = min_ity.align(dl);
922                 if def.repr.c() {
923                     for fields in &variants {
924                         for field in fields {
925                             prefix_align = prefix_align.max(field.align);
926                         }
927                     }
928                 }
929
930                 // Create the set of structs that represent each variant.
931                 let mut layout_variants = variants.iter().enumerate().map(|(i, field_layouts)| {
932                     let mut st = univariant_uninterned(&field_layouts,
933                         &def.repr, StructKind::Prefixed(min_ity.size(), prefix_align))?;
934                     st.variants = Variants::Single { index: i };
935                     // Find the first field we can't move later
936                     // to make room for a larger discriminant.
937                     for field in st.fields.index_by_increasing_offset().map(|j| field_layouts[j]) {
938                         if !field.is_zst() || field.align.abi() != 1 {
939                             start_align = start_align.min(field.align);
940                             break;
941                         }
942                     }
943                     size = cmp::max(size, st.size);
944                     align = align.max(st.align);
945                     Ok(st)
946                 }).collect::<Result<Vec<_>, _>>()?;
947
948                 // Align the maximum variant size to the largest alignment.
949                 size = size.abi_align(align);
950
951                 if size.bytes() >= dl.obj_size_bound() {
952                     return Err(LayoutError::SizeOverflow(ty));
953                 }
954
955                 let typeck_ity = Integer::from_attr(dl, def.repr.discr_type());
956                 if typeck_ity < min_ity {
957                     // It is a bug if layout decides on a larger discriminant size than typeck did
958                     // at this point (based on the values the discriminant can take on), mostly
959                     // because the discriminant will be loaded and then stored into a variable of
960                     // the type computed by typeck. Consider the buggy case: typeck decided on a
961                     // byte-sized discriminant, but layout thinks it needs 16 bits to store all
962                     // the discriminant values. In codegen, storing that 16-bit discriminant
963                     // into an 8-bit temporary would then have to discard some of the bits
964                     // needed to represent it (or layout is simply wrong in thinking it needs
965                     // 16 bits).
966                     bug!("layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
967                          min_ity, typeck_ity);
968                     // However, it is fine to make the discriminant type larger (as an optimisation)
969                     // after this point - we'll just truncate the value we load in codegen.
970                 }
971
972                 // Check to see if we should use a different type for the
973                 // discriminant. We can safely use a type with the same size
974                 // as the alignment of the first field of each variant.
975                 // We increase the size of the discriminant to avoid LLVM copying
976                 // padding when it doesn't need to. This normally causes unaligned
977                 // load/stores and excessive memcpy/memset operations. By using a
978                 // bigger integer size, LLVM can be sure about its contents and
979                 // won't be so conservative.
980
981                 // Use the initial field alignment
982                 let mut ity = if def.repr.c() || def.repr.int.is_some() {
983                     min_ity
984                 } else {
985                     Integer::for_abi_align(dl, start_align).unwrap_or(min_ity)
986                 };
987
988                 // If the alignment is not larger than the chosen discriminant size,
989                 // don't use the alignment as the final size.
990                 if ity <= min_ity {
991                     ity = min_ity;
992                 } else {
993                     // Patch up the variants' first few fields.
994                     let old_ity_size = min_ity.size();
995                     let new_ity_size = ity.size();
996                     for variant in &mut layout_variants {
997                         match variant.fields {
998                             FieldPlacement::Arbitrary { ref mut offsets, .. } => {
999                                 for i in offsets {
1000                                     if *i <= old_ity_size {
1001                                         assert_eq!(*i, old_ity_size);
1002                                         *i = new_ity_size;
1003                                     }
1004                                 }
1005                                 // We might be making the struct larger.
1006                                 if variant.size <= old_ity_size {
1007                                     variant.size = new_ity_size;
1008                                 }
1009                             }
1010                             _ => bug!()
1011                         }
1012                     }
1013                 }
1014
1015                 let tag_mask = !0u128 >> (128 - ity.size().bits());
1016                 let tag = Scalar {
1017                     value: Int(ity, signed),
1018                     valid_range: (min as u128 & tag_mask)..=(max as u128 & tag_mask),
1019                 };
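                // Editorial sketch (not from the original source): for a signed i8 tag with
                // min = -1 and max = 5, `tag_mask` is 0xff, so `valid_range` becomes the
                // wrapping range 255..=5, i.e. the raw tag bytes 0xFF and 0x00..=0x05.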
1020                 let mut abi = Abi::Aggregate { sized: true };
1021                 if tag.value.size(dl) == size {
1022                     abi = Abi::Scalar(tag.clone());
1023                 } else {
1024                     // Try to use a ScalarPair for all tagged enums.
1025                     let mut common_prim = None;
1026                     for (field_layouts, layout_variant) in variants.iter().zip(&layout_variants) {
1027                         let offsets = match layout_variant.fields {
1028                             FieldPlacement::Arbitrary { ref offsets, .. } => offsets,
1029                             _ => bug!(),
1030                         };
1031                         let mut fields = field_layouts
1032                             .iter()
1033                             .zip(offsets)
1034                             .filter(|p| !p.0.is_zst());
1035                         let (field, offset) = match (fields.next(), fields.next()) {
1036                             (None, None) => continue,
1037                             (Some(pair), None) => pair,
1038                             _ => {
1039                                 common_prim = None;
1040                                 break;
1041                             }
1042                         };
1043                         let prim = match field.details.abi {
1044                             Abi::Scalar(ref scalar) => scalar.value,
1045                             _ => {
1046                                 common_prim = None;
1047                                 break;
1048                             }
1049                         };
1050                         if let Some(pair) = common_prim {
1051                             // This is pretty conservative. We could go fancier
1052                             // by conflating things like i32 and u32, or even
1053                             // realising that (u8, u8) could just cohabit with
1054                             // u16 or even u32.
1055                             if pair != (prim, offset) {
1056                                 common_prim = None;
1057                                 break;
1058                             }
1059                         } else {
1060                             common_prim = Some((prim, offset));
1061                         }
1062                     }
1063                     if let Some((prim, offset)) = common_prim {
1064                         let pair = scalar_pair(tag.clone(), scalar_unit(prim));
1065                         let pair_offsets = match pair.fields {
1066                             FieldPlacement::Arbitrary {
1067                                 ref offsets,
1068                                 ref memory_index
1069                             } => {
1070                                 assert_eq!(memory_index, &[0, 1]);
1071                                 offsets
1072                             }
1073                             _ => bug!()
1074                         };
1075                         if pair_offsets[0] == Size::ZERO &&
1076                             pair_offsets[1] == *offset &&
1077                             align == pair.align &&
1078                             size == pair.size {
1079                             // We can use `ScalarPair` only when it matches our
1080                             // already computed layout (including `#[repr(C)]`).
1081                             abi = pair.abi;
1082                         }
1083                     }
1084                 }
1085
1086                 if layout_variants.iter().all(|v| v.abi == Abi::Uninhabited) {
1087                     abi = Abi::Uninhabited;
1088                 }
1089
1090                 tcx.intern_layout(LayoutDetails {
1091                     variants: Variants::Tagged {
1092                         tag,
1093                         variants: layout_variants,
1094                     },
1095                     fields: FieldPlacement::Arbitrary {
1096                         offsets: vec![Size::ZERO],
1097                         memory_index: vec![0]
1098                     },
1099                     abi,
1100                     align,
1101                     size
1102                 })
1103             }
1104
1105             // Types with no meaningful known layout.
1106             ty::TyProjection(_) | ty::TyAnon(..) => {
1107                 let normalized = tcx.normalize_erasing_regions(param_env, ty);
1108                 if ty == normalized {
1109                     return Err(LayoutError::Unknown(ty));
1110                 }
1111                 tcx.layout_raw(param_env.and(normalized))?
1112             }
1113             ty::TyGeneratorWitness(..) | ty::TyInfer(_) => {
1114                 bug!("LayoutDetails::compute: unexpected type `{}`", ty)
1115             }
1116             ty::TyParam(_) | ty::TyError => {
1117                 return Err(LayoutError::Unknown(ty));
1118             }
1119         })
1120     }
1121
1122     /// This is invoked by the `layout_raw` query to record the final
1123     /// layout of each type.
1124     #[inline]
1125     fn record_layout_for_printing(self, layout: TyLayout<'tcx>) {
1126         // If we are running with `-Zprint-type-sizes`, record layouts for
1127         // dumping later. Ignore layouts computed with non-empty parameter
1128         // environments, or non-monomorphic layouts, as the user only wants
1129         // to see the types that reach the final codegen session.
1130         if
1131             !self.tcx.sess.opts.debugging_opts.print_type_sizes ||
1132             layout.ty.has_param_types() ||
1133             layout.ty.has_self_ty() ||
1134             !self.param_env.caller_bounds.is_empty()
1135         {
1136             return;
1137         }
1138
1139         self.record_layout_for_printing_outlined(layout)
1140     }
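    // Editorial note (not from the original source): the data recorded here backs the
    // nightly `-Z print-type-sizes` flag, e.g. `rustc -Z print-type-sizes foo.rs`, which
    // dumps each monomorphic type's size, alignment, and per-variant field layout.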
1141
1142     fn record_layout_for_printing_outlined(self, layout: TyLayout<'tcx>) {
1143         // (delay format until we actually need it)
1144         let record = |kind, packed, opt_discr_size, variants| {
1145             let type_desc = format!("{:?}", layout.ty);
1146             self.tcx.sess.code_stats.borrow_mut().record_type_size(kind,
1147                                                                    type_desc,
1148                                                                    layout.align,
1149                                                                    layout.size,
1150                                                                    packed,
1151                                                                    opt_discr_size,
1152                                                                    variants);
1153         };
1154
1155         let adt_def = match layout.ty.sty {
1156             ty::TyAdt(ref adt_def, _) => {
1157                 debug!("print-type-size t: `{:?}` process adt", layout.ty);
1158                 adt_def
1159             }
1160
1161             ty::TyClosure(..) => {
1162                 debug!("print-type-size t: `{:?}` record closure", layout.ty);
1163                 record(DataTypeKind::Closure, false, None, vec![]);
1164                 return;
1165             }
1166
1167             _ => {
1168                 debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
1169                 return;
1170             }
1171         };
1172
1173         let adt_kind = adt_def.adt_kind();
1174         let adt_packed = adt_def.repr.packed();
1175
1176         let build_variant_info = |n: Option<ast::Name>,
1177                                   flds: &[ast::Name],
1178                                   layout: TyLayout<'tcx>| {
1179             let mut min_size = Size::ZERO;
1180             let field_info: Vec<_> = flds.iter().enumerate().map(|(i, &name)| {
1181                 match layout.field(self, i) {
1182                     Err(err) => {
1183                         bug!("no layout found for field {}: `{:?}`", name, err);
1184                     }
1185                     Ok(field_layout) => {
1186                         let offset = layout.fields.offset(i);
1187                         let field_end = offset + field_layout.size;
1188                         if min_size < field_end {
1189                             min_size = field_end;
1190                         }
1191                         session::FieldInfo {
1192                             name: name.to_string(),
1193                             offset: offset.bytes(),
1194                             size: field_layout.size.bytes(),
1195                             align: field_layout.align.abi(),
1196                         }
1197                     }
1198                 }
1199             }).collect();
1200
1201             session::VariantInfo {
1202                 name: n.map(|n|n.to_string()),
1203                 kind: if layout.is_unsized() {
1204                     session::SizeKind::Min
1205                 } else {
1206                     session::SizeKind::Exact
1207                 },
1208                 align: layout.align.abi(),
1209                 size: if min_size.bytes() == 0 {
1210                     layout.size.bytes()
1211                 } else {
1212                     min_size.bytes()
1213                 },
1214                 fields: field_info,
1215             }
1216         };
1217
1218         match layout.variants {
1219             Variants::Single { index } => {
1220                 debug!("print-type-size `{:#?}` variant {}",
1221                        layout, adt_def.variants[index].name);
1222                 if !adt_def.variants.is_empty() {
1223                     let variant_def = &adt_def.variants[index];
1224                     let fields: Vec<_> =
1225                         variant_def.fields.iter().map(|f| f.ident.name).collect();
1226                     record(adt_kind.into(),
1227                            adt_packed,
1228                            None,
1229                            vec![build_variant_info(Some(variant_def.name),
1230                                                    &fields,
1231                                                    layout)]);
1232                 } else {
1233                     // (This case arises for *empty* enums; so give it
1234                     // zero variants.)
1235                     record(adt_kind.into(), adt_packed, None, vec![]);
1236                 }
1237             }
1238
1239             Variants::NicheFilling { .. } |
1240             Variants::Tagged { .. } => {
1241                 debug!("print-type-size `{:#?}` adt general variants def {}",
1242                        layout.ty, adt_def.variants.len());
1243                 let variant_infos: Vec<_> =
1244                     adt_def.variants.iter().enumerate().map(|(i, variant_def)| {
1245                         let fields: Vec<_> =
1246                             variant_def.fields.iter().map(|f| f.ident.name).collect();
1247                         build_variant_info(Some(variant_def.name),
1248                                             &fields,
1249                                             layout.for_variant(self, i))
1250                     })
1251                     .collect();
1252                 record(adt_kind.into(), adt_packed, match layout.variants {
1253                     Variants::Tagged { ref tag, .. } => Some(tag.value.size(self)),
1254                     _ => None
1255                 }, variant_infos);
1256             }
1257         }
1258     }
1259 }
1260
1261 /// Type size "skeleton", i.e. the only information determining a type's size.
1262 /// While this is conservative, (aside from constant sizes, only pointers,
1263 /// newtypes thereof and null pointer optimized enums are allowed), it is
1264 /// enough to statically check common usecases of transmute.
1265 #[derive(Copy, Clone, Debug)]
1266 pub enum SizeSkeleton<'tcx> {
1267     /// Any statically computable Layout.
1268     Known(Size),
1269
1270     /// A potentially-fat pointer.
1271     Pointer {
1272         /// If true, this pointer is never null.
1273         non_zero: bool,
1274         /// The type which determines the unsized metadata, if any,
1275         /// of this pointer. Either a type parameter or a projection
1276         /// depending on one, with regions erased.
1277         tail: Ty<'tcx>
1278     }
1279 }
1280
1281 impl<'a, 'tcx> SizeSkeleton<'tcx> {
1282     pub fn compute(ty: Ty<'tcx>,
1283                    tcx: TyCtxt<'a, 'tcx, 'tcx>,
1284                    param_env: ty::ParamEnv<'tcx>)
1285                    -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
1286         debug_assert!(!ty.has_infer_types());
1287
1288         // First try computing a static layout.
1289         let err = match tcx.layout_of(param_env.and(ty)) {
1290             Ok(layout) => {
1291                 return Ok(SizeSkeleton::Known(layout.size));
1292             }
1293             Err(err) => err
1294         };
1295
1296         match ty.sty {
1297             ty::TyRef(_, pointee, _) |
1298             ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
1299                 let non_zero = !ty.is_unsafe_ptr();
1300                 let tail = tcx.struct_tail(pointee);
1301                 match tail.sty {
1302                     ty::TyParam(_) | ty::TyProjection(_) => {
1303                         debug_assert!(tail.has_param_types() || tail.has_self_ty());
1304                         Ok(SizeSkeleton::Pointer {
1305                             non_zero,
1306                             tail: tcx.erase_regions(&tail)
1307                         })
1308                     }
1309                     _ => {
1310                         bug!("SizeSkeleton::compute({}): layout errored ({}), yet \
1311                               tail `{}` is not a type parameter or a projection",
1312                              ty, err, tail)
1313                     }
1314                 }
1315             }
1316
1317             ty::TyAdt(def, substs) => {
1318                 // Only newtypes and enums w/ nullable pointer optimization.
1319                 if def.is_union() || def.variants.is_empty() || def.variants.len() > 2 {
1320                     return Err(err);
1321                 }
1322
1323                 // Get a zero-sized variant or a pointer newtype.
1324                 let zero_or_ptr_variant = |i: usize| {
1325                     let fields = def.variants[i].fields.iter().map(|field| {
1326                         SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env)
1327                     });
1328                     let mut ptr = None;
1329                     for field in fields {
1330                         let field = field?;
1331                         match field {
1332                             SizeSkeleton::Known(size) => {
1333                                 if size.bytes() > 0 {
1334                                     return Err(err);
1335                                 }
1336                             }
1337                             SizeSkeleton::Pointer {..} => {
1338                                 if ptr.is_some() {
1339                                     return Err(err);
1340                                 }
1341                                 ptr = Some(field);
1342                             }
1343                         }
1344                     }
1345                     Ok(ptr)
1346                 };
1347
1348                 let v0 = zero_or_ptr_variant(0)?;
1349                 // Newtype.
1350                 if def.variants.len() == 1 {
1351                     if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
1352                         return Ok(SizeSkeleton::Pointer {
1353                             non_zero: non_zero ||
1354                                 Some(def.did) == tcx.lang_items().non_zero(),
1355                             tail,
1356                         });
1357                     } else {
1358                         return Err(err);
1359                     }
1360                 }
1361
1362                 let v1 = zero_or_ptr_variant(1)?;
1363                 // Nullable pointer enum optimization.
1364                 match (v0, v1) {
1365                     (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None) |
1366                     (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
1367                         Ok(SizeSkeleton::Pointer {
1368                             non_zero: false,
1369                             tail,
1370                         })
1371                     }
1372                     _ => Err(err)
1373                 }
1374             }
1375
1376             ty::TyProjection(_) | ty::TyAnon(..) => {
1377                 let normalized = tcx.normalize_erasing_regions(param_env, ty);
1378                 if ty == normalized {
1379                     Err(err)
1380                 } else {
1381                     SizeSkeleton::compute(normalized, tcx, param_env)
1382                 }
1383             }
1384
1385             _ => Err(err)
1386         }
1387     }
1388
1389     pub fn same_size(self, other: SizeSkeleton) -> bool {
1390         match (self, other) {
1391             (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
1392             (SizeSkeleton::Pointer { tail: a, .. },
1393              SizeSkeleton::Pointer { tail: b, .. }) => a == b,
1394             _ => false
1395         }
1396     }
1397 }
1398
1399 pub trait HasTyCtxt<'tcx>: HasDataLayout {
1400     fn tcx<'a>(&'a self) -> TyCtxt<'a, 'tcx, 'tcx>;
1401 }
1402
1403 impl<'a, 'gcx, 'tcx> HasDataLayout for TyCtxt<'a, 'gcx, 'tcx> {
1404     fn data_layout(&self) -> &TargetDataLayout {
1405         &self.data_layout
1406     }
1407 }
1408
1409 impl<'a, 'gcx, 'tcx> HasTyCtxt<'gcx> for TyCtxt<'a, 'gcx, 'tcx> {
1410     fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'gcx> {
1411         self.global_tcx()
1412     }
1413 }
1414
1415 impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
1416     fn data_layout(&self) -> &TargetDataLayout {
1417         self.tcx.data_layout()
1418     }
1419 }
1420
1421 impl<'gcx, 'tcx, T: HasTyCtxt<'gcx>> HasTyCtxt<'gcx> for LayoutCx<'tcx, T> {
1422     fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'gcx> {
1423         self.tcx.tcx()
1424     }
1425 }
1426
1427 pub trait MaybeResult<T> {
1428     fn from_ok(x: T) -> Self;
1429     fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self;
1430 }
1431
1432 impl<T> MaybeResult<T> for T {
1433     fn from_ok(x: T) -> Self {
1434         x
1435     }
1436     fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self {
1437         f(self)
1438     }
1439 }
1440
1441 impl<T, E> MaybeResult<T> for Result<T, E> {
1442     fn from_ok(x: T) -> Self {
1443         Ok(x)
1444     }
1445     fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self {
1446         self.map(f)
1447     }
1448 }
1449
1450 pub type TyLayout<'tcx> = ::rustc_target::abi::TyLayout<'tcx, Ty<'tcx>>;
1451
1452 impl<'a, 'tcx> LayoutOf for LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
1453     type Ty = Ty<'tcx>;
1454     type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;
1455
1456     /// Computes the layout of a type. Note that this implicitly
1457     /// executes in "reveal all" mode.
1458     fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
1459         let param_env = self.param_env.with_reveal_all();
1460         let ty = self.tcx.normalize_erasing_regions(param_env, ty);
1461         let details = self.tcx.layout_raw(param_env.and(ty))?;
1462         let layout = TyLayout {
1463             ty,
1464             details
1465         };
1466
1467         // NB: This recording is normally disabled; when enabled, it
1468         // can however trigger recursive invocations of `layout_of`.
1469         // Therefore, we execute it *after* the main query has
1470         // completed, to avoid problems around recursive structures
1471         // and the like. (Admittedly, I wasn't able to reproduce a problem
1472         // here, but it seems like the right thing to do. -nmatsakis)
1473         self.record_layout_for_printing(layout);
1474
1475         Ok(layout)
1476     }
1477 }
1478
1479 impl<'a, 'tcx> LayoutOf for LayoutCx<'tcx, ty::query::TyCtxtAt<'a, 'tcx, 'tcx>> {
1480     type Ty = Ty<'tcx>;
1481     type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;
1482
1483     /// Computes the layout of a type. Note that this implicitly
1484     /// executes in "reveal all" mode.
1485     fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
1486         let param_env = self.param_env.with_reveal_all();
1487         let ty = self.tcx.normalize_erasing_regions(param_env, ty);
1488         let details = self.tcx.layout_raw(param_env.and(ty))?;
1489         let layout = TyLayout {
1490             ty,
1491             details
1492         };
1493
1494         // NB: This recording is normally disabled; when enabled, it
1495         // can however trigger recursive invocations of `layout_of`.
1496         // Therefore, we execute it *after* the main query has
1497         // completed, to avoid problems around recursive structures
1498         // and the like. (Admittedly, I wasn't able to reproduce a problem
1499         // here, but it seems like the right thing to do. -nmatsakis)
1500         let cx = LayoutCx {
1501             tcx: *self.tcx,
1502             param_env: self.param_env
1503         };
1504         cx.record_layout_for_printing(layout);
1505
1506         Ok(layout)
1507     }
1508 }
1509
1510 // Helper (inherent) `layout_of` methods to avoid pushing `LayoutCx` to users.
1511 impl TyCtxt<'a, 'tcx, '_> {
1512     /// Computes the layout of a type. Note that this implicitly
1513     /// executes in "reveal all" mode.
1514     #[inline]
1515     pub fn layout_of(self, param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
1516                      -> Result<TyLayout<'tcx>, LayoutError<'tcx>> {
1517         let cx = LayoutCx {
1518             tcx: self.global_tcx(),
1519             param_env: param_env_and_ty.param_env
1520         };
1521         cx.layout_of(param_env_and_ty.value)
1522     }
1523 }
1524
1525 impl ty::query::TyCtxtAt<'a, 'tcx, '_> {
1526     /// Computes the layout of a type. Note that this implicitly
1527     /// executes in "reveal all" mode.
1528     #[inline]
1529     pub fn layout_of(self, param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
1530                      -> Result<TyLayout<'tcx>, LayoutError<'tcx>> {
1531         let cx = LayoutCx {
1532             tcx: self.global_tcx().at(self.span),
1533             param_env: param_env_and_ty.param_env
1534         };
1535         cx.layout_of(param_env_and_ty.value)
1536     }
1537 }
1538
1539 impl<'a, 'tcx, C> TyLayoutMethods<'tcx, C> for Ty<'tcx>
1540     where C: LayoutOf<Ty = Ty<'tcx>> + HasTyCtxt<'tcx>,
1541           C::TyLayout: MaybeResult<TyLayout<'tcx>>
1542 {
1543     fn for_variant(this: TyLayout<'tcx>, cx: C, variant_index: usize) -> TyLayout<'tcx> {
1544         let details = match this.variants {
1545             Variants::Single { index } if index == variant_index => this.details,
1546
1547             Variants::Single { index } => {
1548                 // Deny calling for_variant more than once for non-Single enums.
1549                 cx.layout_of(this.ty).map_same(|layout| {
1550                     assert_eq!(layout.variants, Variants::Single { index });
1551                     layout
1552                 });
1553
1554                 let fields = match this.ty.sty {
1555                     ty::TyAdt(def, _) => def.variants[variant_index].fields.len(),
1556                     _ => bug!()
1557                 };
1558                 let tcx = cx.tcx();
1559                 tcx.intern_layout(LayoutDetails {
1560                     variants: Variants::Single { index: variant_index },
1561                     fields: FieldPlacement::Union(fields),
1562                     abi: Abi::Uninhabited,
1563                     align: tcx.data_layout.i8_align,
1564                     size: Size::ZERO
1565                 })
1566             }
1567
1568             Variants::NicheFilling { ref variants, .. } |
1569             Variants::Tagged { ref variants, .. } => {
1570                 &variants[variant_index]
1571             }
1572         };
1573
1574         assert_eq!(details.variants, Variants::Single { index: variant_index });
1575
1576         TyLayout {
1577             ty: this.ty,
1578             details
1579         }
1580     }
1581
1582     fn field(this: TyLayout<'tcx>, cx: C, i: usize) -> C::TyLayout {
1583         let tcx = cx.tcx();
1584         cx.layout_of(match this.ty.sty {
1585             ty::TyBool |
1586             ty::TyChar |
1587             ty::TyInt(_) |
1588             ty::TyUint(_) |
1589             ty::TyFloat(_) |
1590             ty::TyFnPtr(_) |
1591             ty::TyNever |
1592             ty::TyFnDef(..) |
1593             ty::TyGeneratorWitness(..) |
1594             ty::TyForeign(..) |
1595             ty::TyDynamic(..) => {
1596                 bug!("TyLayout::field_type({:?}): not applicable", this)
1597             }
1598
1599             // Potentially-fat pointers.
1600             ty::TyRef(_, pointee, _) |
1601             ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
1602                 assert!(i < this.fields.count());
1603
1604                 // Reuse the fat *T type as its own thin pointer data field.
1605                 // This provides information about e.g. DST struct pointees
1606                 // (which may have no non-DST form), and will work as long
1607                 // as the `Abi` or `FieldPlacement` is checked by users.
1608                 if i == 0 {
1609                     let nil = tcx.mk_nil();
1610                     let ptr_ty = if this.ty.is_unsafe_ptr() {
1611                         tcx.mk_mut_ptr(nil)
1612                     } else {
1613                         tcx.mk_mut_ref(tcx.types.re_static, nil)
1614                     };
1615                     return cx.layout_of(ptr_ty).map_same(|mut ptr_layout| {
1616                         ptr_layout.ty = this.ty;
1617                         ptr_layout
1618                     });
1619                 }
1620
1621                 match tcx.struct_tail(pointee).sty {
1622                     ty::TySlice(_) |
1623                     ty::TyStr => tcx.types.usize,
1624                     ty::TyDynamic(data, _) => {
1625                         let trait_def_id = data.principal().unwrap().def_id();
1626                         let num_fns: u64 = crate::traits::supertrait_def_ids(tcx, trait_def_id)
1627                             .map(|trait_def_id| {
1628                                 tcx.associated_items(trait_def_id)
1629                                     .filter(|item| item.kind == ty::AssociatedKind::Method)
1630                                     .count() as u64
1631                             })
1632                             .sum();
1633                         tcx.mk_imm_ref(
1634                             tcx.types.re_static,
1635                             tcx.mk_array(tcx.types.usize, 3 + num_fns),
1636                         )
1637                         /* FIXME use actual fn pointers
1638                         tcx.mk_tup(&[
1639                             tcx.mk_array(tcx.types.usize, 3),
1640                             tcx.mk_array(Option<fn()>),
1641                         ])
1642                         */
1643                     }
1644                     _ => bug!("TyLayout::field_type({:?}): not applicable", this)
1645                 }
1646             }
1647
1648             // Arrays and slices.
1649             ty::TyArray(element, _) |
1650             ty::TySlice(element) => element,
1651             ty::TyStr => tcx.types.u8,
1652
1653             // Tuples, generators and closures.
1654             ty::TyClosure(def_id, ref substs) => {
1655                 substs.upvar_tys(def_id, tcx).nth(i).unwrap()
1656             }
1657
1658             ty::TyGenerator(def_id, ref substs, _) => {
1659                 substs.field_tys(def_id, tcx).nth(i).unwrap()
1660             }
1661
1662             ty::TyTuple(tys) => tys[i],
1663
1664             // SIMD vector types.
1665             ty::TyAdt(def, ..) if def.repr.simd() => {
1666                 this.ty.simd_type(tcx)
1667             }
1668
1669             // ADTs.
1670             ty::TyAdt(def, substs) => {
1671                 match this.variants {
1672                     Variants::Single { index } => {
1673                         def.variants[index].fields[i].ty(tcx, substs)
1674                     }
1675
1676                     // Discriminant field for enums (where applicable).
1677                     Variants::Tagged { tag: ref discr, .. } |
1678                     Variants::NicheFilling { niche: ref discr, .. } => {
1679                         assert_eq!(i, 0);
1680                         let layout = LayoutDetails::scalar(tcx, discr.clone());
1681                         return MaybeResult::from_ok(TyLayout {
1682                             details: tcx.intern_layout(layout),
1683                             ty: discr.value.to_ty(tcx)
1684                         });
1685                     }
1686                 }
1687             }
1688
1689             ty::TyProjection(_) | ty::TyAnon(..) | ty::TyParam(_) |
1690             ty::TyInfer(_) | ty::TyError => {
1691                 bug!("TyLayout::field_type: unexpected type `{}`", this.ty)
1692             }
1693         })
1694     }
1695 }
1696
1697 struct Niche {
1698     offset: Size,
1699     scalar: Scalar,
1700     available: u128,
1701 }
1702
1703 impl Niche {
1704     fn reserve<'a, 'tcx>(
1705         &self,
1706         cx: LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>>,
1707         count: u128,
1708     ) -> Option<(u128, Scalar)> {
1709         if count > self.available {
1710             return None;
1711         }
1712         let Scalar { value, valid_range: ref v } = self.scalar;
1713         let bits = value.size(cx).bits();
1714         assert!(bits <= 128);
1715         let max_value = !0u128 >> (128 - bits);
1716         let start = v.end().wrapping_add(1) & max_value;
1717         let end = v.end().wrapping_add(count) & max_value;
1718         Some((start, Scalar { value, valid_range: *v.start()..=end }))
1719     }
1720 }
1721
1722 impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
1723     /// Find the offset of a niche leaf field, starting from
1724     /// the given type and recursing through aggregates.
1725     // FIXME(eddyb) traverse already optimized enums.
1726     fn find_niche(self, layout: TyLayout<'tcx>) -> Result<Option<Niche>, LayoutError<'tcx>> {
1727         let scalar_niche = |scalar: &Scalar, offset| {
1728             let Scalar { value, valid_range: ref v } = *scalar;
1729
1730             let bits = value.size(self).bits();
1731             assert!(bits <= 128);
1732             let max_value = !0u128 >> (128 - bits);
1733
1734             // Find out how many values are outside the valid range.
1735             let available = if v.start() <= v.end() {
1736                 v.start() + (max_value - v.end())
1737             } else {
1738                 v.start() - v.end() - 1
1739             };
1740
1741             // Give up if there is no niche value available.
1742             if available == 0 {
1743                 return None;
1744             }
1745
1746             Some(Niche { offset, scalar: scalar.clone(), available })
1747         };
1748
1749         // Locals variables which live across yields are stored
1750         // in the generator type as fields. These may be uninitialized
1751         // so we don't look for niches there.
1752         if let ty::TyGenerator(..) = layout.ty.sty {
1753             return Ok(None);
1754         }
1755
1756         match layout.abi {
1757             Abi::Scalar(ref scalar) => {
1758                 return Ok(scalar_niche(scalar, Size::ZERO));
1759             }
1760             Abi::ScalarPair(ref a, ref b) => {
1761                 // HACK(nox): We iter on `b` and then `a` because `max_by_key`
1762                 // returns the last maximum.
1763                 let niche = iter::once((b, a.value.size(self).abi_align(b.value.align(self))))
1764                     .chain(iter::once((a, Size::ZERO)))
1765                     .filter_map(|(scalar, offset)| scalar_niche(scalar, offset))
1766                     .max_by_key(|niche| niche.available);
1767                 return Ok(niche);
1768             }
1769             Abi::Vector { ref element, .. } => {
1770                 return Ok(scalar_niche(element, Size::ZERO));
1771             }
1772             _ => {}
1773         }
1774
1775         // Perhaps one of the fields is non-zero, let's recurse and find out.
1776         if let FieldPlacement::Union(_) = layout.fields {
1777             // Only Rust enums have safe-to-inspect fields
1778             // (a discriminant), other unions are unsafe.
1779             if let Variants::Single { .. } = layout.variants {
1780                 return Ok(None);
1781             }
1782         }
1783         if let FieldPlacement::Array { .. } = layout.fields {
1784             if layout.fields.count() > 0 {
1785                 return self.find_niche(layout.field(self, 0)?);
1786             } else {
1787                 return Ok(None);
1788             }
1789         }
1790         let mut niche = None;
1791         let mut available = 0;
1792         for i in 0..layout.fields.count() {
1793             if let Some(mut c) = self.find_niche(layout.field(self, i)?)? {
1794                 if c.available > available {
1795                     available = c.available;
1796                     c.offset += layout.fields.offset(i);
1797                     niche = Some(c);
1798                 }
1799             }
1800         }
1801         Ok(niche)
1802     }
1803 }
1804
1805 impl<'a> HashStable<StableHashingContext<'a>> for Variants {
1806     fn hash_stable<W: StableHasherResult>(&self,
1807                                           hcx: &mut StableHashingContext<'a>,
1808                                           hasher: &mut StableHasher<W>) {
1809         use ty::layout::Variants::*;
1810         mem::discriminant(self).hash_stable(hcx, hasher);
1811
1812         match *self {
1813             Single { index } => {
1814                 index.hash_stable(hcx, hasher);
1815             }
1816             Tagged {
1817                 ref tag,
1818                 ref variants,
1819             } => {
1820                 tag.hash_stable(hcx, hasher);
1821                 variants.hash_stable(hcx, hasher);
1822             }
1823             NicheFilling {
1824                 dataful_variant,
1825                 ref niche_variants,
1826                 ref niche,
1827                 niche_start,
1828                 ref variants,
1829             } => {
1830                 dataful_variant.hash_stable(hcx, hasher);
1831                 niche_variants.start().hash_stable(hcx, hasher);
1832                 niche_variants.end().hash_stable(hcx, hasher);
1833                 niche.hash_stable(hcx, hasher);
1834                 niche_start.hash_stable(hcx, hasher);
1835                 variants.hash_stable(hcx, hasher);
1836             }
1837         }
1838     }
1839 }
1840
1841 impl<'a> HashStable<StableHashingContext<'a>> for FieldPlacement {
1842     fn hash_stable<W: StableHasherResult>(&self,
1843                                           hcx: &mut StableHashingContext<'a>,
1844                                           hasher: &mut StableHasher<W>) {
1845         use ty::layout::FieldPlacement::*;
1846         mem::discriminant(self).hash_stable(hcx, hasher);
1847
1848         match *self {
1849             Union(count) => {
1850                 count.hash_stable(hcx, hasher);
1851             }
1852             Array { count, stride } => {
1853                 count.hash_stable(hcx, hasher);
1854                 stride.hash_stable(hcx, hasher);
1855             }
1856             Arbitrary { ref offsets, ref memory_index } => {
1857                 offsets.hash_stable(hcx, hasher);
1858                 memory_index.hash_stable(hcx, hasher);
1859             }
1860         }
1861     }
1862 }
1863
1864 impl<'a> HashStable<StableHashingContext<'a>> for Abi {
1865     fn hash_stable<W: StableHasherResult>(&self,
1866                                           hcx: &mut StableHashingContext<'a>,
1867                                           hasher: &mut StableHasher<W>) {
1868         use ty::layout::Abi::*;
1869         mem::discriminant(self).hash_stable(hcx, hasher);
1870
1871         match *self {
1872             Uninhabited => {}
1873             Scalar(ref value) => {
1874                 value.hash_stable(hcx, hasher);
1875             }
1876             ScalarPair(ref a, ref b) => {
1877                 a.hash_stable(hcx, hasher);
1878                 b.hash_stable(hcx, hasher);
1879             }
1880             Vector { ref element, count } => {
1881                 element.hash_stable(hcx, hasher);
1882                 count.hash_stable(hcx, hasher);
1883             }
1884             Aggregate { sized } => {
1885                 sized.hash_stable(hcx, hasher);
1886             }
1887         }
1888     }
1889 }
1890
1891 impl<'a> HashStable<StableHashingContext<'a>> for Scalar {
1892     fn hash_stable<W: StableHasherResult>(&self,
1893                                           hcx: &mut StableHashingContext<'a>,
1894                                           hasher: &mut StableHasher<W>) {
1895         let Scalar { value, ref valid_range } = *self;
1896         value.hash_stable(hcx, hasher);
1897         valid_range.start().hash_stable(hcx, hasher);
1898         valid_range.end().hash_stable(hcx, hasher);
1899     }
1900 }
1901
1902 impl_stable_hash_for!(struct ::ty::layout::LayoutDetails {
1903     variants,
1904     fields,
1905     abi,
1906     size,
1907     align
1908 });
1909
1910 impl_stable_hash_for!(enum ::ty::layout::Integer {
1911     I8,
1912     I16,
1913     I32,
1914     I64,
1915     I128
1916 });
1917
1918 impl_stable_hash_for!(enum ::ty::layout::Primitive {
1919     Int(integer, signed),
1920     Float(fty),
1921     Pointer
1922 });
1923
1924 impl<'gcx> HashStable<StableHashingContext<'gcx>> for Align {
1925     fn hash_stable<W: StableHasherResult>(&self,
1926                                           hcx: &mut StableHashingContext<'gcx>,
1927                                           hasher: &mut StableHasher<W>) {
1928         self.abi().hash_stable(hcx, hasher);
1929         self.pref().hash_stable(hcx, hasher);
1930     }
1931 }
1932
1933 impl<'gcx> HashStable<StableHashingContext<'gcx>> for Size {
1934     fn hash_stable<W: StableHasherResult>(&self,
1935                                           hcx: &mut StableHashingContext<'gcx>,
1936                                           hasher: &mut StableHasher<W>) {
1937         self.bytes().hash_stable(hcx, hasher);
1938     }
1939 }
1940
1941 impl<'a, 'gcx> HashStable<StableHashingContext<'a>> for LayoutError<'gcx>
1942 {
1943     fn hash_stable<W: StableHasherResult>(&self,
1944                                           hcx: &mut StableHashingContext<'a>,
1945                                           hasher: &mut StableHasher<W>) {
1946         use ty::layout::LayoutError::*;
1947         mem::discriminant(self).hash_stable(hcx, hasher);
1948
1949         match *self {
1950             Unknown(t) |
1951             SizeOverflow(t) => t.hash_stable(hcx, hasher)
1952         }
1953     }
1954 }