1 // Copyright 2016 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
4 //
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
10
11 use session::{self, DataTypeKind};
12 use ty::{self, Ty, TyCtxt, TypeFoldable, ReprOptions};
13
14 use syntax::ast::{self, FloatTy, IntTy, UintTy};
15 use syntax::attr;
16 use syntax_pos::DUMMY_SP;
17
18 use std::cmp;
19 use std::fmt;
20 use std::i128;
21 use std::mem;
22
23 use ich::StableHashingContext;
24 use rustc_data_structures::stable_hasher::{HashStable, StableHasher,
25                                            StableHasherResult};
26
27 pub use rustc_target::abi::*;
28
29 pub trait IntegerExt {
30     fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, signed: bool) -> Ty<'tcx>;
31     fn from_attr<C: HasDataLayout>(cx: C, ity: attr::IntType) -> Integer;
32     fn repr_discr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
33                   ty: Ty<'tcx>,
34                   repr: &ReprOptions,
35                   min: i128,
36                   max: i128)
37                   -> (Integer, bool);
38 }
39
40 impl IntegerExt for Integer {
41     fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, signed: bool) -> Ty<'tcx> {
42         match (*self, signed) {
43             (I8, false) => tcx.types.u8,
44             (I16, false) => tcx.types.u16,
45             (I32, false) => tcx.types.u32,
46             (I64, false) => tcx.types.u64,
47             (I128, false) => tcx.types.u128,
48             (I8, true) => tcx.types.i8,
49             (I16, true) => tcx.types.i16,
50             (I32, true) => tcx.types.i32,
51             (I64, true) => tcx.types.i64,
52             (I128, true) => tcx.types.i128,
53         }
54     }
55
56     /// Get the Integer type from an attr::IntType.
57     fn from_attr<C: HasDataLayout>(cx: C, ity: attr::IntType) -> Integer {
58         let dl = cx.data_layout();
59
60         match ity {
61             attr::SignedInt(IntTy::I8) | attr::UnsignedInt(UintTy::U8) => I8,
62             attr::SignedInt(IntTy::I16) | attr::UnsignedInt(UintTy::U16) => I16,
63             attr::SignedInt(IntTy::I32) | attr::UnsignedInt(UintTy::U32) => I32,
64             attr::SignedInt(IntTy::I64) | attr::UnsignedInt(UintTy::U64) => I64,
65             attr::SignedInt(IntTy::I128) | attr::UnsignedInt(UintTy::U128) => I128,
66             attr::SignedInt(IntTy::Isize) | attr::UnsignedInt(UintTy::Usize) => {
67                 dl.ptr_sized_integer()
68             }
69         }
70     }
71
72     /// Find the appropriate Integer type and signedness for the given
73     /// signed discriminant range and #[repr] attribute.
74     /// N.B.: u128 values above i128::MAX will be treated as signed, but
75     /// that shouldn't affect anything, other than maybe debuginfo.
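    /// Illustrative example: a discriminant range of 0..=300 with no `#[repr]`
    /// attribute fits in an unsigned `I16`, so this returns `(I16, false)`; with
    /// `#[repr(C)]` the result is widened to at least `(I32, false)`.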
76     fn repr_discr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
77                   ty: Ty<'tcx>,
78                   repr: &ReprOptions,
79                   min: i128,
80                   max: i128)
81                   -> (Integer, bool) {
82         // Theoretically, negative values could be larger in unsigned representation
83         // than the unsigned representation of the signed minimum. However, if there
84         // are any negative values, the only valid unsigned representation is u128
85         // which can fit all i128 values, so the result remains unaffected.
86         let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
87         let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));
88
89         let mut min_from_extern = None;
90         let min_default = I8;
91
92         if let Some(ity) = repr.int {
93             let discr = Integer::from_attr(tcx, ity);
94             let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
95             if discr < fit {
96                 bug!("Integer::repr_discr: `#[repr]` hint too small for \
97                   discriminant range of enum `{}`", ty)
98             }
99             return (discr, ity.is_signed());
100         }
101
102         if repr.c() {
103             match &tcx.sess.target.target.arch[..] {
104                 // WARNING: the ARM EABI has two variants; the one corresponding
105                 // to `at_least == I32` appears to be used on Linux and NetBSD,
106                 // but some systems may use the variant corresponding to no
107                 // lower bound.  However, we don't run on those yet...?
108                 "arm" => min_from_extern = Some(I32),
109                 _ => min_from_extern = Some(I32),
110             }
111         }
112
113         let at_least = min_from_extern.unwrap_or(min_default);
114
115         // If there are no negative values, we can use the unsigned fit.
116         if min >= 0 {
117             (cmp::max(unsigned_fit, at_least), false)
118         } else {
119             (cmp::max(signed_fit, at_least), true)
120         }
121     }
122 }
123
124 pub trait PrimitiveExt {
125     fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Ty<'tcx>;
126 }
127
128 impl PrimitiveExt for Primitive {
129     fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Ty<'tcx> {
130         match *self {
131             Int(i, signed) => i.to_ty(tcx, signed),
132             F32 => tcx.types.f32,
133             F64 => tcx.types.f64,
134             Pointer => tcx.mk_mut_ptr(tcx.mk_nil()),
135         }
136     }
137 }
138
139 /// The first half of a fat pointer.
140 ///
141 /// - For a trait object, this is the address of the box.
142 /// - For a slice, this is the base address.
143 pub const FAT_PTR_ADDR: usize = 0;
144
145 /// The second half of a fat pointer.
146 ///
147 /// - For a trait object, this is the address of the vtable.
148 /// - For a slice, this is the length.
149 pub const FAT_PTR_EXTRA: usize = 1;
150
151 #[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable)]
152 pub enum LayoutError<'tcx> {
153     Unknown(Ty<'tcx>),
154     SizeOverflow(Ty<'tcx>)
155 }
156
157 impl<'tcx> fmt::Display for LayoutError<'tcx> {
158     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
159         match *self {
160             LayoutError::Unknown(ty) => {
161                 write!(f, "the type `{:?}` has an unknown layout", ty)
162             }
163             LayoutError::SizeOverflow(ty) => {
164                 write!(f, "the type `{:?}` is too big for the current architecture", ty)
165             }
166         }
167     }
168 }
169
170 fn layout_raw<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
171                         query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
172                         -> Result<&'tcx LayoutDetails, LayoutError<'tcx>>
173 {
174     ty::tls::with_related_context(tcx, move |icx| {
175         let rec_limit = *tcx.sess.recursion_limit.get();
176         let (param_env, ty) = query.into_parts();
177
178         if icx.layout_depth > rec_limit {
179             tcx.sess.fatal(
180                 &format!("overflow representing the type `{}`", ty));
181         }
182
183         // Update the ImplicitCtxt to increase the layout_depth
184         let icx = ty::tls::ImplicitCtxt {
185             layout_depth: icx.layout_depth + 1,
186             ..icx.clone()
187         };
188
189         ty::tls::enter_context(&icx, |_| {
190             let cx = LayoutCx { tcx, param_env };
191             cx.layout_raw_uncached(ty)
192         })
193     })
194 }
195
196 pub fn provide(providers: &mut ty::maps::Providers) {
197     *providers = ty::maps::Providers {
198         layout_raw,
199         ..*providers
200     };
201 }
202
203 #[derive(Copy, Clone)]
204 pub struct LayoutCx<'tcx, C> {
205     pub tcx: C,
206     pub param_env: ty::ParamEnv<'tcx>
207 }
208
209 impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
210     fn layout_raw_uncached(self, ty: Ty<'tcx>)
211                            -> Result<&'tcx LayoutDetails, LayoutError<'tcx>> {
212         let tcx = self.tcx;
213         let param_env = self.param_env;
214         let dl = self.data_layout();
215         let scalar_unit = |value: Primitive| {
216             let bits = value.size(dl).bits();
217             assert!(bits <= 128);
218             Scalar {
219                 value,
220                 valid_range: 0..=(!0 >> (128 - bits))
221             }
222         };
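        // e.g. `scalar_unit(Int(I8, false))` yields the full `0..=0xff` valid range;
        // callers that need a narrower range (such as the non-null pointers below)
        // adjust `valid_range` afterwards.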
223         let scalar = |value: Primitive| {
224             tcx.intern_layout(LayoutDetails::scalar(self, scalar_unit(value)))
225         };
226         let scalar_pair = |a: Scalar, b: Scalar| {
227             let align = a.value.align(dl).max(b.value.align(dl)).max(dl.aggregate_align);
228             let b_offset = a.value.size(dl).abi_align(b.value.align(dl));
229             let size = (b_offset + b.value.size(dl)).abi_align(align);
230             LayoutDetails {
231                 variants: Variants::Single { index: 0 },
232                 fields: FieldPlacement::Arbitrary {
233                     offsets: vec![Size::from_bytes(0), b_offset],
234                     memory_index: vec![0, 1]
235                 },
236                 abi: Abi::ScalarPair(a, b),
237                 align,
238                 size
239             }
240         };
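        // Illustrative example: on a typical 64-bit target, `scalar_pair` for
        // (Pointer, I32) places the `I32` at offset 8 and yields size 16, align 8.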
241
242         #[derive(Copy, Clone, Debug)]
243         enum StructKind {
244             /// A tuple, closure, or univariant which cannot be coerced to unsized.
245             AlwaysSized,
246             /// A univariant, the last field of which may be coerced to unsized.
247             MaybeUnsized,
248             /// A univariant, but with a prefix of an arbitrary size & alignment (e.g. enum tag).
249             Prefixed(Size, Align),
250         }
251         let univariant_uninterned = |fields: &[TyLayout], repr: &ReprOptions, kind| {
252             let packed = repr.packed();
253             if packed && repr.align > 0 {
254                 bug!("struct cannot be packed and aligned");
255             }
256
257             let pack = {
258                 let pack = repr.pack as u64;
259                 Align::from_bytes(pack, pack).unwrap()
260             };
261
262             let mut align = if packed {
263                 dl.i8_align
264             } else {
265                 dl.aggregate_align
266             };
267
268             let mut sized = true;
269             let mut offsets = vec![Size::from_bytes(0); fields.len()];
270             let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();
271
272             let mut optimize = !repr.inhibit_struct_field_reordering_opt();
273             if let StructKind::Prefixed(_, align) = kind {
274                 optimize &= align.abi() == 1;
275             }
276
277             if optimize {
278                 let end = if let StructKind::MaybeUnsized = kind {
279                     fields.len() - 1
280                 } else {
281                     fields.len()
282                 };
283                 let optimizing = &mut inverse_memory_index[..end];
284                 let field_align = |f: &TyLayout| {
285                     if packed { f.align.min(pack).abi() } else { f.align.abi() }
286                 };
287                 match kind {
288                     StructKind::AlwaysSized |
289                     StructKind::MaybeUnsized => {
290                         optimizing.sort_by_key(|&x| {
291                             // Place ZSTs first to avoid "interesting offsets",
292                             // especially with only one or two non-ZST fields.
293                             let f = &fields[x as usize];
294                             (!f.is_zst(), cmp::Reverse(field_align(f)))
295                         });
296                     }
297                     StructKind::Prefixed(..) => {
298                         optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
299                     }
300                 }
301             }
302
303             // inverse_memory_index holds field indices by increasing memory offset.
304             // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
305             // We now write field offsets to the corresponding offset slot;
306             // field 5 with offset 0 puts 0 in offsets[5].
307             // At the bottom of this function, we use inverse_memory_index to produce memory_index.
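            // Illustrative example: for fields [a: u8, b: u32, c: u16] reordered by
            // decreasing alignment into memory order b, c, a, inverse_memory_index is
            // [1, 2, 0] and the memory_index produced below is [2, 0, 1].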
308
309             let mut offset = Size::from_bytes(0);
310
311             if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
312                 if packed {
313                     let prefix_align = prefix_align.min(pack);
314                     align = align.max(prefix_align);
315                 } else {
316                     align = align.max(prefix_align);
317                 }
318                 offset = prefix_size.abi_align(prefix_align);
319             }
320
321             for &i in &inverse_memory_index {
322                 let field = fields[i as usize];
323                 if !sized {
324                     bug!("univariant: field #{} of `{}` comes after unsized field",
325                         offsets.len(), ty);
326                 }
327
328                 if field.abi == Abi::Uninhabited {
329                     return Ok(LayoutDetails::uninhabited(fields.len()));
330                 }
331
332                 if field.is_unsized() {
333                     sized = false;
334                 }
335
336                 // Invariant: offset < dl.obj_size_bound() <= 1<<61
337                 if packed {
338                     let field_pack = field.align.min(pack);
339                     offset = offset.abi_align(field_pack);
340                     align = align.max(field_pack);
341                 }
342                 else {
343                     offset = offset.abi_align(field.align);
344                     align = align.max(field.align);
345                 }
346
347                 debug!("univariant offset: {:?} field: {:#?}", offset, field);
348                 offsets[i as usize] = offset;
349
350                 offset = offset.checked_add(field.size, dl)
351                     .ok_or(LayoutError::SizeOverflow(ty))?;
352             }
353
354             if repr.align > 0 {
355                 let repr_align = repr.align as u64;
356                 align = align.max(Align::from_bytes(repr_align, repr_align).unwrap());
357                 debug!("univariant repr_align: {:?}", repr_align);
358             }
359
360             debug!("univariant min_size: {:?}", offset);
361             let min_size = offset;
362
363             // As stated above, inverse_memory_index holds field indices by increasing offset.
364             // This makes it an already-sorted view of the offsets vec.
365             // To invert it, consider:
366             // If field 5 has offset 0, inverse_memory_index[0] is 5, and memory_index[5]
367             // should be 0; the loop below assigns the position counter i to memory_index[5].
368             // Note: if we didn't optimize, it's already right.
369
370             let mut memory_index;
371             if optimize {
372                 memory_index = vec![0; inverse_memory_index.len()];
373
374                 for i in 0..inverse_memory_index.len() {
375                     memory_index[inverse_memory_index[i] as usize] = i as u32;
376                 }
377             } else {
378                 memory_index = inverse_memory_index;
379             }
380
381             let size = min_size.abi_align(align);
382             let mut abi = Abi::Aggregate { sized };
383
384             // Unpack newtype ABIs and find scalar pairs.
385             if sized && size.bytes() > 0 {
386                 // All other fields must be ZSTs, and we need them to all start at 0.
387                 let mut zst_offsets =
388                     offsets.iter().enumerate().filter(|&(i, _)| fields[i].is_zst());
389                 if zst_offsets.all(|(_, o)| o.bytes() == 0) {
390                     let mut non_zst_fields =
391                         fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());
392
393                     match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
394                         // We have exactly one non-ZST field.
395                         (Some((i, field)), None, None) => {
396                             // Field fills the struct and it has a scalar or scalar pair ABI.
397                             if offsets[i].bytes() == 0 &&
398                                align.abi() == field.align.abi() &&
399                                size == field.size {
400                                 match field.abi {
401                                     // For plain scalars, or vectors of them, we can't unpack
402                                     // newtypes for `#[repr(C)]`, as that affects C ABIs.
403                                     Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
404                                         abi = field.abi.clone();
405                                     }
406                                     // But scalar pairs are Rust-specific and get
407                                     // treated as aggregates by C ABIs anyway.
408                                     Abi::ScalarPair(..) => {
409                                         abi = field.abi.clone();
410                                     }
411                                     _ => {}
412                                 }
413                             }
414                         }
415
416                         // Two non-ZST fields, and they're both scalars.
417                         (Some((i, &TyLayout {
418                             details: &LayoutDetails { abi: Abi::Scalar(ref a), .. }, ..
419                         })), Some((j, &TyLayout {
420                             details: &LayoutDetails { abi: Abi::Scalar(ref b), .. }, ..
421                         })), None) => {
422                             // Order by the memory placement, not source order.
423                             let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
424                                 ((i, a), (j, b))
425                             } else {
426                                 ((j, b), (i, a))
427                             };
428                             let pair = scalar_pair(a.clone(), b.clone());
429                             let pair_offsets = match pair.fields {
430                                 FieldPlacement::Arbitrary {
431                                     ref offsets,
432                                     ref memory_index
433                                 } => {
434                                     assert_eq!(memory_index, &[0, 1]);
435                                     offsets
436                                 }
437                                 _ => bug!()
438                             };
439                             if offsets[i] == pair_offsets[0] &&
440                                offsets[j] == pair_offsets[1] &&
441                                align == pair.align &&
442                                size == pair.size {
443                                 // We can use `ScalarPair` only when it matches our
444                                 // already computed layout (including `#[repr(C)]`).
445                                 abi = pair.abi;
446                             }
447                         }
448
449                         _ => {}
450                     }
451                 }
452             }
453
454             Ok(LayoutDetails {
455                 variants: Variants::Single { index: 0 },
456                 fields: FieldPlacement::Arbitrary {
457                     offsets,
458                     memory_index
459                 },
460                 abi,
461                 align,
462                 size
463             })
464         };
465         let univariant = |fields: &[TyLayout], repr: &ReprOptions, kind| {
466             Ok(tcx.intern_layout(univariant_uninterned(fields, repr, kind)?))
467         };
468         assert!(!ty.has_infer_types());
469
470         Ok(match ty.sty {
471             // Basic scalars.
472             ty::TyBool => {
473                 tcx.intern_layout(LayoutDetails::scalar(self, Scalar {
474                     value: Int(I8, false),
475                     valid_range: 0..=1
476                 }))
477             }
478             ty::TyChar => {
479                 tcx.intern_layout(LayoutDetails::scalar(self, Scalar {
480                     value: Int(I32, false),
481                     valid_range: 0..=0x10FFFF
482                 }))
483             }
484             ty::TyInt(ity) => {
485                 scalar(Int(Integer::from_attr(dl, attr::SignedInt(ity)), true))
486             }
487             ty::TyUint(ity) => {
488                 scalar(Int(Integer::from_attr(dl, attr::UnsignedInt(ity)), false))
489             }
490             ty::TyFloat(FloatTy::F32) => scalar(F32),
491             ty::TyFloat(FloatTy::F64) => scalar(F64),
492             ty::TyFnPtr(_) => {
493                 let mut ptr = scalar_unit(Pointer);
494                 ptr.valid_range = 1..=*ptr.valid_range.end();
495                 tcx.intern_layout(LayoutDetails::scalar(self, ptr))
496             }
497
498             // The never type.
499             ty::TyNever => {
500                 tcx.intern_layout(LayoutDetails::uninhabited(0))
501             }
502
503             // Potentially-fat pointers.
504             ty::TyRef(_, ty::TypeAndMut { ty: pointee, .. }) |
505             ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
506                 let mut data_ptr = scalar_unit(Pointer);
507                 if !ty.is_unsafe_ptr() {
508                     data_ptr.valid_range = 1..=*data_ptr.valid_range.end();
509                 }
510
511                 let pointee = tcx.normalize_erasing_regions(param_env, pointee);
512                 if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
513                     return Ok(tcx.intern_layout(LayoutDetails::scalar(self, data_ptr)));
514                 }
515
516                 let unsized_part = tcx.struct_tail(pointee);
517                 let metadata = match unsized_part.sty {
518                     ty::TyForeign(..) => {
519                         return Ok(tcx.intern_layout(LayoutDetails::scalar(self, data_ptr)));
520                     }
521                     ty::TySlice(_) | ty::TyStr => {
522                         scalar_unit(Int(dl.ptr_sized_integer(), false))
523                     }
524                     ty::TyDynamic(..) => {
525                         let mut vtable = scalar_unit(Pointer);
526                         vtable.valid_range = 1..=*vtable.valid_range.end();
527                         vtable
528                     }
529                     _ => return Err(LayoutError::Unknown(unsized_part))
530                 };
531
532                 // Effectively a (ptr, meta) tuple.
533                 tcx.intern_layout(scalar_pair(data_ptr, metadata))
534             }
535
536             // Arrays and slices.
537             ty::TyArray(element, mut count) => {
538                 if count.has_projections() {
539                     count = tcx.normalize_erasing_regions(param_env, count);
540                     if count.has_projections() {
541                         return Err(LayoutError::Unknown(ty));
542                     }
543                 }
544
545                 let element = self.layout_of(element)?;
546                 let count = count.val.unwrap_u64();
547                 let size = element.size.checked_mul(count, dl)
548                     .ok_or(LayoutError::SizeOverflow(ty))?;
549
550                 tcx.intern_layout(LayoutDetails {
551                     variants: Variants::Single { index: 0 },
552                     fields: FieldPlacement::Array {
553                         stride: element.size,
554                         count
555                     },
556                     abi: Abi::Aggregate { sized: true },
557                     align: element.align,
558                     size
559                 })
560             }
561             ty::TySlice(element) => {
562                 let element = self.layout_of(element)?;
563                 tcx.intern_layout(LayoutDetails {
564                     variants: Variants::Single { index: 0 },
565                     fields: FieldPlacement::Array {
566                         stride: element.size,
567                         count: 0
568                     },
569                     abi: Abi::Aggregate { sized: false },
570                     align: element.align,
571                     size: Size::from_bytes(0)
572                 })
573             }
574             ty::TyStr => {
575                 tcx.intern_layout(LayoutDetails {
576                     variants: Variants::Single { index: 0 },
577                     fields: FieldPlacement::Array {
578                         stride: Size::from_bytes(1),
579                         count: 0
580                     },
581                     abi: Abi::Aggregate { sized: false },
582                     align: dl.i8_align,
583                     size: Size::from_bytes(0)
584                 })
585             }
586
587             // Odd unit types.
588             ty::TyFnDef(..) => {
589                 univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?
590             }
591             ty::TyDynamic(..) | ty::TyForeign(..) => {
592                 let mut unit = univariant_uninterned(&[], &ReprOptions::default(),
593                   StructKind::AlwaysSized)?;
594                 match unit.abi {
595                     Abi::Aggregate { ref mut sized } => *sized = false,
596                     _ => bug!()
597                 }
598                 tcx.intern_layout(unit)
599             }
600
601             // Tuples, generators and closures.
602             ty::TyGenerator(def_id, ref substs, _) => {
603                 let tys = substs.field_tys(def_id, tcx);
604                 univariant(&tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
605                     &ReprOptions::default(),
606                     StructKind::AlwaysSized)?
607             }
608
609             ty::TyClosure(def_id, ref substs) => {
610                 let tys = substs.upvar_tys(def_id, tcx);
611                 univariant(&tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
612                     &ReprOptions::default(),
613                     StructKind::AlwaysSized)?
614             }
615
616             ty::TyTuple(tys) => {
617                 let kind = if tys.len() == 0 {
618                     StructKind::AlwaysSized
619                 } else {
620                     StructKind::MaybeUnsized
621                 };
622
623                 univariant(&tys.iter().map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
624                     &ReprOptions::default(), kind)?
625             }
626
627             // SIMD vector types.
628             ty::TyAdt(def, ..) if def.repr.simd() => {
629                 let element = self.layout_of(ty.simd_type(tcx))?;
630                 let count = ty.simd_size(tcx) as u64;
631                 assert!(count > 0);
632                 let scalar = match element.abi {
633                     Abi::Scalar(ref scalar) => scalar.clone(),
634                     _ => {
635                         tcx.sess.fatal(&format!("monomorphising SIMD type `{}` with \
636                                                 a non-machine element type `{}`",
637                                                 ty, element.ty));
638                     }
639                 };
640                 let size = element.size.checked_mul(count, dl)
641                     .ok_or(LayoutError::SizeOverflow(ty))?;
642                 let align = dl.vector_align(size);
643                 let size = size.abi_align(align);
644
645                 tcx.intern_layout(LayoutDetails {
646                     variants: Variants::Single { index: 0 },
647                     fields: FieldPlacement::Array {
648                         stride: element.size,
649                         count
650                     },
651                     abi: Abi::Vector {
652                         element: scalar,
653                         count
654                     },
655                     size,
656                     align,
657                 })
658             }
659
660             // ADTs.
661             ty::TyAdt(def, substs) => {
662                 // Cache the field layouts.
663                 let variants = def.variants.iter().map(|v| {
664                     v.fields.iter().map(|field| {
665                         self.layout_of(field.ty(tcx, substs))
666                     }).collect::<Result<Vec<_>, _>>()
667                 }).collect::<Result<Vec<_>, _>>()?;
668
669                 if def.is_union() {
670                     let packed = def.repr.packed();
671                     if packed && def.repr.align > 0 {
672                         bug!("Union cannot be packed and aligned");
673                     }
674
675                     let pack = {
676                         let pack = def.repr.pack as u64;
677                         Align::from_bytes(pack, pack).unwrap()
678                     };
679
680                     let mut align = if packed {
681                         dl.i8_align
682                     } else {
683                         dl.aggregate_align
684                     };
685
686                     if def.repr.align > 0 {
687                         let repr_align = def.repr.align as u64;
688                         align = align.max(
689                             Align::from_bytes(repr_align, repr_align).unwrap());
690                     }
691
692                     let mut size = Size::from_bytes(0);
693                     for field in &variants[0] {
694                         assert!(!field.is_unsized());
695
696                         if packed {
697                             let field_pack = field.align.min(pack);
698                             align = align.max(field_pack);
699                         } else {
700                             align = align.max(field.align);
701                         }
702                         size = cmp::max(size, field.size);
703                     }
704
705                     return Ok(tcx.intern_layout(LayoutDetails {
706                         variants: Variants::Single { index: 0 },
707                         fields: FieldPlacement::Union(variants[0].len()),
708                         abi: Abi::Aggregate { sized: true },
709                         align,
710                         size: size.abi_align(align)
711                     }));
712                 }
713
714                 let (inh_first, inh_second) = {
715                     let mut inh_variants = (0..variants.len()).filter(|&v| {
716                         variants[v].iter().all(|f| f.abi != Abi::Uninhabited)
717                     });
718                     (inh_variants.next(), inh_variants.next())
719                 };
720                 if inh_first.is_none() {
721                     // Uninhabited because it has no variants, or only uninhabited ones.
722                     return Ok(tcx.intern_layout(LayoutDetails::uninhabited(0)));
723                 }
724
725                 let is_struct = !def.is_enum() ||
726                     // Only one variant is inhabited.
727                     (inh_second.is_none() &&
728                     // Representation optimizations are allowed.
729                      !def.repr.inhibit_enum_layout_opt());
730                 if is_struct {
731                     // Struct, or univariant enum equivalent to a struct.
732                     // (Typechecking will reject discriminant-sizing attrs.)
733
734                     let v = inh_first.unwrap();
735                     let kind = if def.is_enum() || variants[v].len() == 0 {
736                         StructKind::AlwaysSized
737                     } else {
738                         let param_env = tcx.param_env(def.did);
739                         let last_field = def.variants[v].fields.last().unwrap();
740                         let always_sized = tcx.type_of(last_field.did)
741                           .is_sized(tcx.at(DUMMY_SP), param_env);
742                         if !always_sized { StructKind::MaybeUnsized }
743                         else { StructKind::AlwaysSized }
744                     };
745
746                     let mut st = univariant_uninterned(&variants[v], &def.repr, kind)?;
747                     st.variants = Variants::Single { index: v };
748                     // Exclude 0 from the range of a newtype ABI NonZero<T>.
749                     if Some(def.did) == self.tcx.lang_items().non_zero() {
750                         match st.abi {
751                             Abi::Scalar(ref mut scalar) |
752                             Abi::ScalarPair(ref mut scalar, _) => {
753                                 if *scalar.valid_range.start() == 0 {
754                                     scalar.valid_range = 1..=*scalar.valid_range.end();
755                                 }
756                             }
757                             _ => {}
758                         }
759                     }
760                     return Ok(tcx.intern_layout(st));
761                 }
762
763                 // The current code for niche-filling relies on variant indices
764                 // instead of actual discriminants, so dataful enums with
765                 // explicit discriminants (RFC #2363) would misbehave.
766                 let no_explicit_discriminants = def.variants.iter().enumerate()
767                     .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i));
768
769                 // Niche-filling enum optimization.
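                // Illustrative example: for `Option<&T>`, `Some` is the dataful variant
                // and `None` is encoded in the pointer's niche (the value 0, which a
                // valid reference can never be), so the whole enum stays pointer-sized.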
770                 if !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants {
771                     let mut dataful_variant = None;
772                     let mut niche_variants = usize::max_value()..=0;
773
774                     // Find one non-ZST variant.
775                     'variants: for (v, fields) in variants.iter().enumerate() {
776                         if fields.iter().any(|f| f.abi == Abi::Uninhabited) {
777                             continue 'variants;
778                         }
779                         for f in fields {
780                             if !f.is_zst() {
781                                 if dataful_variant.is_none() {
782                                     dataful_variant = Some(v);
783                                     continue 'variants;
784                                 } else {
785                                     dataful_variant = None;
786                                     break 'variants;
787                                 }
788                             }
789                         }
790                         niche_variants = *niche_variants.start().min(&v)..=v;
791                     }
792
793                     if niche_variants.start() > niche_variants.end() {
794                         dataful_variant = None;
795                     }
796
797                     if let Some(i) = dataful_variant {
798                         let count = (niche_variants.end() - niche_variants.start() + 1) as u128;
799                         for (field_index, &field) in variants[i].iter().enumerate() {
800                             let (offset, niche, niche_start) =
801                                 match self.find_niche(field, count)? {
802                                     Some(niche) => niche,
803                                     None => continue
804                                 };
805                             let mut align = dl.aggregate_align;
806                             let st = variants.iter().enumerate().map(|(j, v)| {
807                                 let mut st = univariant_uninterned(v,
808                                     &def.repr, StructKind::AlwaysSized)?;
809                                 st.variants = Variants::Single { index: j };
810
811                                 align = align.max(st.align);
812
813                                 Ok(st)
814                             }).collect::<Result<Vec<_>, _>>()?;
815
816                             let offset = st[i].fields.offset(field_index) + offset;
817                             let size = st[i].size;
818
819                             let abi = match st[i].abi {
820                                 Abi::Scalar(_) => Abi::Scalar(niche.clone()),
821                                 Abi::ScalarPair(ref first, ref second) => {
822                                     // We need to use scalar_unit to reset the
823                                     // valid range to the maximal one for that
824                                     // primitive, because only the niche is
825                                     // guaranteed to be initialised, not the
826                                     // other primitive.
827                                     if offset.bytes() == 0 {
828                                         Abi::ScalarPair(niche.clone(), scalar_unit(second.value))
829                                     } else {
830                                         Abi::ScalarPair(scalar_unit(first.value), niche.clone())
831                                     }
832                                 }
833                                 _ => Abi::Aggregate { sized: true },
834                             };
835
836                             return Ok(tcx.intern_layout(LayoutDetails {
837                                 variants: Variants::NicheFilling {
838                                     dataful_variant: i,
839                                     niche_variants,
840                                     niche,
841                                     niche_start,
842                                     variants: st,
843                                 },
844                                 fields: FieldPlacement::Arbitrary {
845                                     offsets: vec![offset],
846                                     memory_index: vec![0]
847                                 },
848                                 abi,
849                                 size,
850                                 align,
851                             }));
852                         }
853                     }
854                 }
855
856                 let (mut min, mut max) = (i128::max_value(), i128::min_value());
857                 let discr_type = def.repr.discr_type();
858                 let bits = Integer::from_attr(tcx, discr_type).size().bits();
859                 for (i, discr) in def.discriminants(tcx).enumerate() {
860                     if variants[i].iter().any(|f| f.abi == Abi::Uninhabited) {
861                         continue;
862                     }
863                     let mut x = discr.val as i128;
864                     if discr_type.is_signed() {
865                         // sign extend the raw representation to be an i128
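                        // e.g. with an i8-sized repr, the raw value 0xff sign-extends to -1.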
866                         x = (x << (128 - bits)) >> (128 - bits);
867                     }
868                     if x < min { min = x; }
869                     if x > max { max = x; }
870                 }
871                 assert!(min <= max, "discriminant range is {}...{}", min, max);
872                 let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max);
873
874                 let mut align = dl.aggregate_align;
875                 let mut size = Size::from_bytes(0);
876
877                 // We're interested in the smallest alignment, so start large.
878                 let mut start_align = Align::from_bytes(256, 256).unwrap();
879                 assert_eq!(Integer::for_abi_align(dl, start_align), None);
880
881                 // repr(C) on an enum tells us to make a (tag, union) layout,
882                 // so we need to grow the prefix alignment to be at least
883                 // the alignment of the union. (This value is used both for
884                 // determining the alignment of the overall enum, and for
885                 // determining the alignment of the payload after the tag.)
886                 let mut prefix_align = min_ity.align(dl);
887                 if def.repr.c() {
888                     for fields in &variants {
889                         for field in fields {
890                             prefix_align = prefix_align.max(field.align);
891                         }
892                     }
893                 }
894
895                 // Create the set of structs that represent each variant.
896                 let mut layout_variants = variants.iter().enumerate().map(|(i, field_layouts)| {
897                     let mut st = univariant_uninterned(&field_layouts,
898                         &def.repr, StructKind::Prefixed(min_ity.size(), prefix_align))?;
899                     st.variants = Variants::Single { index: i };
900                     // Find the first field we can't move later
901                     // to make room for a larger discriminant.
902                     for field in st.fields.index_by_increasing_offset().map(|j| field_layouts[j]) {
903                         if !field.is_zst() || field.align.abi() != 1 {
904                             start_align = start_align.min(field.align);
905                             break;
906                         }
907                     }
908                     size = cmp::max(size, st.size);
909                     align = align.max(st.align);
910                     Ok(st)
911                 }).collect::<Result<Vec<_>, _>>()?;
912
913                 // Align the maximum variant size to the largest alignment.
914                 size = size.abi_align(align);
915
916                 if size.bytes() >= dl.obj_size_bound() {
917                     return Err(LayoutError::SizeOverflow(ty));
918                 }
919
920                 let typeck_ity = Integer::from_attr(dl, def.repr.discr_type());
921                 if typeck_ity < min_ity {
922                     // It is a bug if layout decides on a larger discriminant size than
923                     // typeck did at this point (based on the values the discriminant can
924                     // take on), mostly because the discriminant will be loaded and then
925                     // stored into a variable of the type calculated by typeck. Consider
926                     // this (buggy) case: typeck decided on a byte-sized discriminant, but
927                     // layout thinks we need 16 bits to store all the discriminant values.
928                     // Then, in trans, storing that 16-bit discriminant into an 8-bit sized
929                     // temporary would have to discard some of the bits needed to represent
930                     // it (or layout would be wrong in thinking it needs 16 bits).
931                     bug!("layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
932                          min_ity, typeck_ity);
933                     // However, it is fine to make discr type however large (as an optimisation)
934                     // after this point; we'll just truncate the value we load in trans.
935                 }
936
937                 // Check to see if we should use a different type for the
938                 // discriminant. We can safely use a type with the same size
939                 // as the alignment of the first field of each variant.
940                 // We increase the size of the discriminant to avoid LLVM copying
941                 // padding when it doesn't need to. This normally causes unaligned
942                 // load/stores and excessive memcpy/memset operations. By using a
943                 // bigger integer size, LLVM can be sure about its contents and
944                 // won't be so conservative.
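                // Illustrative example: if every variant starts with a field of alignment 4,
                // a 1-byte tag can be widened to `I32`, so the tag and its padding are copied
                // as one aligned 4-byte unit instead of a byte followed by 3 padding bytes.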
945
946                 // Use the initial field alignment
947                 let mut ity = if def.repr.c() || def.repr.int.is_some() {
948                     min_ity
949                 } else {
950                     Integer::for_abi_align(dl, start_align).unwrap_or(min_ity)
951                 };
952
953                 // If the alignment is not larger than the chosen discriminant size,
954                 // don't use the alignment as the final size.
955                 if ity <= min_ity {
956                     ity = min_ity;
957                 } else {
958                     // Patch up the variants' first few fields.
959                     let old_ity_size = min_ity.size();
960                     let new_ity_size = ity.size();
961                     for variant in &mut layout_variants {
962                         if variant.abi == Abi::Uninhabited {
963                             continue;
964                         }
965                         match variant.fields {
966                             FieldPlacement::Arbitrary { ref mut offsets, .. } => {
967                                 for i in offsets {
968                                     if *i <= old_ity_size {
969                                         assert_eq!(*i, old_ity_size);
970                                         *i = new_ity_size;
971                                     }
972                                 }
973                                 // We might be making the struct larger.
974                                 if variant.size <= old_ity_size {
975                                     variant.size = new_ity_size;
976                                 }
977                             }
978                             _ => bug!()
979                         }
980                     }
981                 }
982
983                 let tag_mask = !0u128 >> (128 - ity.size().bits());
984                 let tag = Scalar {
985                     value: Int(ity, signed),
986                     valid_range: (min as u128 & tag_mask)..=(max as u128 & tag_mask),
987                 };
988                 let mut abi = Abi::Aggregate { sized: true };
989                 if tag.value.size(dl) == size {
990                     abi = Abi::Scalar(tag.clone());
991                 } else if !tag.is_bool() {
992                     // HACK(nox): Blindly using ScalarPair for all tagged enums
993                     // where applicable leads to Option<u8> being handled as {i1, i8},
994                     // which later confuses SROA and some loop optimisations,
995                     // ultimately leading to the repeat-trusted-len test
996                     // failing. We make the trade-off of using ScalarPair only
997                     // for types where the tag isn't a boolean.
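                    // Illustrative example: for `enum E { A(u32), B(u32) }`, each variant's
                    // single non-ZST field is a `u32` at the same offset after the tag, so
                    // `common_prim` stays `Some` and the enum as a whole can get a
                    // `ScalarPair(tag, u32)` ABI.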
998                     let mut common_prim = None;
999                     for (field_layouts, layout_variant) in variants.iter().zip(&layout_variants) {
1000                         let offsets = match layout_variant.fields {
1001                             FieldPlacement::Arbitrary { ref offsets, .. } => offsets,
1002                             _ => bug!(),
1003                         };
1004                         let mut fields = field_layouts
1005                             .iter()
1006                             .zip(offsets)
1007                             .filter(|p| !p.0.is_zst());
1008                         let (field, offset) = match (fields.next(), fields.next()) {
1009                             (None, None) => continue,
1010                             (Some(pair), None) => pair,
1011                             _ => {
1012                                 common_prim = None;
1013                                 break;
1014                             }
1015                         };
1016                         let prim = match field.details.abi {
1017                             Abi::Scalar(ref scalar) => scalar.value,
1018                             _ => {
1019                                 common_prim = None;
1020                                 break;
1021                             }
1022                         };
1023                         if let Some(pair) = common_prim {
1024                             // This is pretty conservative. We could go fancier
1025                             // by conflating things like i32 and u32, or even
1026                             // realising that (u8, u8) could just cohabit with
1027                             // u16 or even u32.
1028                             if pair != (prim, offset) {
1029                                 common_prim = None;
1030                                 break;
1031                             }
1032                         } else {
1033                             common_prim = Some((prim, offset));
1034                         }
1035                     }
1036                     if let Some((prim, offset)) = common_prim {
1037                         let pair = scalar_pair(tag.clone(), scalar_unit(prim));
1038                         let pair_offsets = match pair.fields {
1039                             FieldPlacement::Arbitrary {
1040                                 ref offsets,
1041                                 ref memory_index
1042                             } => {
1043                                 assert_eq!(memory_index, &[0, 1]);
1044                                 offsets
1045                             }
1046                             _ => bug!()
1047                         };
1048                         if pair_offsets[0] == Size::from_bytes(0) &&
1049                             pair_offsets[1] == *offset &&
1050                             align == pair.align &&
1051                             size == pair.size {
1052                             // We can use `ScalarPair` only when it matches our
1053                             // already computed layout (including `#[repr(C)]`).
1054                             abi = pair.abi;
1055                         }
1056                     }
1057                 }
1058                 tcx.intern_layout(LayoutDetails {
1059                     variants: Variants::Tagged {
1060                         discr: tag,
1061                         variants: layout_variants,
1062                     },
1063                     fields: FieldPlacement::Arbitrary {
1064                         offsets: vec![Size::from_bytes(0)],
1065                         memory_index: vec![0]
1066                     },
1067                     abi,
1068                     align,
1069                     size
1070                 })
1071             }
1072
1073             // Types with no meaningful known layout.
1074             ty::TyProjection(_) | ty::TyAnon(..) => {
1075                 let normalized = tcx.normalize_erasing_regions(param_env, ty);
1076                 if ty == normalized {
1077                     return Err(LayoutError::Unknown(ty));
1078                 }
1079                 tcx.layout_raw(param_env.and(normalized))?
1080             }
1081             ty::TyParam(_) => {
1082                 return Err(LayoutError::Unknown(ty));
1083             }
1084             ty::TyGeneratorWitness(..) | ty::TyInfer(_) | ty::TyError => {
1085                 bug!("LayoutDetails::compute: unexpected type `{}`", ty)
1086             }
1087         })
1088     }
1089
1090     /// This is invoked by the `layout_raw` query to record the final
1091     /// layout of each type.
1092     #[inline]
1093     fn record_layout_for_printing(self, layout: TyLayout<'tcx>) {
1094         // If we are running with `-Zprint-type-sizes`, record layouts for
1095         // dumping later. Ignore layouts that are done with non-empty
1096         // environments or non-monomorphic layouts, as the user only wants
1097         // to see the stuff resulting from the final trans session.
1098         if
1099             !self.tcx.sess.opts.debugging_opts.print_type_sizes ||
1100             layout.ty.has_param_types() ||
1101             layout.ty.has_self_ty() ||
1102             !self.param_env.caller_bounds.is_empty()
1103         {
1104             return;
1105         }
1106
1107         self.record_layout_for_printing_outlined(layout)
1108     }
1109
1110     fn record_layout_for_printing_outlined(self, layout: TyLayout<'tcx>) {
1111         // (delay format until we actually need it)
1112         let record = |kind, packed, opt_discr_size, variants| {
1113             let type_desc = format!("{:?}", layout.ty);
1114             self.tcx.sess.code_stats.borrow_mut().record_type_size(kind,
1115                                                                    type_desc,
1116                                                                    layout.align,
1117                                                                    layout.size,
1118                                                                    packed,
1119                                                                    opt_discr_size,
1120                                                                    variants);
1121         };
1122
1123         let adt_def = match layout.ty.sty {
1124             ty::TyAdt(ref adt_def, _) => {
1125                 debug!("print-type-size t: `{:?}` process adt", layout.ty);
1126                 adt_def
1127             }
1128
1129             ty::TyClosure(..) => {
1130                 debug!("print-type-size t: `{:?}` record closure", layout.ty);
1131                 record(DataTypeKind::Closure, false, None, vec![]);
1132                 return;
1133             }
1134
1135             _ => {
1136                 debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
1137                 return;
1138             }
1139         };
1140
1141         let adt_kind = adt_def.adt_kind();
1142         let adt_packed = adt_def.repr.packed();
1143
1144         let build_variant_info = |n: Option<ast::Name>,
1145                                   flds: &[ast::Name],
1146                                   layout: TyLayout<'tcx>| {
1147             let mut min_size = Size::from_bytes(0);
1148             let field_info: Vec<_> = flds.iter().enumerate().map(|(i, &name)| {
1149                 match layout.field(self, i) {
1150                     Err(err) => {
1151                         bug!("no layout found for field {}: `{:?}`", name, err);
1152                     }
1153                     Ok(field_layout) => {
1154                         let offset = layout.fields.offset(i);
1155                         let field_end = offset + field_layout.size;
1156                         if min_size < field_end {
1157                             min_size = field_end;
1158                         }
1159                         session::FieldInfo {
1160                             name: name.to_string(),
1161                             offset: offset.bytes(),
1162                             size: field_layout.size.bytes(),
1163                             align: field_layout.align.abi(),
1164                         }
1165                     }
1166                 }
1167             }).collect();
1168
1169             session::VariantInfo {
1170                 name: n.map(|n|n.to_string()),
1171                 kind: if layout.is_unsized() {
1172                     session::SizeKind::Min
1173                 } else {
1174                     session::SizeKind::Exact
1175                 },
1176                 align: layout.align.abi(),
1177                 size: if min_size.bytes() == 0 {
1178                     layout.size.bytes()
1179                 } else {
1180                     min_size.bytes()
1181                 },
1182                 fields: field_info,
1183             }
1184         };
1185
1186         match layout.variants {
1187             Variants::Single { index } => {
1188                 debug!("print-type-size `{:#?}` variant {}",
1189                        layout, adt_def.variants[index].name);
1190                 if !adt_def.variants.is_empty() {
1191                     let variant_def = &adt_def.variants[index];
1192                     let fields: Vec<_> =
1193                         variant_def.fields.iter().map(|f| f.name).collect();
1194                     record(adt_kind.into(),
1195                            adt_packed,
1196                            None,
1197                            vec![build_variant_info(Some(variant_def.name),
1198                                                    &fields,
1199                                                    layout)]);
1200                 } else {
1201                     // (This case arises for *empty* enums, so give it
1202                     // zero variants.)
1203                     record(adt_kind.into(), adt_packed, None, vec![]);
1204                 }
1205             }
1206
1207             Variants::NicheFilling { .. } |
1208             Variants::Tagged { .. } => {
1209                 debug!("print-type-size `{:#?}` adt general variants def {}",
1210                        layout.ty, adt_def.variants.len());
1211                 let variant_infos: Vec<_> =
1212                     adt_def.variants.iter().enumerate().map(|(i, variant_def)| {
1213                         let fields: Vec<_> =
1214                             variant_def.fields.iter().map(|f| f.name).collect();
1215                         build_variant_info(Some(variant_def.name),
1216                                             &fields,
1217                                             layout.for_variant(self, i))
1218                     })
1219                     .collect();
1220                 record(adt_kind.into(), adt_packed, match layout.variants {
1221                     Variants::Tagged { ref discr, .. } => Some(discr.value.size(self)),
1222                     _ => None
1223                 }, variant_infos);
1224             }
1225         }
1226     }
1227 }
1228
1229 /// Type size "skeleton", i.e. the only information determining a type's size.
1230 /// While this is conservative (aside from constant sizes, only pointers,
1231 /// newtypes thereof and null-pointer-optimized enums are allowed), it is
1232 /// enough to statically check common use cases of transmute.
1233 #[derive(Copy, Clone, Debug)]
1234 pub enum SizeSkeleton<'tcx> {
1235     /// Any statically computable Layout.
1236     Known(Size),
1237
1238     /// A potentially-fat pointer.
1239     Pointer {
1240         /// If true, this pointer is never null.
1241         non_zero: bool,
1242         /// The type which determines the unsized metadata, if any,
1243         /// of this pointer. Either a type parameter or a projection
1244         /// depending on one, with regions erased.
1245         tail: Ty<'tcx>
1246     }
1247 }
1248
1249 impl<'a, 'tcx> SizeSkeleton<'tcx> {
1250     pub fn compute(ty: Ty<'tcx>,
1251                    tcx: TyCtxt<'a, 'tcx, 'tcx>,
1252                    param_env: ty::ParamEnv<'tcx>)
1253                    -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
1254         assert!(!ty.has_infer_types());
1255
1256         // First try computing a static layout.
1257         let err = match tcx.layout_of(param_env.and(ty)) {
1258             Ok(layout) => {
1259                 return Ok(SizeSkeleton::Known(layout.size));
1260             }
1261             Err(err) => err
1262         };
1263
1264         match ty.sty {
1265             ty::TyRef(_, ty::TypeAndMut { ty: pointee, .. }) |
1266             ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
1267                 let non_zero = !ty.is_unsafe_ptr();
1268                 let tail = tcx.struct_tail(pointee);
1269                 match tail.sty {
1270                     ty::TyParam(_) | ty::TyProjection(_) => {
1271                         assert!(tail.has_param_types() || tail.has_self_ty());
1272                         Ok(SizeSkeleton::Pointer {
1273                             non_zero,
1274                             tail: tcx.erase_regions(&tail)
1275                         })
1276                     }
1277                     _ => {
1278                         bug!("SizeSkeleton::compute({}): layout errored ({}), yet \
1279                               tail `{}` is not a type parameter or a projection",
1280                              ty, err, tail)
1281                     }
1282                 }
1283             }
1284
1285             ty::TyAdt(def, substs) => {
1286                 // Only newtypes and enums w/ nullable pointer optimization.
1287                 if def.is_union() || def.variants.is_empty() || def.variants.len() > 2 {
1288                     return Err(err);
1289                 }
1290
1291                 // Get a zero-sized variant or a pointer newtype.
1292                 let zero_or_ptr_variant = |i: usize| {
1293                     let fields = def.variants[i].fields.iter().map(|field| {
1294                         SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env)
1295                     });
1296                     let mut ptr = None;
1297                     for field in fields {
1298                         let field = field?;
1299                         match field {
1300                             SizeSkeleton::Known(size) => {
1301                                 if size.bytes() > 0 {
1302                                     return Err(err);
1303                                 }
1304                             }
1305                             SizeSkeleton::Pointer {..} => {
1306                                 if ptr.is_some() {
1307                                     return Err(err);
1308                                 }
1309                                 ptr = Some(field);
1310                             }
1311                         }
1312                     }
1313                     Ok(ptr)
1314                 };
1315
1316                 let v0 = zero_or_ptr_variant(0)?;
1317                 // Newtype.
1318                 if def.variants.len() == 1 {
1319                     if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
1320                         return Ok(SizeSkeleton::Pointer {
1321                             non_zero: non_zero ||
1322                                 Some(def.did) == tcx.lang_items().non_zero(),
1323                             tail,
1324                         });
1325                     } else {
1326                         return Err(err);
1327                     }
1328                 }
1329
1330                 let v1 = zero_or_ptr_variant(1)?;
1331                 // Nullable pointer enum optimization.
1332                 match (v0, v1) {
1333                     (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None) |
1334                     (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
1335                         Ok(SizeSkeleton::Pointer {
1336                             non_zero: false,
1337                             tail,
1338                         })
1339                     }
1340                     _ => Err(err)
1341                 }
1342             }
1343
1344             ty::TyProjection(_) | ty::TyAnon(..) => {
1345                 let normalized = tcx.normalize_erasing_regions(param_env, ty);
1346                 if ty == normalized {
1347                     Err(err)
1348                 } else {
1349                     SizeSkeleton::compute(normalized, tcx, param_env)
1350                 }
1351             }
1352
1353             _ => Err(err)
1354         }
1355     }
1356
1357     pub fn same_size(self, other: SizeSkeleton) -> bool {
1358         match (self, other) {
1359             (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
1360             (SizeSkeleton::Pointer { tail: a, .. },
1361              SizeSkeleton::Pointer { tail: b, .. }) => a == b,
1362             _ => false
1363         }
1364     }
1365 }
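// Illustrative sketch (added for exposition, not part of the original source):
// how `SizeSkeleton::compute` and `same_size` could back a size-equality check
// in the spirit of the transmute use case mentioned above. The helper name and
// signature are assumptions; only the two calls come from this file.
fn size_skeletons_match<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                                  param_env: ty::ParamEnv<'tcx>,
                                  src: Ty<'tcx>,
                                  dst: Ty<'tcx>)
                                  -> Result<bool, LayoutError<'tcx>> {
    // Each side yields either a statically known `Size` or a (possibly fat)
    // pointer skeleton identified by its unsized tail.
    let src_skeleton = SizeSkeleton::compute(src, tcx, param_env)?;
    let dst_skeleton = SizeSkeleton::compute(dst, tcx, param_env)?;
    // `Known` sizes compare by value; `Pointer` skeletons compare by their
    // tail type, ignoring `non_zero`.
    Ok(src_skeleton.same_size(dst_skeleton))
}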
1366
1367 pub trait HasTyCtxt<'tcx>: HasDataLayout {
1368     fn tcx<'a>(&'a self) -> TyCtxt<'a, 'tcx, 'tcx>;
1369 }
1370
1371 impl<'a, 'gcx, 'tcx> HasDataLayout for TyCtxt<'a, 'gcx, 'tcx> {
1372     fn data_layout(&self) -> &TargetDataLayout {
1373         &self.data_layout
1374     }
1375 }
1376
1377 impl<'a, 'gcx, 'tcx> HasTyCtxt<'gcx> for TyCtxt<'a, 'gcx, 'tcx> {
1378     fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'gcx> {
1379         self.global_tcx()
1380     }
1381 }
1382
1383 impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
1384     fn data_layout(&self) -> &TargetDataLayout {
1385         self.tcx.data_layout()
1386     }
1387 }
1388
1389 impl<'gcx, 'tcx, T: HasTyCtxt<'gcx>> HasTyCtxt<'gcx> for LayoutCx<'tcx, T> {
1390     fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'gcx> {
1391         self.tcx.tcx()
1392     }
1393 }
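// Illustrative sketch (added for exposition, not part of the original source):
// the `HasDataLayout`/`HasTyCtxt` impls above let layout code stay generic over
// whether it holds a bare `TyCtxt` or a `LayoutCx` wrapper. The helper below is
// an assumption showing such a bound; `pointer_size` comes from
// `TargetDataLayout`.
fn pointer_size_of<'gcx, C: HasTyCtxt<'gcx>>(cx: &C) -> Size {
    // `HasTyCtxt` implies `HasDataLayout`, so the target's pointer size is
    // reachable through `data_layout()`.
    cx.data_layout().pointer_size
}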
1394
1395 pub trait MaybeResult<T> {
1396     fn from_ok(x: T) -> Self;
1397     fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self;
1398 }
1399
1400 impl<T> MaybeResult<T> for T {
1401     fn from_ok(x: T) -> Self {
1402         x
1403     }
1404     fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self {
1405         f(self)
1406     }
1407 }
1408
1409 impl<T, E> MaybeResult<T> for Result<T, E> {
1410     fn from_ok(x: T) -> Self {
1411         Ok(x)
1412     }
1413     fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self {
1414         self.map(f)
1415     }
1416 }
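// Illustrative sketch (added for exposition, not part of the original source):
// `MaybeResult` lets generic layout code accept either a plain `TyLayout` or a
// `Result<TyLayout, LayoutError>` as `C::TyLayout`. The helper below is an
// assumption showing the intended use of `from_ok` and `map_same`.
fn wrap_and_touch<T, M: MaybeResult<T>>(x: T) -> M {
    // `from_ok` wraps a known-good value (`Ok(x)` for the `Result` impl, `x`
    // itself for the identity impl); `map_same` then applies a same-type
    // transformation without caring which impl is in play.
    M::from_ok(x).map_same(|v| v)
}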
1417
1418 pub type TyLayout<'tcx> = ::rustc_target::abi::TyLayout<'tcx, Ty<'tcx>>;
1419
1420 impl<'a, 'tcx> LayoutOf for LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
1421     type Ty = Ty<'tcx>;
1422     type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;
1423
1424     /// Computes the layout of a type. Note that this implicitly
1425     /// executes in "reveal all" mode.
1426     fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
1427         let param_env = self.param_env.with_reveal_all();
1428         let ty = self.tcx.normalize_erasing_regions(param_env, ty);
1429         let details = self.tcx.layout_raw(param_env.and(ty))?;
1430         let layout = TyLayout {
1431             ty,
1432             details
1433         };
1434
1435         // NB: This recording is normally disabled; when enabled, it
1436         // can however trigger recursive invocations of `layout_of`.
1437         // Therefore, we execute it *after* the main query has
1438         // completed, to avoid problems around recursive structures
1439         // and the like. (Admittedly, I wasn't able to reproduce a problem
1440         // here, but it seems like the right thing to do. -nmatsakis)
1441         self.record_layout_for_printing(layout);
1442
1443         Ok(layout)
1444     }
1445 }
1446
1447 impl<'a, 'tcx> LayoutOf for LayoutCx<'tcx, ty::maps::TyCtxtAt<'a, 'tcx, 'tcx>> {
1448     type Ty = Ty<'tcx>;
1449     type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;
1450
1451     /// Computes the layout of a type. Note that this implicitly
1452     /// executes in "reveal all" mode.
1453     fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
1454         let param_env = self.param_env.with_reveal_all();
1455         let ty = self.tcx.normalize_erasing_regions(param_env, ty);
1456         let details = self.tcx.layout_raw(param_env.and(ty))?;
1457         let layout = TyLayout {
1458             ty,
1459             details
1460         };
1461
1462         // NB: This recording is normally disabled; when enabled, it
1463         // can however trigger recursive invocations of `layout_of`.
1464         // Therefore, we execute it *after* the main query has
1465         // completed, to avoid problems around recursive structures
1466         // and the like. (Admittedly, I wasn't able to reproduce a problem
1467         // here, but it seems like the right thing to do. -nmatsakis)
1468         let cx = LayoutCx {
1469             tcx: *self.tcx,
1470             param_env: self.param_env
1471         };
1472         cx.record_layout_for_printing(layout);
1473
1474         Ok(layout)
1475     }
1476 }
1477
1478 // Helper (inherent) `layout_of` methods to avoid pushing `LayoutCx` to users.
1479 impl<'a, 'tcx> TyCtxt<'a, 'tcx, 'tcx> {
1480     /// Computes the layout of a type. Note that this implicitly
1481     /// executes in "reveal all" mode.
1482     #[inline]
1483     pub fn layout_of(self, param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
1484                      -> Result<TyLayout<'tcx>, LayoutError<'tcx>> {
1485         let cx = LayoutCx {
1486             tcx: self,
1487             param_env: param_env_and_ty.param_env
1488         };
1489         cx.layout_of(param_env_and_ty.value)
1490     }
1491 }
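// Illustrative sketch (added for exposition, not part of the original source):
// using the inherent helper above from code that holds a `TyCtxt`. The function
// and its parameters are assumptions; `param_env.and(ty)` and the `size`/`align`
// accessors match their uses elsewhere in this file.
fn log_size_and_align<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                                param_env: ty::ParamEnv<'tcx>,
                                ty: Ty<'tcx>)
                                -> Result<(), LayoutError<'tcx>> {
    // `layout_of` normalizes the type in "reveal all" mode and consults the
    // `layout_raw` query, as in the `LayoutOf` impls above.
    let layout = tcx.layout_of(param_env.and(ty))?;
    debug!("{:?}: size = {} bytes, abi align = {} bytes",
           ty, layout.size.bytes(), layout.align.abi());
    Ok(())
}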
1492
1493 impl<'a, 'tcx> ty::maps::TyCtxtAt<'a, 'tcx, 'tcx> {
1494     /// Computes the layout of a type. Note that this implicitly
1495     /// executes in "reveal all" mode.
1496     #[inline]
1497     pub fn layout_of(self, param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
1498                      -> Result<TyLayout<'tcx>, LayoutError<'tcx>> {
1499         let cx = LayoutCx {
1500             tcx: self,
1501             param_env: param_env_and_ty.param_env
1502         };
1503         cx.layout_of(param_env_and_ty.value)
1504     }
1505 }
1506
1507 impl<'a, 'tcx, C> TyLayoutMethods<'tcx, C> for Ty<'tcx>
1508     where C: LayoutOf<Ty = Ty<'tcx>> + HasTyCtxt<'tcx>,
1509           C::TyLayout: MaybeResult<TyLayout<'tcx>>
1510 {
1511     fn for_variant(this: TyLayout<'tcx>, cx: C, variant_index: usize) -> TyLayout<'tcx> {
1512         let details = match this.variants {
1513             Variants::Single { index } if index == variant_index => this.details,
1514
1515             Variants::Single { index } => {
1516                 // Deny calling for_variant more than once for non-Single enums.
1517                 cx.layout_of(this.ty).map_same(|layout| {
1518                     assert_eq!(layout.variants, Variants::Single { index });
1519                     layout
1520                 });
1521
1522                 let fields = match this.ty.sty {
1523                     ty::TyAdt(def, _) => def.variants[variant_index].fields.len(),
1524                     _ => bug!()
1525                 };
1526                 let mut details = LayoutDetails::uninhabited(fields);
1527                 details.variants = Variants::Single { index: variant_index };
1528                 cx.tcx().intern_layout(details)
1529             }
1530
1531             Variants::NicheFilling { ref variants, .. } |
1532             Variants::Tagged { ref variants, .. } => {
1533                 &variants[variant_index]
1534             }
1535         };
1536
1537         assert_eq!(details.variants, Variants::Single { index: variant_index });
1538
1539         TyLayout {
1540             ty: this.ty,
1541             details
1542         }
1543     }
1544
1545     fn field(this: TyLayout<'tcx>, cx: C, i: usize) -> C::TyLayout {
1546         let tcx = cx.tcx();
1547         cx.layout_of(match this.ty.sty {
1548             ty::TyBool |
1549             ty::TyChar |
1550             ty::TyInt(_) |
1551             ty::TyUint(_) |
1552             ty::TyFloat(_) |
1553             ty::TyFnPtr(_) |
1554             ty::TyNever |
1555             ty::TyFnDef(..) |
1556             ty::TyGeneratorWitness(..) |
1557             ty::TyForeign(..) |
1558             ty::TyDynamic(..) => {
1559                 bug!("TyLayout::field_type({:?}): not applicable", this)
1560             }
1561
1562             // Potentially-fat pointers.
1563             ty::TyRef(_, ty::TypeAndMut { ty: pointee, .. }) |
1564             ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
1565                 assert!(i < 2);
1566
1567                 // Reuse the fat *T type as its own thin pointer data field.
1568                 // This provides information about e.g. DST struct pointees
1569                 // (which may have no non-DST form), and will work as long
1570                 // as the `Abi` or `FieldPlacement` is checked by users.
1571                 if i == 0 {
1572                     let nil = tcx.mk_nil();
1573                     let ptr_ty = if this.ty.is_unsafe_ptr() {
1574                         tcx.mk_mut_ptr(nil)
1575                     } else {
1576                         tcx.mk_mut_ref(tcx.types.re_static, nil)
1577                     };
1578                     return cx.layout_of(ptr_ty).map_same(|mut ptr_layout| {
1579                         ptr_layout.ty = this.ty;
1580                         ptr_layout
1581                     });
1582                 }
1583
1584                 match tcx.struct_tail(pointee).sty {
1585                     ty::TySlice(_) |
1586                     ty::TyStr => tcx.types.usize,
1587                     ty::TyDynamic(..) => {
1588                         // FIXME(eddyb) use a usize/fn() array with
1589                         // the correct number of vtable slots.
1590                         tcx.mk_imm_ref(tcx.types.re_static, tcx.mk_nil())
1591                     }
1592                     _ => bug!("TyLayout::field_type({:?}): not applicable", this)
1593                 }
1594             }
1595
1596             // Arrays and slices.
1597             ty::TyArray(element, _) |
1598             ty::TySlice(element) => element,
1599             ty::TyStr => tcx.types.u8,
1600
1601             // Tuples, generators and closures.
1602             ty::TyClosure(def_id, ref substs) => {
1603                 substs.upvar_tys(def_id, tcx).nth(i).unwrap()
1604             }
1605
1606             ty::TyGenerator(def_id, ref substs, _) => {
1607                 substs.field_tys(def_id, tcx).nth(i).unwrap()
1608             }
1609
1610             ty::TyTuple(tys) => tys[i],
1611
1612             // SIMD vector types.
1613             ty::TyAdt(def, ..) if def.repr.simd() => {
1614                 this.ty.simd_type(tcx)
1615             }
1616
1617             // ADTs.
1618             ty::TyAdt(def, substs) => {
1619                 match this.variants {
1620                     Variants::Single { index } => {
1621                         def.variants[index].fields[i].ty(tcx, substs)
1622                     }
1623
1624                     // Discriminant field for enums (where applicable).
1625                     Variants::Tagged { ref discr, .. } |
1626                     Variants::NicheFilling { niche: ref discr, .. } => {
1627                         assert_eq!(i, 0);
1628                         let layout = LayoutDetails::scalar(tcx, discr.clone());
1629                         return MaybeResult::from_ok(TyLayout {
1630                             details: tcx.intern_layout(layout),
1631                             ty: discr.value.to_ty(tcx)
1632                         });
1633                     }
1634                 }
1635             }
1636
1637             ty::TyProjection(_) | ty::TyAnon(..) | ty::TyParam(_) |
1638             ty::TyInfer(_) | ty::TyError => {
1639                 bug!("TyLayout::field_type: unexpected type `{}`", this.ty)
1640             }
1641         })
1642     }
1643 }
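// Illustrative sketch (added for exposition, not part of the original source):
// combining `for_variant` and `field` from the impl above to collect the field
// sizes of one enum variant. The helper is an assumption; `cx` is the same
// `LayoutCx<'tcx, TyCtxt<..>>` whose `layout_of` returns a `Result`, hence the
// `?` on each field.
fn variant_field_sizes<'a, 'tcx>(cx: LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>>,
                                 layout: TyLayout<'tcx>,
                                 variant_index: usize)
                                 -> Result<Vec<Size>, LayoutError<'tcx>> {
    // Downcast the whole-enum layout to the requested variant, then ask for
    // each field's layout in declaration order.
    let variant = layout.for_variant(cx, variant_index);
    (0..variant.fields.count())
        .map(|i| Ok(variant.field(cx, i)?.size))
        .collect()
}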
1644
1645 impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
1646     /// Find the offset of a niche leaf field with at least `count`
1647     /// consecutive invalid values, starting from the given type and
1648     /// recursing through aggregates.
1649     /// The returned tuple is `(offset, scalar, niche_value)`.
1650     // FIXME(eddyb) traverse already optimized enums.
1651     fn find_niche(self, layout: TyLayout<'tcx>, count: u128)
1652         -> Result<Option<(Size, Scalar, u128)>, LayoutError<'tcx>>
1653     {
1654         let scalar_component = |scalar: &Scalar, offset| {
1655             let Scalar { value, valid_range: ref v } = *scalar;
1656
1657             let bits = value.size(self).bits();
1658             assert!(bits <= 128);
1659             let max_value = !0u128 >> (128 - bits);
1660
1661             // Find out how many values are outside the valid range.
1662             let niches = if v.start() <= v.end() {
1663                 v.start() + (max_value - v.end())
1664             } else {
1665                 v.start() - v.end() - 1
1666             };
1667
1668             // Give up if we can't fit `count` consecutive niches.
1669             if count > niches {
1670                 return None;
1671             }
1672
1673             let niche_start = v.end().wrapping_add(1) & max_value;
1674             let niche_end = v.end().wrapping_add(count) & max_value;
1675             Some((offset, Scalar {
1676                 value,
1677                 valid_range: *v.start()..=niche_end
1678             }, niche_start))
1679         };
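        // Worked example (added for exposition, not part of the original
        // source): for `bool` the scalar is 8 bits wide with `valid_range`
        // 0..=1, so `max_value` is 255 and there are 0 + (255 - 1) = 254
        // niches; with `count == 1`, `niche_start` is 2, `niche_end` is 2,
        // and the returned scalar's valid range widens to 0..=2.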
1680
1681         // Local variables which live across yields are stored
1682         // in the generator type as fields. These may be uninitialized,
1683         // so we don't look for niches there.
1684         if let ty::TyGenerator(..) = layout.ty.sty {
1685             return Ok(None);
1686         }
1687
1688         match layout.abi {
1689             Abi::Scalar(ref scalar) => {
1690                 return Ok(scalar_component(scalar, Size::from_bytes(0)));
1691             }
1692             Abi::ScalarPair(ref a, ref b) => {
1693                 return Ok(scalar_component(a, Size::from_bytes(0)).or_else(|| {
1694                     scalar_component(b, a.value.size(self).abi_align(b.value.align(self)))
1695                 }));
1696             }
1697             Abi::Vector { ref element, .. } => {
1698                 return Ok(scalar_component(element, Size::from_bytes(0)));
1699             }
1700             _ => {}
1701         }
1702
1703         // Perhaps one of the fields is non-zero; let's recurse and find out.
1704         if let FieldPlacement::Union(_) = layout.fields {
1705             // Only Rust enums have safe-to-inspect fields
1706             // (a discriminant); other unions are unsafe.
1707             if let Variants::Single { .. } = layout.variants {
1708                 return Ok(None);
1709             }
1710         }
1711         if let FieldPlacement::Array { .. } = layout.fields {
1712             if layout.fields.count() > 0 {
1713                 return self.find_niche(layout.field(self, 0)?, count);
1714             }
1715         }
1716         for i in 0..layout.fields.count() {
1717             let r = self.find_niche(layout.field(self, i)?, count)?;
1718             if let Some((offset, scalar, niche_value)) = r {
1719                 let offset = layout.fields.offset(i) + offset;
1720                 return Ok(Some((offset, scalar, niche_value)));
1721             }
1722         }
1723         Ok(None)
1724     }
1725 }
1726
1727 impl<'a> HashStable<StableHashingContext<'a>> for Variants {
1728     fn hash_stable<W: StableHasherResult>(&self,
1729                                           hcx: &mut StableHashingContext<'a>,
1730                                           hasher: &mut StableHasher<W>) {
1731         use ty::layout::Variants::*;
1732         mem::discriminant(self).hash_stable(hcx, hasher);
1733
1734         match *self {
1735             Single { index } => {
1736                 index.hash_stable(hcx, hasher);
1737             }
1738             Tagged {
1739                 ref discr,
1740                 ref variants,
1741             } => {
1742                 discr.hash_stable(hcx, hasher);
1743                 variants.hash_stable(hcx, hasher);
1744             }
1745             NicheFilling {
1746                 dataful_variant,
1747                 ref niche_variants,
1748                 ref niche,
1749                 niche_start,
1750                 ref variants,
1751             } => {
1752                 dataful_variant.hash_stable(hcx, hasher);
1753                 niche_variants.start().hash_stable(hcx, hasher);
1754                 niche_variants.end().hash_stable(hcx, hasher);
1755                 niche.hash_stable(hcx, hasher);
1756                 niche_start.hash_stable(hcx, hasher);
1757                 variants.hash_stable(hcx, hasher);
1758             }
1759         }
1760     }
1761 }
1762
1763 impl<'a> HashStable<StableHashingContext<'a>> for FieldPlacement {
1764     fn hash_stable<W: StableHasherResult>(&self,
1765                                           hcx: &mut StableHashingContext<'a>,
1766                                           hasher: &mut StableHasher<W>) {
1767         use ty::layout::FieldPlacement::*;
1768         mem::discriminant(self).hash_stable(hcx, hasher);
1769
1770         match *self {
1771             Union(count) => {
1772                 count.hash_stable(hcx, hasher);
1773             }
1774             Array { count, stride } => {
1775                 count.hash_stable(hcx, hasher);
1776                 stride.hash_stable(hcx, hasher);
1777             }
1778             Arbitrary { ref offsets, ref memory_index } => {
1779                 offsets.hash_stable(hcx, hasher);
1780                 memory_index.hash_stable(hcx, hasher);
1781             }
1782         }
1783     }
1784 }
1785
1786 impl<'a> HashStable<StableHashingContext<'a>> for Abi {
1787     fn hash_stable<W: StableHasherResult>(&self,
1788                                           hcx: &mut StableHashingContext<'a>,
1789                                           hasher: &mut StableHasher<W>) {
1790         use ty::layout::Abi::*;
1791         mem::discriminant(self).hash_stable(hcx, hasher);
1792
1793         match *self {
1794             Uninhabited => {}
1795             Scalar(ref value) => {
1796                 value.hash_stable(hcx, hasher);
1797             }
1798             ScalarPair(ref a, ref b) => {
1799                 a.hash_stable(hcx, hasher);
1800                 b.hash_stable(hcx, hasher);
1801             }
1802             Vector { ref element, count } => {
1803                 element.hash_stable(hcx, hasher);
1804                 count.hash_stable(hcx, hasher);
1805             }
1806             Aggregate { sized } => {
1807                 sized.hash_stable(hcx, hasher);
1808             }
1809         }
1810     }
1811 }
1812
1813 impl<'a> HashStable<StableHashingContext<'a>> for Scalar {
1814     fn hash_stable<W: StableHasherResult>(&self,
1815                                           hcx: &mut StableHashingContext<'a>,
1816                                           hasher: &mut StableHasher<W>) {
1817         let Scalar { value, ref valid_range } = *self;
1818         value.hash_stable(hcx, hasher);
1819         valid_range.start().hash_stable(hcx, hasher);
1820         valid_range.end().hash_stable(hcx, hasher);
1821     }
1822 }
1823
1824 impl_stable_hash_for!(struct ::ty::layout::LayoutDetails {
1825     variants,
1826     fields,
1827     abi,
1828     size,
1829     align
1830 });
1831
1832 impl_stable_hash_for!(enum ::ty::layout::Integer {
1833     I8,
1834     I16,
1835     I32,
1836     I64,
1837     I128
1838 });
1839
1840 impl_stable_hash_for!(enum ::ty::layout::Primitive {
1841     Int(integer, signed),
1842     F32,
1843     F64,
1844     Pointer
1845 });
1846
1847 impl<'gcx> HashStable<StableHashingContext<'gcx>> for Align {
1848     fn hash_stable<W: StableHasherResult>(&self,
1849                                           hcx: &mut StableHashingContext<'gcx>,
1850                                           hasher: &mut StableHasher<W>) {
1851         self.abi().hash_stable(hcx, hasher);
1852         self.pref().hash_stable(hcx, hasher);
1853     }
1854 }
1855
1856 impl<'gcx> HashStable<StableHashingContext<'gcx>> for Size {
1857     fn hash_stable<W: StableHasherResult>(&self,
1858                                           hcx: &mut StableHashingContext<'gcx>,
1859                                           hasher: &mut StableHasher<W>) {
1860         self.bytes().hash_stable(hcx, hasher);
1861     }
1862 }
1863
1864 impl<'a, 'gcx> HashStable<StableHashingContext<'a>> for LayoutError<'gcx>
1865 {
1866     fn hash_stable<W: StableHasherResult>(&self,
1867                                           hcx: &mut StableHashingContext<'a>,
1868                                           hasher: &mut StableHasher<W>) {
1869         use ty::layout::LayoutError::*;
1870         mem::discriminant(self).hash_stable(hcx, hasher);
1871
1872         match *self {
1873             Unknown(t) |
1874             SizeOverflow(t) => t.hash_stable(hcx, hasher)
1875         }
1876     }
1877 }