[rust.git] src/librustc/ty/layout.rs
1 // Copyright 2016 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
4 //
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
10
11 pub use self::Integer::*;
12 pub use self::Layout::*;
13 pub use self::Primitive::*;
14
15 use session::{self, DataTypeKind, Session};
16 use ty::{self, Ty, TyCtxt, TypeFoldable, ReprOptions, ReprFlags};
17
18 use syntax::ast::{self, FloatTy, IntTy, UintTy};
19 use syntax::attr;
20 use syntax_pos::DUMMY_SP;
21
22 use std::cmp;
23 use std::fmt;
24 use std::i64;
25 use std::iter;
26 use std::mem;
27 use std::ops::Deref;
28
29 use ich::StableHashingContext;
30 use rustc_data_structures::stable_hasher::{HashStable, StableHasher,
31                                            StableHasherResult};
32
33 /// Parsed [Data layout](http://llvm.org/docs/LangRef.html#data-layout)
34 /// for a target, which contains everything needed to compute layouts.
35 pub struct TargetDataLayout {
36     pub endian: Endian,
37     pub i1_align: Align,
38     pub i8_align: Align,
39     pub i16_align: Align,
40     pub i32_align: Align,
41     pub i64_align: Align,
42     pub i128_align: Align,
43     pub f32_align: Align,
44     pub f64_align: Align,
45     pub pointer_size: Size,
46     pub pointer_align: Align,
47     pub aggregate_align: Align,
48
49     /// Alignments for vector types.
50     pub vector_align: Vec<(Size, Align)>
51 }
52
53 impl Default for TargetDataLayout {
54     /// Creates an instance of `TargetDataLayout`.
55     fn default() -> TargetDataLayout {
56         TargetDataLayout {
57             endian: Endian::Big,
58             i1_align: Align::from_bits(8, 8).unwrap(),
59             i8_align: Align::from_bits(8, 8).unwrap(),
60             i16_align: Align::from_bits(16, 16).unwrap(),
61             i32_align: Align::from_bits(32, 32).unwrap(),
62             i64_align: Align::from_bits(32, 64).unwrap(),
63             i128_align: Align::from_bits(32, 64).unwrap(),
64             f32_align: Align::from_bits(32, 32).unwrap(),
65             f64_align: Align::from_bits(64, 64).unwrap(),
66             pointer_size: Size::from_bits(64),
67             pointer_align: Align::from_bits(64, 64).unwrap(),
68             aggregate_align: Align::from_bits(0, 64).unwrap(),
69             vector_align: vec![
70                 (Size::from_bits(64), Align::from_bits(64, 64).unwrap()),
71                 (Size::from_bits(128), Align::from_bits(128, 128).unwrap())
72             ]
73         }
74     }
75 }
76
77 impl TargetDataLayout {
78     pub fn parse(sess: &Session) -> TargetDataLayout {
79         // Parse a bit count from a string.
80         let parse_bits = |s: &str, kind: &str, cause: &str| {
81             s.parse::<u64>().unwrap_or_else(|err| {
82                 sess.err(&format!("invalid {} `{}` for `{}` in \"data-layout\": {}",
83                                   kind, s, cause, err));
84                 0
85             })
86         };
87
88         // Parse a size string.
89         let size = |s: &str, cause: &str| {
90             Size::from_bits(parse_bits(s, "size", cause))
91         };
92
93         // Parse an alignment string.
94         let align = |s: &[&str], cause: &str| {
95             if s.is_empty() {
96                 sess.err(&format!("missing alignment for `{}` in \"data-layout\"", cause));
97             }
98             let abi = parse_bits(s[0], "alignment", cause);
99             let pref = s.get(1).map_or(abi, |pref| parse_bits(pref, "alignment", cause));
100             Align::from_bits(abi, pref).unwrap_or_else(|err| {
101                 sess.err(&format!("invalid alignment for `{}` in \"data-layout\": {}",
102                                   cause, err));
103                 Align::from_bits(8, 8).unwrap()
104             })
105         };
106
107         let mut dl = TargetDataLayout::default();
108         let mut i128_align_src = 64;
109         for spec in sess.target.target.data_layout.split("-") {
110             match &spec.split(":").collect::<Vec<_>>()[..] {
111                 &["e"] => dl.endian = Endian::Little,
112                 &["E"] => dl.endian = Endian::Big,
113                 &["a", ref a..] => dl.aggregate_align = align(a, "a"),
114                 &["f32", ref a..] => dl.f32_align = align(a, "f32"),
115                 &["f64", ref a..] => dl.f64_align = align(a, "f64"),
116                 &[p @ "p", s, ref a..] | &[p @ "p0", s, ref a..] => {
117                     dl.pointer_size = size(s, p);
118                     dl.pointer_align = align(a, p);
119                 }
120                 &[s, ref a..] if s.starts_with("i") => {
121                     let bits = match s[1..].parse::<u64>() {
122                         Ok(bits) => bits,
123                         Err(_) => {
124                             size(&s[1..], "i"); // For the user error.
125                             continue;
126                         }
127                     };
128                     let a = align(a, s);
129                     match bits {
130                         1 => dl.i1_align = a,
131                         8 => dl.i8_align = a,
132                         16 => dl.i16_align = a,
133                         32 => dl.i32_align = a,
134                         64 => dl.i64_align = a,
135                         _ => {}
136                     }
137                     if bits >= i128_align_src && bits <= 128 {
138                         // Default alignment for i128 is decided by taking the alignment of
139                         // largest-sized i{64...128}.
140                         i128_align_src = bits;
141                         dl.i128_align = a;
142                     }
143                 }
144                 &[s, ref a..] if s.starts_with("v") => {
145                     let v_size = size(&s[1..], "v");
146                     let a = align(a, s);
147                     if let Some(v) = dl.vector_align.iter_mut().find(|v| v.0 == v_size) {
148                         v.1 = a;
149                         continue;
150                     }
151                     // No existing entry, add a new one.
152                     dl.vector_align.push((v_size, a));
153                 }
154                 _ => {} // Ignore everything else.
155             }
156         }
157
158         // Perform consistency checks against the Target information.
159         let endian_str = match dl.endian {
160             Endian::Little => "little",
161             Endian::Big => "big"
162         };
163         if endian_str != sess.target.target.target_endian {
164             sess.err(&format!("inconsistent target specification: \"data-layout\" claims \
165                                architecture is {}-endian, while \"target-endian\" is `{}`",
166                               endian_str, sess.target.target.target_endian));
167         }
168
169         if dl.pointer_size.bits().to_string() != sess.target.target.target_pointer_width {
170             sess.err(&format!("inconsistent target specification: \"data-layout\" claims \
171                                pointers are {}-bit, while \"target-pointer-width\" is `{}`",
172                               dl.pointer_size.bits(), sess.target.target.target_pointer_width));
173         }
174
175         dl
176     }
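// NOTE (editor): illustrative sketch, not part of the original source. `parse`
// walks an LLVM-style data-layout string: specs are separated by `-` and the
// fields inside a spec by `:`. Assuming a target whose spec is the typical
// x86_64 string "e-m:e-i64:64-f80:128-n8:16:32:64-S128", the loop above would
// set roughly:
//
//     dl.endian    = Endian::Little;                      // from "e"
//     dl.i64_align = Align::from_bits(64, 64).unwrap();   // from "i64:64"
//     // "m:e", "f80:128", "n8:16:32:64" and "S128" fall through to `_ => {}`.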
177
178     /// Return exclusive upper bound on object size.
179     ///
180     /// The theoretical maximum object size is defined as the maximum positive `isize` value.
181     /// This ensures that the `offset` semantics remain well-defined by allowing it to correctly
182     /// index every address within an object, plus one byte past the end, while also allowing
183     /// `isize` to store the difference between any two pointers into an object.
184     ///
185     /// The upper bound on 64-bit currently needs to be lower because LLVM uses a 64-bit integer
186     /// to represent object size in bits. It would need to be 1 << 61 to account for this, but is
187     /// currently conservatively bounded to 1 << 47 as that is enough to cover the current usable
188     /// address space on 64-bit ARMv8 and x86_64.
189     pub fn obj_size_bound(&self) -> u64 {
190         match self.pointer_size.bits() {
191             16 => 1 << 15,
192             32 => 1 << 31,
193             64 => 1 << 47,
194             bits => bug!("obj_size_bound: unknown pointer bit size {}", bits)
195         }
196     }
197
198     pub fn ptr_sized_integer(&self) -> Integer {
199         match self.pointer_size.bits() {
200             16 => I16,
201             32 => I32,
202             64 => I64,
203             bits => bug!("ptr_sized_integer: unknown pointer bit size {}", bits)
204         }
205     }
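// NOTE (editor): illustrative sketch, not part of the original source. With the
// `Default` layout above (64-bit pointers), these helpers give:
//
//     let dl = TargetDataLayout::default();
//     assert_eq!(dl.obj_size_bound(), 1 << 47);
//     assert_eq!(dl.ptr_sized_integer(), I64);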
206 }
207
208 pub trait HasDataLayout: Copy {
209     fn data_layout(&self) -> &TargetDataLayout;
210 }
211
212 impl<'a> HasDataLayout for &'a TargetDataLayout {
213     fn data_layout(&self) -> &TargetDataLayout {
214         self
215     }
216 }
217
218 impl<'a, 'tcx> HasDataLayout for TyCtxt<'a, 'tcx, 'tcx> {
219     fn data_layout(&self) -> &TargetDataLayout {
220         &self.data_layout
221     }
222 }
223
224 /// Endianness of the target, which must match cfg(target_endian).
225 #[derive(Copy, Clone)]
226 pub enum Endian {
227     Little,
228     Big
229 }
230
231 /// Size of a type in bytes.
232 #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
233 pub struct Size {
234     raw: u64
235 }
236
237 impl Size {
238     pub fn from_bits(bits: u64) -> Size {
239         Size::from_bytes((bits + 7) / 8)
240     }
241
242     pub fn from_bytes(bytes: u64) -> Size {
243         if bytes >= (1 << 61) {
244             bug!("Size::from_bytes: {} bytes in bits doesn't fit in u64", bytes)
245         }
246         Size {
247             raw: bytes
248         }
249     }
250
251     pub fn bytes(self) -> u64 {
252         self.raw
253     }
254
255     pub fn bits(self) -> u64 {
256         self.bytes() * 8
257     }
258
259     pub fn abi_align(self, align: Align) -> Size {
260         let mask = align.abi() - 1;
261         Size::from_bytes((self.bytes() + mask) & !mask)
262     }
263
264     pub fn checked_add<C: HasDataLayout>(self, offset: Size, cx: C) -> Option<Size> {
265         let dl = cx.data_layout();
266
267         // Each Size is less than dl.obj_size_bound(), so the sum is
268         // also less than 1 << 62 (and therefore can't overflow).
269         let bytes = self.bytes() + offset.bytes();
270
271         if bytes < dl.obj_size_bound() {
272             Some(Size::from_bytes(bytes))
273         } else {
274             None
275         }
276     }
277
278     pub fn checked_mul<C: HasDataLayout>(self, count: u64, cx: C) -> Option<Size> {
279         let dl = cx.data_layout();
280
281         // Each Size is less than dl.obj_size_bound(), so the sum is
282         // also less than 1 << 62 (and therefore can't overflow).
283         match self.bytes().checked_mul(count) {
284             Some(bytes) if bytes < dl.obj_size_bound() => {
285                 Some(Size::from_bytes(bytes))
286             }
287             _ => None
288         }
289     }
290 }
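// NOTE (editor): illustrative sketch, not part of the original source. `from_bits`
// rounds up to whole bytes and `abi_align` rounds a size up to an ABI alignment:
//
//     assert_eq!(Size::from_bits(65).bytes(), 9);                  // 65 bits -> 9 bytes
//     let a8 = Align::from_bytes(8, 8).unwrap();
//     assert_eq!(Size::from_bytes(10).abi_align(a8).bytes(), 16);  // next multiple of 8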
291
292 /// Alignment of a type in bytes, both ABI-mandated and preferred.
293 /// Each field is a power of two, giving the alignment a maximum
294 /// value of 2^(2^8 - 1), which is limited by LLVM to an i32, with
295 /// a maximum capacity of 2^31 - 1 or 2147483647.
296 #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
297 pub struct Align {
298     abi: u8,
299     pref: u8,
300 }
301
302 impl Align {
303     pub fn from_bits(abi: u64, pref: u64) -> Result<Align, String> {
304         Align::from_bytes((abi + 7) / 8, (pref + 7) / 8)
305     }
306
307     pub fn from_bytes(abi: u64, pref: u64) -> Result<Align, String> {
308         let log2 = |align: u64| {
309             // Treat an alignment of 0 bytes like 1-byte alignment.
310             if align == 0 {
311                 return Ok(0);
312             }
313
314             let mut bytes = align;
315             let mut pow: u8 = 0;
316             while (bytes & 1) == 0 {
317                 pow += 1;
318                 bytes >>= 1;
319             }
320             if bytes != 1 {
321                 Err(format!("`{}` is not a power of 2", align))
322             } else if pow > 30 {
323                 Err(format!("`{}` is too large", align))
324             } else {
325                 Ok(pow)
326             }
327         };
328
329         Ok(Align {
330             abi: log2(abi)?,
331             pref: log2(pref)?,
332         })
333     }
334
335     pub fn abi(self) -> u64 {
336         1 << self.abi
337     }
338
339     pub fn pref(self) -> u64 {
340         1 << self.pref
341     }
342
343     pub fn min(self, other: Align) -> Align {
344         Align {
345             abi: cmp::min(self.abi, other.abi),
346             pref: cmp::min(self.pref, other.pref),
347         }
348     }
349
350     pub fn max(self, other: Align) -> Align {
351         Align {
352             abi: cmp::max(self.abi, other.abi),
353             pref: cmp::max(self.pref, other.pref),
354         }
355     }
356 }
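// NOTE (editor): illustrative sketch, not part of the original source. Both
// alignments are stored as log2 exponents, so:
//
//     let a = Align::from_bytes(8, 16).unwrap();    // abi = 2^3, pref = 2^4
//     assert_eq!((a.abi(), a.pref()), (8, 16));
//     assert!(Align::from_bytes(12, 12).is_err());  // 12 is not a power of 2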
357
358 /// Integers, also used for enum discriminants.
359 #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
360 pub enum Integer {
361     I1,
362     I8,
363     I16,
364     I32,
365     I64,
366     I128,
367 }
368
369 impl Integer {
370     pub fn size(&self) -> Size {
371         match *self {
372             I1 => Size::from_bits(1),
373             I8 => Size::from_bytes(1),
374             I16 => Size::from_bytes(2),
375             I32 => Size::from_bytes(4),
376             I64 => Size::from_bytes(8),
377             I128 => Size::from_bytes(16),
378         }
379     }
380
381     pub fn align<C: HasDataLayout>(&self, cx: C) -> Align {
382         let dl = cx.data_layout();
383
384         match *self {
385             I1 => dl.i1_align,
386             I8 => dl.i8_align,
387             I16 => dl.i16_align,
388             I32 => dl.i32_align,
389             I64 => dl.i64_align,
390             I128 => dl.i128_align,
391         }
392     }
393
394     pub fn to_ty<'a, 'tcx>(&self, tcx: &TyCtxt<'a, 'tcx, 'tcx>,
395                            signed: bool) -> Ty<'tcx> {
396         match (*self, signed) {
397             (I1, false) => tcx.types.u8,
398             (I8, false) => tcx.types.u8,
399             (I16, false) => tcx.types.u16,
400             (I32, false) => tcx.types.u32,
401             (I64, false) => tcx.types.u64,
402             (I128, false) => tcx.types.u128,
403             (I1, true) => tcx.types.i8,
404             (I8, true) => tcx.types.i8,
405             (I16, true) => tcx.types.i16,
406             (I32, true) => tcx.types.i32,
407             (I64, true) => tcx.types.i64,
408             (I128, true) => tcx.types.i128,
409         }
410     }
411
412     /// Find the smallest Integer type which can represent the signed value.
413     pub fn fit_signed(x: i64) -> Integer {
414         match x {
415             -0x0000_0000_0000_0001...0x0000_0000_0000_0000 => I1,
416             -0x0000_0000_0000_0080...0x0000_0000_0000_007f => I8,
417             -0x0000_0000_0000_8000...0x0000_0000_0000_7fff => I16,
418             -0x0000_0000_8000_0000...0x0000_0000_7fff_ffff => I32,
419             -0x8000_0000_0000_0000...0x7fff_ffff_ffff_ffff => I64,
420             _ => I128
421         }
422     }
423
424     /// Find the smallest Integer type which can represent the unsigned value.
425     pub fn fit_unsigned(x: u64) -> Integer {
426         match x {
427             0...0x0000_0000_0000_0001 => I1,
428             0...0x0000_0000_0000_00ff => I8,
429             0...0x0000_0000_0000_ffff => I16,
430             0...0x0000_0000_ffff_ffff => I32,
431             0...0xffff_ffff_ffff_ffff => I64,
432             _ => I128,
433         }
434     }
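// NOTE (editor): illustrative sketch, not part of the original source. A few
// sample values and the smallest Integer picked for them:
//
//     assert_eq!(Integer::fit_signed(-1), I1);      // fits the -1...0 range
//     assert_eq!(Integer::fit_signed(-129), I16);   // just outside the i8 range
//     assert_eq!(Integer::fit_unsigned(255), I8);
//     assert_eq!(Integer::fit_unsigned(256), I16);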
435
436     /// Find the smallest integer with the given alignment.
437     pub fn for_abi_align<C: HasDataLayout>(cx: C, align: Align) -> Option<Integer> {
438         let dl = cx.data_layout();
439
440         let wanted = align.abi();
441         for &candidate in &[I8, I16, I32, I64] {
442             let ty = Int(candidate);
443             if wanted == ty.align(dl).abi() && wanted == ty.size(dl).bytes() {
444                 return Some(candidate);
445             }
446         }
447         None
448     }
449
450     /// Get the Integer type from an attr::IntType.
451     pub fn from_attr<C: HasDataLayout>(cx: C, ity: attr::IntType) -> Integer {
452         let dl = cx.data_layout();
453
454         match ity {
455             attr::SignedInt(IntTy::I8) | attr::UnsignedInt(UintTy::U8) => I8,
456             attr::SignedInt(IntTy::I16) | attr::UnsignedInt(UintTy::U16) => I16,
457             attr::SignedInt(IntTy::I32) | attr::UnsignedInt(UintTy::U32) => I32,
458             attr::SignedInt(IntTy::I64) | attr::UnsignedInt(UintTy::U64) => I64,
459             attr::SignedInt(IntTy::I128) | attr::UnsignedInt(UintTy::U128) => I128,
460             attr::SignedInt(IntTy::Is) | attr::UnsignedInt(UintTy::Us) => {
461                 dl.ptr_sized_integer()
462             }
463         }
464     }
465
466     /// Find the appropriate Integer type and signedness for the given
467     /// signed discriminant range and #[repr] attribute.
468     /// N.B.: u64 values above i64::MAX will be treated as signed, but
469     /// that shouldn't affect anything, other than maybe debuginfo.
470     fn repr_discr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
471                             ty: Ty<'tcx>,
472                             repr: &ReprOptions,
473                             min: i64,
474                             max: i64)
475                             -> (Integer, bool) {
476         // Theoretically, negative values could be larger in unsigned representation
477         // than the unsigned representation of the signed minimum. However, if there
478         // are any negative values, the only valid unsigned representation is u64
479         // which can fit all i64 values, so the result remains unaffected.
480         let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u64, max as u64));
481         let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));
482
483         let mut min_from_extern = None;
484         let min_default = I8;
485
486         if let Some(ity) = repr.int {
487             let discr = Integer::from_attr(tcx, ity);
488             let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
489             if discr < fit {
490                 bug!("Integer::repr_discr: `#[repr]` hint too small for \
491                   discriminant range of enum `{}", ty)
492             }
493             return (discr, ity.is_signed());
494         }
495
496         if repr.c() {
497             match &tcx.sess.target.target.arch[..] {
498                 // WARNING: the ARM EABI has two variants; the one corresponding
499                 // to `at_least == I32` appears to be used on Linux and NetBSD,
500                 // but some systems may use the variant corresponding to no
501                 // lower bound.  However, we don't run on those yet...?
502                 "arm" => min_from_extern = Some(I32),
503                 _ => min_from_extern = Some(I32),
504             }
505         }
506
507         let at_least = min_from_extern.unwrap_or(min_default);
508
509         // If there are no negative values, we can use the unsigned fit.
510         if min >= 0 {
511             (cmp::max(unsigned_fit, at_least), false)
512         } else {
513             (cmp::max(signed_fit, at_least), true)
514         }
515     }
516 }
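// NOTE (editor): illustrative sketch, not part of the original source. Worked
// examples of `repr_discr`, where `repr_default` and `repr_c` are hypothetical
// names for a ReprOptions with no hint and with #[repr(C)] respectively:
//
//     Integer::repr_discr(tcx, ty, &repr_default, 0, 3)   // == (I8, false)
//     Integer::repr_discr(tcx, ty, &repr_c, 0, 3)         // == (I32, false), repr(C) raises the lower bound
//     Integer::repr_discr(tcx, ty, &repr_default, -1, 0)  // == (I8, true), min < 0 takes the signed path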
517
518 /// Fundamental unit of memory access and layout.
519 #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
520 pub enum Primitive {
521     Int(Integer),
522     F32,
523     F64,
524     Pointer
525 }
526
527 impl Primitive {
528     pub fn size<C: HasDataLayout>(self, cx: C) -> Size {
529         let dl = cx.data_layout();
530
531         match self {
532             Int(I1) | Int(I8) => Size::from_bits(8),
533             Int(I16) => Size::from_bits(16),
534             Int(I32) | F32 => Size::from_bits(32),
535             Int(I64) | F64 => Size::from_bits(64),
536             Int(I128) => Size::from_bits(128),
537             Pointer => dl.pointer_size
538         }
539     }
540
541     pub fn align<C: HasDataLayout>(self, cx: C) -> Align {
542         let dl = cx.data_layout();
543
544         match self {
545             Int(I1) => dl.i1_align,
546             Int(I8) => dl.i8_align,
547             Int(I16) => dl.i16_align,
548             Int(I32) => dl.i32_align,
549             Int(I64) => dl.i64_align,
550             Int(I128) => dl.i128_align,
551             F32 => dl.f32_align,
552             F64 => dl.f64_align,
553             Pointer => dl.pointer_align
554         }
555     }
556 }
557
558 /// Path through fields of nested structures.
559 // FIXME(eddyb) use small vector optimization for the common case.
560 pub type FieldPath = Vec<u32>;
561
562 /// A structure, a product type in ADT terms.
563 #[derive(PartialEq, Eq, Hash, Debug)]
564 pub struct Struct {
565     /// Maximum alignment of fields and repr alignment.
566     pub align: Align,
567
568     /// Primitive alignment of fields without repr alignment.
569     pub primitive_align: Align,
570
571     /// If true, no alignment padding is used.
572     pub packed: bool,
573
574     /// If true, the size is exact, otherwise it's only a lower bound.
575     pub sized: bool,
576
577     /// Offsets for the first byte of each field, ordered to match the source definition order.
578     /// This vector does not go in increasing order.
579     /// FIXME(eddyb) use small vector optimization for the common case.
580     pub offsets: Vec<Size>,
581
582     /// Maps source order field indices to memory order indices, depending on how fields were permuted.
583     /// FIXME (camlorn) also consider small vector optimization here.
584     pub memory_index: Vec<u32>,
585
586     pub min_size: Size,
587 }
588
589 /// Info required to optimize struct layout.
590 #[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Debug)]
591 enum StructKind {
592     /// A tuple, closure, or univariant which cannot be coerced to unsized.
593     AlwaysSizedUnivariant,
594     /// A univariant, the last field of which may be coerced to unsized.
595     MaybeUnsizedUnivariant,
596     /// A univariant, but part of an enum.
597     EnumVariant,
598 }
599
600 impl<'a, 'tcx> Struct {
601     fn new(dl: &TargetDataLayout,
602            fields: &Vec<&'a Layout>,
603            repr: &ReprOptions,
604            kind: StructKind,
605            scapegoat: Ty<'tcx>)
606            -> Result<Struct, LayoutError<'tcx>> {
607         if repr.packed() && repr.align > 0 {
608             bug!("Struct cannot be packed and aligned");
609         }
610
611         let align = if repr.packed() {
612             dl.i8_align
613         } else {
614             dl.aggregate_align
615         };
616
617         let mut ret = Struct {
618             align,
619             primitive_align: align,
620             packed: repr.packed(),
621             sized: true,
622             offsets: vec![],
623             memory_index: vec![],
624             min_size: Size::from_bytes(0),
625         };
626
627         // Anything with repr(C) or repr(packed) doesn't optimize.
628         // Neither do 1-member and 2-member structs.
629         // In addition, code in trans assumes that 2-element structs can become pairs.
630         // It's easier to just short-circuit here.
631         let can_optimize = (fields.len() > 2 || StructKind::EnumVariant == kind)
632             && (repr.flags & ReprFlags::IS_UNOPTIMISABLE).is_empty();
633
634         let (optimize, sort_ascending) = match kind {
635             StructKind::AlwaysSizedUnivariant => (can_optimize, false),
636             StructKind::MaybeUnsizedUnivariant => (can_optimize, false),
637             StructKind::EnumVariant => {
638                 assert!(fields.len() >= 1, "Enum variants must have discriminants.");
639                 (can_optimize && fields[0].size(dl).bytes() == 1, true)
640             }
641         };
642
643         ret.offsets = vec![Size::from_bytes(0); fields.len()];
644         let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();
645
646         if optimize {
647             let start = if let StructKind::EnumVariant = kind { 1 } else { 0 };
648             let end = if let StructKind::MaybeUnsizedUnivariant = kind {
649                 fields.len() - 1
650             } else {
651                 fields.len()
652             };
653             if end > start {
654                 let optimizing = &mut inverse_memory_index[start..end];
655                 if sort_ascending {
656                     optimizing.sort_by_key(|&x| fields[x as usize].align(dl).abi());
657                 } else {
658                     optimizing.sort_by(| &a, &b | {
659                         let a = fields[a as usize].align(dl).abi();
660                         let b = fields[b as usize].align(dl).abi();
661                         b.cmp(&a)
662                     });
663                 }
664             }
665         }
666
667         // inverse_memory_index holds field indices by increasing memory offset.
668         // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
669         // We now write field offsets to the corresponding offset slot;
670         // field 5 with offset 0 puts 0 in offsets[5].
671         // At the bottom of this function, we use inverse_memory_index to produce memory_index.
672
673         if let StructKind::EnumVariant = kind {
674             assert_eq!(inverse_memory_index[0], 0,
675               "Enum variant discriminants must have the lowest offset.");
676         }
677
678         let mut offset = Size::from_bytes(0);
679
680         for i in inverse_memory_index.iter() {
681             let field = fields[*i as usize];
682             if !ret.sized {
683                 bug!("Struct::new: field #{} of `{}` comes after unsized field",
684                      ret.offsets.len(), scapegoat);
685             }
686
687             if field.is_unsized() {
688                 ret.sized = false;
689             }
690
691             // Invariant: offset < dl.obj_size_bound() <= 1<<61
692             if !ret.packed {
693                 let align = field.align(dl);
694                 let primitive_align = field.primitive_align(dl);
695                 ret.align = ret.align.max(align);
696                 ret.primitive_align = ret.primitive_align.max(primitive_align);
697                 offset = offset.abi_align(align);
698             }
699
700             debug!("Struct::new offset: {:?} field: {:?} {:?}", offset, field, field.size(dl));
701             ret.offsets[*i as usize] = offset;
702
703             offset = offset.checked_add(field.size(dl), dl)
704                            .map_or(Err(LayoutError::SizeOverflow(scapegoat)), Ok)?;
705         }
706
707         if repr.align > 0 {
708             let repr_align = repr.align as u64;
709             ret.align = ret.align.max(Align::from_bytes(repr_align, repr_align).unwrap());
710             debug!("Struct::new repr_align: {:?}", repr_align);
711         }
712
713         debug!("Struct::new min_size: {:?}", offset);
714         ret.min_size = offset;
715
716         // As stated above, inverse_memory_index holds field indices by increasing offset.
717         // This makes it an already-sorted view of the offsets vec.
718         // To invert it, consider:
719         // If field 5 has offset 0, then inverse_memory_index[0] is 5, and memory_index[5] should be 0.
720         // In the loop below, memory_index[inverse_memory_index[i]] is therefore set to i.
721         // Note: if we didn't optimize, inverse_memory_index is already the identity and memory_index matches it.
722
723         if optimize {
724             ret.memory_index = vec![0; inverse_memory_index.len()];
725
726             for i in 0..inverse_memory_index.len() {
727                 ret.memory_index[inverse_memory_index[i] as usize] = i as u32;
728             }
729         } else {
730             ret.memory_index = inverse_memory_index;
731         }
732
733         Ok(ret)
734     }
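// NOTE (editor): illustrative sketch, not part of the original source. The final
// loop above inverts the permutation: with three fields laid out in memory as
// inverse_memory_index = [2, 0, 1] (field 2 first), it produces
// memory_index = [1, 2, 0], i.e. field 0 ends up at memory position 1,
// field 1 at position 2 and field 2 at position 0.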
735
736     /// Get the size with trailing alignment padding.
737     pub fn stride(&self) -> Size {
738         self.min_size.abi_align(self.align)
739     }
740
741     /// Determine whether a structure would be zero-sized, given its fields.
742     fn would_be_zero_sized<I>(dl: &TargetDataLayout, fields: I)
743                               -> Result<bool, LayoutError<'tcx>>
744     where I: Iterator<Item=Result<&'a Layout, LayoutError<'tcx>>> {
745         for field in fields {
746             let field = field?;
747             if field.is_unsized() || field.size(dl).bytes() > 0 {
748                 return Ok(false);
749             }
750         }
751         Ok(true)
752     }
753
754     /// Get indices of the tys that made this struct by increasing offset.
755     #[inline]
756     pub fn field_index_by_increasing_offset<'b>(&'b self) -> impl iter::Iterator<Item=usize>+'b {
757         let mut inverse_small = [0u8; 64];
758         let mut inverse_big = vec![];
759         let use_small = self.memory_index.len() <= inverse_small.len();
760
761         // We have to write this logic twice in order to keep the array small.
762         if use_small {
763             for i in 0..self.memory_index.len() {
764                 inverse_small[self.memory_index[i] as usize] = i as u8;
765             }
766         } else {
767             inverse_big = vec![0; self.memory_index.len()];
768             for i in 0..self.memory_index.len() {
769                 inverse_big[self.memory_index[i] as usize] = i as u32;
770             }
771         }
772
773         (0..self.memory_index.len()).map(move |i| {
774             if use_small { inverse_small[i] as usize }
775             else { inverse_big[i] as usize }
776         })
777     }
778
779     /// Find the path leading to a non-zero leaf field, starting from
780     /// the given type and recursing through aggregates.
781     /// The tuple is `(path, source_path)`,
782     /// where `path` is in memory order and `source_path` in source order.
783     // FIXME(eddyb) track value ranges and traverse already optimized enums.
784     fn non_zero_field_in_type(tcx: TyCtxt<'a, 'tcx, 'tcx>,
785                               param_env: ty::ParamEnv<'tcx>,
786                               ty: Ty<'tcx>)
787                               -> Result<Option<(FieldPath, FieldPath)>, LayoutError<'tcx>> {
788         match (ty.layout(tcx, param_env)?, &ty.sty) {
789             (&Scalar { non_zero: true, .. }, _) |
790             (&CEnum { non_zero: true, .. }, _) => Ok(Some((vec![], vec![]))),
791             (&FatPointer { non_zero: true, .. }, _) => {
792                 Ok(Some((vec![FAT_PTR_ADDR as u32], vec![FAT_PTR_ADDR as u32])))
793             }
794
795             // Is this the NonZero lang item wrapping a pointer or integer type?
796             (&Univariant { non_zero: true, .. }, &ty::TyAdt(def, substs)) => {
797                 let fields = &def.struct_variant().fields;
798                 assert_eq!(fields.len(), 1);
799                 match *fields[0].ty(tcx, substs).layout(tcx, param_env)? {
800                     // FIXME(eddyb) also allow floating-point types here.
801                     Scalar { value: Int(_), non_zero: false } |
802                     Scalar { value: Pointer, non_zero: false } => {
803                         Ok(Some((vec![0], vec![0])))
804                     }
805                     FatPointer { non_zero: false, .. } => {
806                         let tmp = vec![FAT_PTR_ADDR as u32, 0];
807                         Ok(Some((tmp.clone(), tmp)))
808                     }
809                     _ => Ok(None)
810                 }
811             }
812
813             // Perhaps one of the fields of this struct is non-zero;
814             // let's recurse and find out.
815             (&Univariant { ref variant, .. }, &ty::TyAdt(def, substs)) if def.is_struct() => {
816                 Struct::non_zero_field_paths(
817                     tcx,
818                     param_env,
819                     def.struct_variant().fields.iter().map(|field| {
820                         field.ty(tcx, substs)
821                     }),
822                     Some(&variant.memory_index[..]))
823             }
824
825             // Perhaps one of the upvars of this closure is non-zero
826             (&Univariant { ref variant, .. }, &ty::TyClosure(def, substs)) => {
827                 let upvar_tys = substs.upvar_tys(def, tcx);
828                 Struct::non_zero_field_paths(
829                     tcx,
830                     param_env,
831                     upvar_tys,
832                     Some(&variant.memory_index[..]))
833             }
834             // Can we use one of the fields in this tuple?
835             (&Univariant { ref variant, .. }, &ty::TyTuple(tys, _)) => {
836                 Struct::non_zero_field_paths(
837                     tcx,
838                     param_env,
839                     tys.iter().cloned(),
840                     Some(&variant.memory_index[..]))
841             }
842
843             // Is this a fixed-size array of something non-zero
844             // with at least one element?
845             (_, &ty::TyArray(ety, mut count)) => {
846                 if count.has_projections() {
847                     count = tcx.normalize_associated_type_in_env(&count, param_env);
848                     if count.has_projections() {
849                         return Err(LayoutError::Unknown(ty));
850                     }
851                 }
852                 if count.val.to_const_int().unwrap().to_u64().unwrap() != 0 {
853                     Struct::non_zero_field_paths(
854                         tcx,
855                         param_env,
856                         Some(ety).into_iter(),
857                         None)
858                 } else {
859                     Ok(None)
860                 }
861             }
862
863             (_, &ty::TyProjection(_)) | (_, &ty::TyAnon(..)) => {
864                 let normalized = tcx.normalize_associated_type_in_env(&ty, param_env);
865                 if ty == normalized {
866                     return Ok(None);
867                 }
868                 return Struct::non_zero_field_in_type(tcx, param_env, normalized);
869             }
870
871             // Anything else is not a non-zero type.
872             _ => Ok(None)
873         }
874     }
875
876     /// Find the path leading to a non-zero leaf field, starting from
877     /// the given set of fields and recursing through aggregates.
878     /// Returns Some((path, source_path)) on success.
879     /// `path` is translated to memory order. `source_path` is not.
880     fn non_zero_field_paths<I>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
881                                param_env: ty::ParamEnv<'tcx>,
882                                fields: I,
883                                permutation: Option<&[u32]>)
884                                -> Result<Option<(FieldPath, FieldPath)>, LayoutError<'tcx>>
885     where I: Iterator<Item=Ty<'tcx>> {
886         for (i, ty) in fields.enumerate() {
887             let r = Struct::non_zero_field_in_type(tcx, param_env, ty)?;
888             if let Some((mut path, mut source_path)) = r {
889                 source_path.push(i as u32);
890                 let index = if let Some(p) = permutation {
891                     p[i] as usize
892                 } else {
893                     i
894                 };
895                 path.push(index as u32);
896                 return Ok(Some((path, source_path)));
897             }
898         }
899         Ok(None)
900     }
901
902     pub fn over_align(&self) -> Option<u32> {
903         let align = self.align.abi();
904         let primitive_align = self.primitive_align.abi();
905         if align > primitive_align {
906             Some(align as u32)
907         } else {
908             None
909         }
910     }
911 }
912
913 /// An untagged union.
914 #[derive(PartialEq, Eq, Hash, Debug)]
915 pub struct Union {
916     pub align: Align,
917     pub primitive_align: Align,
918
919     pub min_size: Size,
920
921     /// If true, no alignment padding is used.
922     pub packed: bool,
923 }
924
925 impl<'a, 'tcx> Union {
926     fn new(dl: &TargetDataLayout, repr: &ReprOptions) -> Union {
927         if repr.packed() && repr.align > 0 {
928             bug!("Union cannot be packed and aligned");
929         }
930
931         let primitive_align = if repr.packed() {
932             dl.i8_align
933         } else {
934             dl.aggregate_align
935         };
936
937         let align = if repr.align > 0 {
938             let repr_align = repr.align as u64;
939             debug!("Union::new repr_align: {:?}", repr_align);
940             primitive_align.max(Align::from_bytes(repr_align, repr_align).unwrap())
941         } else {
942             primitive_align
943         };
944
945         Union {
946             align,
947             primitive_align,
948             min_size: Size::from_bytes(0),
949             packed: repr.packed(),
950         }
951     }
952
953     /// Extend the Union with more fields.
954     fn extend<I>(&mut self, dl: &TargetDataLayout,
955                  fields: I,
956                  scapegoat: Ty<'tcx>)
957                  -> Result<(), LayoutError<'tcx>>
958     where I: Iterator<Item=Result<&'a Layout, LayoutError<'tcx>>> {
959         for (index, field) in fields.enumerate() {
960             let field = field?;
961             if field.is_unsized() {
962                 bug!("Union::extend: field #{} of `{}` is unsized",
963                      index, scapegoat);
964             }
965
966             debug!("Union::extend field: {:?} {:?}", field, field.size(dl));
967
968             if !self.packed {
969                 self.align = self.align.max(field.align(dl));
970                 self.primitive_align = self.primitive_align.max(field.primitive_align(dl));
971             }
972             self.min_size = cmp::max(self.min_size, field.size(dl));
973         }
974
975         debug!("Union::extend min-size: {:?}", self.min_size);
976
977         Ok(())
978     }
979
980     /// Get the size with trailing alignment padding.
981     pub fn stride(&self) -> Size {
982         self.min_size.abi_align(self.align)
983     }
984
985     pub fn over_align(&self) -> Option<u32> {
986         let align = self.align.abi();
987         let primitive_align = self.primitive_align.abi();
988         if align > primitive_align {
989             Some(align as u32)
990         } else {
991             None
992         }
993     }
994 }
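// NOTE (editor): illustrative sketch, not part of the original source. A union's
// min_size is the maximum of its field sizes and its alignment the maximum of the
// field alignments, so on typical targets
//
//     union U { a: u8, b: u32 }
//
// gets min_size = 4 bytes, align = 4 and therefore stride() = 4.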
995
996 /// The first half of a fat pointer.
997 /// - For a trait object, this is the address of the box.
998 /// - For a slice, this is the base address.
999 pub const FAT_PTR_ADDR: usize = 0;
1000
1001 /// The second half of a fat pointer.
1002 /// - For a trait object, this is the address of the vtable.
1003 /// - For a slice, this is the length.
1004 pub const FAT_PTR_EXTRA: usize = 1;
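// NOTE (editor): illustrative sketch, not part of the original source. A fat
// pointer is laid out as two pointer-sized words; for a slice reference,
// word FAT_PTR_ADDR is the data pointer and word FAT_PTR_EXTRA is the length:
//
//     assert_eq!(std::mem::size_of::<&[u8]>(), 2 * std::mem::size_of::<usize>());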
1005
1006 /// Type layout, from which size and alignment can be cheaply computed.
1007 /// For ADTs, it also includes field placement and enum optimizations.
1008 /// NOTE: Because Layout is interned, redundant information should be
1009 /// kept to a minimum, e.g. it includes no sub-component Ty or Layout.
1010 #[derive(Debug, PartialEq, Eq, Hash)]
1011 pub enum Layout {
1012     /// TyBool, TyChar, TyInt, TyUint, TyFloat, TyRawPtr, TyRef or TyFnPtr.
1013     Scalar {
1014         value: Primitive,
1015         // If true, the value cannot represent a bit pattern of all zeroes.
1016         non_zero: bool
1017     },
1018
1019     /// SIMD vectors, from structs marked with #[repr(simd)].
1020     Vector {
1021         element: Primitive,
1022         count: u64
1023     },
1024
1025     /// TyArray, TySlice or TyStr.
1026     Array {
1027         /// If true, the size is exact, otherwise it's only a lower bound.
1028         sized: bool,
1029         align: Align,
1030         primitive_align: Align,
1031         element_size: Size,
1032         count: u64
1033     },
1034
1035     /// TyRawPtr or TyRef with a !Sized pointee.
1036     FatPointer {
1037         metadata: Primitive,
1038         /// If true, the pointer cannot be null.
1039         non_zero: bool
1040     },
1041
1042     // Remaining variants are all ADTs such as structs, enums or tuples.
1043
1044     /// C-like enums; basically an integer.
1045     CEnum {
1046         discr: Integer,
1047         signed: bool,
1048         non_zero: bool,
1049         /// Inclusive discriminant range.
1050         /// If min > max, it represents min...u64::MAX followed by 0...max.
1051         // FIXME(eddyb) always use the shortest range, e.g. by finding
1052         // the largest space between two consecutive discriminants and
1053         // taking everything else as the (shortest) discriminant range.
1054         min: u64,
1055         max: u64
1056     },
1057
1058     /// Single-case enums, and structs/tuples.
1059     Univariant {
1060         variant: Struct,
1061         /// If true, the structure is NonZero.
1062         // FIXME(eddyb) use a newtype Layout kind for this.
1063         non_zero: bool
1064     },
1065
1066     /// Untagged unions.
1067     UntaggedUnion {
1068         variants: Union,
1069     },
1070
1071     /// General-case enums: for each case there is a struct, and they
1072     /// all start with a field for the discriminant.
1073     General {
1074         discr: Integer,
1075         variants: Vec<Struct>,
1076         size: Size,
1077         align: Align,
1078         primitive_align: Align,
1079     },
1080
1081     /// Two cases distinguished by a nullable pointer: the case with discriminant
1082     /// `nndiscr` must have a single field which is known to be nonnull due to its type.
1083     /// The other case is known to be zero sized. Hence we represent the enum
1084     /// as simply a nullable pointer: if not null it indicates the `nndiscr` variant,
1085     /// otherwise it indicates the other case.
1086     ///
1087     /// For example, `std::option::Option` instantiated at a safe pointer type
1088     /// is represented such that `None` is a null pointer and `Some` is the
1089     /// identity function.
1090     RawNullablePointer {
1091         nndiscr: u64,
1092         value: Primitive
1093     },
1094
1095     /// Two cases distinguished by a nullable pointer: the case with discriminant
1096     /// `nndiscr` is represented by the struct `nonnull`, where the `discrfield`th
1097     /// field is known to be nonnull due to its type; if that field is null, then
1098     /// it represents the other case, which is known to be zero sized.
1099     StructWrappedNullablePointer {
1100         nndiscr: u64,
1101         nonnull: Struct,
1102         /// N.B. There is a 0 at the start, for LLVM GEP through a pointer.
1103         discrfield: FieldPath,
1104         /// Like discrfield, but in source order. For debuginfo.
1105         discrfield_source: FieldPath
1106     }
1107 }
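// NOTE (editor): illustrative sketch, not part of the original source. The
// RawNullablePointer case is what makes `Option` of a non-nullable pointer free:
// `None` is the all-zero (null) bit pattern and `Some` is the identity, so
//
//     assert_eq!(std::mem::size_of::<Option<&u8>>(), std::mem::size_of::<&u8>());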
1108
1109 #[derive(Copy, Clone, Debug)]
1110 pub enum LayoutError<'tcx> {
1111     Unknown(Ty<'tcx>),
1112     SizeOverflow(Ty<'tcx>)
1113 }
1114
1115 impl<'tcx> fmt::Display for LayoutError<'tcx> {
1116     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
1117         match *self {
1118             LayoutError::Unknown(ty) => {
1119                 write!(f, "the type `{:?}` has an unknown layout", ty)
1120             }
1121             LayoutError::SizeOverflow(ty) => {
1122                 write!(f, "the type `{:?}` is too big for the current architecture", ty)
1123             }
1124         }
1125     }
1126 }
1127
1128 impl<'a, 'tcx> Layout {
1129     pub fn compute_uncached(tcx: TyCtxt<'a, 'tcx, 'tcx>,
1130                             param_env: ty::ParamEnv<'tcx>,
1131                             ty: Ty<'tcx>)
1132                             -> Result<&'tcx Layout, LayoutError<'tcx>> {
1133         let success = |layout| Ok(tcx.intern_layout(layout));
1134         let dl = &tcx.data_layout;
1135         assert!(!ty.has_infer_types());
1136
1137         let ptr_layout = |pointee: Ty<'tcx>| {
1138             let non_zero = !ty.is_unsafe_ptr();
1139             let pointee = tcx.normalize_associated_type_in_env(&pointee, param_env);
1140             if pointee.is_sized(tcx, param_env, DUMMY_SP) {
1141                 Ok(Scalar { value: Pointer, non_zero: non_zero })
1142             } else {
1143                 let unsized_part = tcx.struct_tail(pointee);
1144                 let meta = match unsized_part.sty {
1145                     ty::TySlice(_) | ty::TyStr => {
1146                         Int(dl.ptr_sized_integer())
1147                     }
1148                     ty::TyDynamic(..) => Pointer,
1149                     _ => return Err(LayoutError::Unknown(unsized_part))
1150                 };
1151                 Ok(FatPointer { metadata: meta, non_zero: non_zero })
1152             }
1153         };
1154
1155         let layout = match ty.sty {
1156             // Basic scalars.
1157             ty::TyBool => Scalar { value: Int(I1), non_zero: false },
1158             ty::TyChar => Scalar { value: Int(I32), non_zero: false },
1159             ty::TyInt(ity) => {
1160                 Scalar {
1161                     value: Int(Integer::from_attr(dl, attr::SignedInt(ity))),
1162                     non_zero: false
1163                 }
1164             }
1165             ty::TyUint(ity) => {
1166                 Scalar {
1167                     value: Int(Integer::from_attr(dl, attr::UnsignedInt(ity))),
1168                     non_zero: false
1169                 }
1170             }
1171             ty::TyFloat(FloatTy::F32) => Scalar { value: F32, non_zero: false },
1172             ty::TyFloat(FloatTy::F64) => Scalar { value: F64, non_zero: false },
1173             ty::TyFnPtr(_) => Scalar { value: Pointer, non_zero: true },
1174
1175             // The never type.
1176             ty::TyNever => Univariant {
1177                 variant: Struct::new(dl, &vec![], &ReprOptions::default(),
1178                   StructKind::AlwaysSizedUnivariant, ty)?,
1179                 non_zero: false
1180             },
1181
1182             // Potentially-fat pointers.
1183             ty::TyRef(_, ty::TypeAndMut { ty: pointee, .. }) |
1184             ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
1185                 ptr_layout(pointee)?
1186             }
1187             ty::TyAdt(def, _) if def.is_box() => {
1188                 ptr_layout(ty.boxed_ty())?
1189             }
1190
1191             // Arrays and slices.
1192             ty::TyArray(element, mut count) => {
1193                 if count.has_projections() {
1194                     count = tcx.normalize_associated_type_in_env(&count, param_env);
1195                     if count.has_projections() {
1196                         return Err(LayoutError::Unknown(ty));
1197                     }
1198                 }
1199
1200                 let element = element.layout(tcx, param_env)?;
1201                 let element_size = element.size(dl);
1202                 let count = count.val.to_const_int().unwrap().to_u64().unwrap();
1203                 if element_size.checked_mul(count, dl).is_none() {
1204                     return Err(LayoutError::SizeOverflow(ty));
1205                 }
1206                 Array {
1207                     sized: true,
1208                     align: element.align(dl),
1209                     primitive_align: element.primitive_align(dl),
1210                     element_size,
1211                     count,
1212                 }
1213             }
1214             ty::TySlice(element) => {
1215                 let element = element.layout(tcx, param_env)?;
1216                 Array {
1217                     sized: false,
1218                     align: element.align(dl),
1219                     primitive_align: element.primitive_align(dl),
1220                     element_size: element.size(dl),
1221                     count: 0
1222                 }
1223             }
1224             ty::TyStr => {
1225                 Array {
1226                     sized: false,
1227                     align: dl.i8_align,
1228                     primitive_align: dl.i8_align,
1229                     element_size: Size::from_bytes(1),
1230                     count: 0
1231                 }
1232             }
1233
1234             // Odd unit types.
1235             ty::TyFnDef(..) => {
1236                 Univariant {
1237                     variant: Struct::new(dl, &vec![],
1238                       &ReprOptions::default(), StructKind::AlwaysSizedUnivariant, ty)?,
1239                     non_zero: false
1240                 }
1241             }
1242             ty::TyDynamic(..) => {
1243                 let mut unit = Struct::new(dl, &vec![], &ReprOptions::default(),
1244                   StructKind::AlwaysSizedUnivariant, ty)?;
1245                 unit.sized = false;
1246                 Univariant { variant: unit, non_zero: false }
1247             }
1248
1249             // Tuples, generators and closures.
1250             ty::TyGenerator(def_id, ref substs, _) => {
1251                 let tys = substs.field_tys(def_id, tcx);
1252                 let st = Struct::new(dl,
1253                     &tys.map(|ty| ty.layout(tcx, param_env))
1254                       .collect::<Result<Vec<_>, _>>()?,
1255                     &ReprOptions::default(),
1256                     StructKind::AlwaysSizedUnivariant, ty)?;
1257                 Univariant { variant: st, non_zero: false }
1258             }
1259
1260             ty::TyClosure(def_id, ref substs) => {
1261                 let tys = substs.upvar_tys(def_id, tcx);
1262                 let st = Struct::new(dl,
1263                     &tys.map(|ty| ty.layout(tcx, param_env))
1264                       .collect::<Result<Vec<_>, _>>()?,
1265                     &ReprOptions::default(),
1266                     StructKind::AlwaysSizedUnivariant, ty)?;
1267                 Univariant { variant: st, non_zero: false }
1268             }
1269
1270             ty::TyTuple(tys, _) => {
1271                 let kind = if tys.len() == 0 {
1272                     StructKind::AlwaysSizedUnivariant
1273                 } else {
1274                     StructKind::MaybeUnsizedUnivariant
1275                 };
1276
1277                 let st = Struct::new(dl,
1278                     &tys.iter().map(|ty| ty.layout(tcx, param_env))
1279                       .collect::<Result<Vec<_>, _>>()?,
1280                     &ReprOptions::default(), kind, ty)?;
1281                 Univariant { variant: st, non_zero: false }
1282             }
1283
1284             // SIMD vector types.
1285             ty::TyAdt(def, ..) if def.repr.simd() => {
1286                 let element = ty.simd_type(tcx);
1287                 match *element.layout(tcx, param_env)? {
1288                     Scalar { value, .. } => {
1289                         return success(Vector {
1290                             element: value,
1291                             count: ty.simd_size(tcx) as u64
1292                         });
1293                     }
1294                     _ => {
1295                         tcx.sess.fatal(&format!("monomorphising SIMD type `{}` with \
1296                                                 a non-machine element type `{}`",
1297                                                 ty, element));
1298                     }
1299                 }
1300             }
1301
1302             // ADTs.
1303             ty::TyAdt(def, substs) => {
1304                 if def.variants.is_empty() {
1305                     // Uninhabited; represent as unit
1306                     // (Typechecking will reject discriminant-sizing attrs.)
1307
1308                     return success(Univariant {
1309                         variant: Struct::new(dl, &vec![],
1310                           &def.repr, StructKind::AlwaysSizedUnivariant, ty)?,
1311                         non_zero: false
1312                     });
1313                 }
1314
1315                 if def.is_enum() && def.variants.iter().all(|v| v.fields.is_empty()) {
1316                     // All bodies empty -> intlike
1317                     let (mut min, mut max, mut non_zero) = (i64::max_value(),
1318                                                             i64::min_value(),
1319                                                             true);
1320                     for discr in def.discriminants(tcx) {
1321                         let x = discr.to_u128_unchecked() as i64;
1322                         if x == 0 { non_zero = false; }
1323                         if x < min { min = x; }
1324                         if x > max { max = x; }
1325                     }
1326
1327                     // FIXME: should handle i128? signed-value based impl is weird and hard to
1328                     // grok.
1329                     let (discr, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max);
1330                     return success(CEnum {
1331                         discr,
1332                         signed,
1333                         non_zero,
1334                         // FIXME: should be u128?
1335                         min: min as u64,
1336                         max: max as u64
1337                     });
1338                 }
1339
1340                 if !def.is_enum() || (def.variants.len() == 1 &&
1341                                       !def.repr.inhibit_enum_layout_opt()) {
1342                     // Struct, or union, or univariant enum equivalent to a struct.
1343                     // (Typechecking will reject discriminant-sizing attrs.)
1344
1345                     let kind = if def.is_enum() || def.variants[0].fields.len() == 0 {
1346                         StructKind::AlwaysSizedUnivariant
1347                     } else {
1348                         let param_env = tcx.param_env(def.did);
1349                         let fields = &def.variants[0].fields;
1350                         let last_field = &fields[fields.len()-1];
1351                         let always_sized = tcx.type_of(last_field.did)
1352                           .is_sized(tcx, param_env, DUMMY_SP);
1353                         if !always_sized { StructKind::MaybeUnsizedUnivariant }
1354                         else { StructKind::AlwaysSizedUnivariant }
1355                     };
1356
1357                     let fields = def.variants[0].fields.iter().map(|field| {
1358                         field.ty(tcx, substs).layout(tcx, param_env)
1359                     }).collect::<Result<Vec<_>, _>>()?;
1360                     let layout = if def.is_union() {
1361                         let mut un = Union::new(dl, &def.repr);
1362                         un.extend(dl, fields.iter().map(|&f| Ok(f)), ty)?;
1363                         UntaggedUnion { variants: un }
1364                     } else {
1365                         let st = Struct::new(dl, &fields, &def.repr,
1366                           kind, ty)?;
1367                         let non_zero = Some(def.did) == tcx.lang_items().non_zero();
1368                         Univariant { variant: st, non_zero: non_zero }
1369                     };
1370                     return success(layout);
1371                 }
1372
1373                 // Since there's at least one
1374                 // non-empty body, explicit discriminants should have
1375                 // been rejected by a checker before this point.
1376                 for (i, v) in def.variants.iter().enumerate() {
1377                     if v.discr != ty::VariantDiscr::Relative(i) {
1378                         bug!("non-C-like enum {} with specified discriminants",
1379                             tcx.item_path_str(def.did));
1380                     }
1381                 }
1382
1383                 // Cache the substituted and normalized variant field types.
1384                 let variants = def.variants.iter().map(|v| {
1385                     v.fields.iter().map(|field| field.ty(tcx, substs)).collect::<Vec<_>>()
1386                 }).collect::<Vec<_>>();
1387
1388                 if variants.len() == 2 && !def.repr.inhibit_enum_layout_opt() {
1389                     // Nullable pointer optimization
1390                     for discr in 0..2 {
1391                         let other_fields = variants[1 - discr].iter().map(|ty| {
1392                             ty.layout(tcx, param_env)
1393                         });
1394                         if !Struct::would_be_zero_sized(dl, other_fields)? {
1395                             continue;
1396                         }
1397                         let paths = Struct::non_zero_field_paths(tcx,
1398                                                                  param_env,
1399                                                                  variants[discr].iter().cloned(),
1400                                                                  None)?;
1401                         let (mut path, mut path_source) = if let Some(p) = paths { p }
1402                           else { continue };
1403
1404                         // FIXME(eddyb) should take advantage of a newtype.
1405                         if path == &[0] && variants[discr].len() == 1 {
1406                             let value = match *variants[discr][0].layout(tcx, param_env)? {
1407                                 Scalar { value, .. } => value,
1408                                 CEnum { discr, .. } => Int(discr),
1409                                 _ => bug!("Layout::compute: `{}`'s non-zero \
1410                                            `{}` field not scalar?!",
1411                                            ty, variants[discr][0])
1412                             };
1413                             return success(RawNullablePointer {
1414                                 nndiscr: discr as u64,
1415                                 value,
1416                             });
1417                         }
1418
1419                         let st = Struct::new(dl,
1420                             &variants[discr].iter().map(|ty| ty.layout(tcx, param_env))
1421                               .collect::<Result<Vec<_>, _>>()?,
1422                             &def.repr, StructKind::AlwaysSizedUnivariant, ty)?;
1423
1424                         // We have to fix the last element of path here.
1425                         let mut i = *path.last().unwrap();
1426                         i = st.memory_index[i as usize];
1427                         *path.last_mut().unwrap() = i;
1428                         path.push(0); // For GEP through a pointer.
1429                         path.reverse();
1430                         path_source.push(0);
1431                         path_source.reverse();
1432
1433                         return success(StructWrappedNullablePointer {
1434                             nndiscr: discr as u64,
1435                             nonnull: st,
1436                             discrfield: path,
1437                             discrfield_source: path_source
1438                         });
1439                     }
1440                 }
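                // Illustrative example of the optimization above: for `Option<&T>`,
                // the `None` variant has no fields (zero-sized) and the single field
                // of `Some` is a non-null pointer, so the enum is laid out as
                // `RawNullablePointer { nndiscr: 1, value: Pointer }` with no separate
                // discriminant; `None` is represented by the null pointer value.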
1441
1442                 // The general case.
1443                 let discr_max = (variants.len() - 1) as i64;
1444                 assert!(discr_max >= 0);
1445                 let (min_ity, _) = Integer::repr_discr(tcx, ty, &def.repr, 0, discr_max);
1446                 let mut align = dl.aggregate_align;
1447                 let mut primitive_align = dl.aggregate_align;
1448                 let mut size = Size::from_bytes(0);
1449
1450                 // We're interested in the smallest alignment, so start large.
1451                 let mut start_align = Align::from_bytes(256, 256).unwrap();
1452
1453                 // Create the set of structs that represent each variant
1454                 // Use the minimum integer type we figured out above
1455                 let discr = Scalar { value: Int(min_ity), non_zero: false };
1456                 let mut variants = variants.into_iter().map(|fields| {
1457                     let mut fields = fields.into_iter().map(|field| {
1458                         field.layout(tcx, param_env)
1459                     }).collect::<Result<Vec<_>, _>>()?;
1460                     fields.insert(0, &discr);
1461                     let st = Struct::new(dl,
1462                         &fields,
1463                         &def.repr, StructKind::EnumVariant, ty)?;
1464                     // Find the first field we can't move later
1465                     // to make room for a larger discriminant.
1466                     // It is important to skip the first field.
1467                     for i in st.field_index_by_increasing_offset().skip(1) {
1468                         let field = fields[i];
1469                         let field_align = field.align(dl);
1470                         if field.size(dl).bytes() != 0 || field_align.abi() != 1 {
1471                             start_align = start_align.min(field_align);
1472                             break;
1473                         }
1474                     }
1475                     size = cmp::max(size, st.min_size);
1476                     align = align.max(st.align);
1477                     primitive_align = primitive_align.max(st.primitive_align);
1478                     Ok(st)
1479                 }).collect::<Result<Vec<_>, _>>()?;
1480
1481                 // Align the maximum variant size to the largest alignment.
1482                 size = size.abi_align(align);
1483
1484                 if size.bytes() >= dl.obj_size_bound() {
1485                     return Err(LayoutError::SizeOverflow(ty));
1486                 }
1487
1488                 let typeck_ity = Integer::from_attr(dl, def.repr.discr_type());
1489                 if typeck_ity < min_ity {
1490                     // It is a bug if Layout decided on a greater discriminant size than
1491                     // typeck did at this point (based on the values the discriminant can
1492                     // take on), because the discriminant will be loaded and then stored
1493                     // into a variable of the type computed by typeck. Consider this
1494                     // (buggy) case: typeck decided on a byte-sized discriminant, but
1495                     // layout thinks we need 16 bits to store all the discriminant values.
1496                     // Then, in trans, storing that 16-bit discriminant into an 8-bit
1497                     // temporary would have to discard some of the bits needed to
1498                     // represent it (or layout is wrong to think it needs 16 bits).
1499                     bug!("layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
1500                          min_ity, typeck_ity);
1501                     // However, it is fine to make the discr type larger (as an optimisation)
1502                     // after this point - we'll just truncate the value we load in trans.
1503                 }
1504
1505                 // Check whether we should use a different type for the
1506                 // discriminant. We can safely use a type with the same size
1507                 // as the alignment of the first field of each variant.
1508                 // We increase the size of the discriminant to avoid LLVM copying
1509                 // padding when it doesn't need to; copying padding normally causes
1510                 // unaligned loads/stores and excessive memcpy/memset operations.
1511                 // By using a bigger integer size, LLVM can be sure about its
1512                 // contents and won't be so conservative.
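                // Illustrative example (on a typical 64-bit target): for
                // `enum E { A(u64), B(u64) }`, min_ity is I8 (only two variants), but
                // the first field of each variant is 8-byte aligned, so `ity` below is
                // widened to I64 and the discriminant occupies bytes that would
                // otherwise have been padding before the `u64` field.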
1513
1514                 // Use the initial field alignment
1515                 let mut ity = Integer::for_abi_align(dl, start_align).unwrap_or(min_ity);
1516
1517                 // If the alignment is not larger than the chosen discriminant size,
1518                 // don't use the alignment as the final size.
1519                 if ity <= min_ity {
1520                     ity = min_ity;
1521                 } else {
1522                     // Patch up the variants' first few fields.
1523                     let old_ity_size = Int(min_ity).size(dl);
1524                     let new_ity_size = Int(ity).size(dl);
1525                     for variant in &mut variants {
1526                         for i in variant.offsets.iter_mut() {
1527                             // The first field is the discriminant, at offset 0.
1528                             // The offsets aren't in field order, so we need to skip it explicitly.
1529                             if *i <= old_ity_size && *i > Size::from_bytes(0) {
1530                                 *i = new_ity_size;
1531                             }
1532                         }
1533                         // We might be making the struct larger.
1534                         if variant.min_size <= old_ity_size {
1535                             variant.min_size = new_ity_size;
1536                         }
1537                     }
1538                 }
1539
1540                 General {
1541                     discr: ity,
1542                     variants,
1543                     size,
1544                     align,
1545                     primitive_align,
1546                 }
1547             }
1548
1549             // Types with no meaningful known layout.
1550             ty::TyProjection(_) | ty::TyAnon(..) => {
1551                 let normalized = tcx.normalize_associated_type_in_env(&ty, param_env);
1552                 if ty == normalized {
1553                     return Err(LayoutError::Unknown(ty));
1554                 }
1555                 return normalized.layout(tcx, param_env);
1556             }
1557             ty::TyParam(_) => {
1558                 return Err(LayoutError::Unknown(ty));
1559             }
1560             ty::TyInfer(_) | ty::TyError => {
1561                 bug!("Layout::compute: unexpected type `{}`", ty)
1562             }
1563         };
1564
1565         success(layout)
1566     }
1567
1568     /// Returns true if the layout corresponds to an unsized type.
1569     pub fn is_unsized(&self) -> bool {
1570         match *self {
1571             Scalar {..} | Vector {..} | FatPointer {..} |
1572             CEnum {..} | UntaggedUnion {..} | General {..} |
1573             RawNullablePointer {..} |
1574             StructWrappedNullablePointer {..} => false,
1575
1576             Array { sized, .. } |
1577             Univariant { variant: Struct { sized, .. }, .. } => !sized
1578         }
1579     }
1580
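    /// Returns the total size in bytes of a value with this layout, including
    /// trailing padding (for unsized types, only the statically known portion).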
1581     pub fn size<C: HasDataLayout>(&self, cx: C) -> Size {
1582         let dl = cx.data_layout();
1583
1584         match *self {
1585             Scalar { value, .. } | RawNullablePointer { value, .. } => {
1586                 value.size(dl)
1587             }
1588
1589             Vector { element, count } => {
1590                 let element_size = element.size(dl);
1591                 let vec_size = match element_size.checked_mul(count, dl) {
1592                     Some(size) => size,
1593                     None => bug!("Layout::size({:?}): {} * {} overflowed",
1594                                  self, element_size.bytes(), count)
1595                 };
1596                 vec_size.abi_align(self.align(dl))
1597             }
1598
1599             Array { element_size, count, .. } => {
1600                 match element_size.checked_mul(count, dl) {
1601                     Some(size) => size,
1602                     None => bug!("Layout::size({:?}): {} * {} overflowed",
1603                                  self, element_size.bytes(), count)
1604                 }
1605             }
1606
1607             FatPointer { metadata, .. } => {
1608                 // Effectively a (ptr, meta) tuple.
1609                 Pointer.size(dl).abi_align(metadata.align(dl))
1610                        .checked_add(metadata.size(dl), dl).unwrap()
1611                        .abi_align(self.align(dl))
1612             }
1613
1614             CEnum { discr, .. } => Int(discr).size(dl),
1615             General { size, .. } => size,
1616             UntaggedUnion { ref variants } => variants.stride(),
1617
1618             Univariant { ref variant, .. } |
1619             StructWrappedNullablePointer { nonnull: ref variant, .. } => {
1620                 variant.stride()
1621             }
1622         }
1623     }
1624
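    /// Returns the required (ABI) and preferred alignment of a value with this layout.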
1625     pub fn align<C: HasDataLayout>(&self, cx: C) -> Align {
1626         let dl = cx.data_layout();
1627
1628         match *self {
1629             Scalar { value, .. } | RawNullablePointer { value, .. } => {
1630                 value.align(dl)
1631             }
1632
1633             Vector { element, count } => {
1634                 let elem_size = element.size(dl);
1635                 let vec_size = match elem_size.checked_mul(count, dl) {
1636                     Some(size) => size,
1637                     None => bug!("Layout::align({:?}): {} * {} overflowed",
1638                                  self, elem_size.bytes(), count)
1639                 };
1640                 for &(size, align) in &dl.vector_align {
1641                     if size == vec_size {
1642                         return align;
1643                     }
1644                 }
1645                 // Default to natural alignment, which is what LLVM does.
1646                 // That is, use the size, rounded up to a power of 2.
1647                 let align = vec_size.bytes().next_power_of_two();
1648                 Align::from_bytes(align, align).unwrap()
1649             }
1650
1651             FatPointer { metadata, .. } => {
1652                 // Effectively a (ptr, meta) tuple.
1653                 Pointer.align(dl).max(metadata.align(dl))
1654             }
1655
1656             CEnum { discr, .. } => Int(discr).align(dl),
1657             Array { align, .. } | General { align, .. } => align,
1658             UntaggedUnion { ref variants } => variants.align,
1659
1660             Univariant { ref variant, .. } |
1661             StructWrappedNullablePointer { nonnull: ref variant, .. } => {
1662                 variant.align
1663             }
1664         }
1665     }
1666
1667     /// Returns the alignment before any repr alignment is applied.
1668     pub fn primitive_align(&self, dl: &TargetDataLayout) -> Align {
1669         match *self {
1670             Array { primitive_align, .. } | General { primitive_align, .. } => primitive_align,
1671             Univariant { ref variant, .. } |
1672             StructWrappedNullablePointer { nonnull: ref variant, .. } => {
1673                 variant.primitive_align
1674             },
1675
1676             _ => self.align(dl)
1677         }
1678     }
1679
1680     /// Returns repr alignment if it is greater than the primitive alignment.
1681     pub fn over_align(&self, dl: &TargetDataLayout) -> Option<u32> {
1682         let align = self.align(dl);
1683         let primitive_align = self.primitive_align(dl);
1684         if align.abi() > primitive_align.abi() {
1685             Some(align.abi() as u32)
1686         } else {
1687             None
1688         }
1689     }
1690
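    /// Returns the byte offset of field `i` in this layout. For `General`
    /// (tagged enum) layouts a `variant_index` must be supplied; the `i + 1`
    /// indexing below skips the discriminant stored at index 0 of each
    /// variant's struct.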
1691     pub fn field_offset<C: HasDataLayout>(&self,
1692                                           cx: C,
1693                                           i: usize,
1694                                           variant_index: Option<usize>)
1695                                           -> Size {
1696         let dl = cx.data_layout();
1697
1698         match *self {
1699             Scalar { .. } |
1700             CEnum { .. } |
1701             UntaggedUnion { .. } |
1702             RawNullablePointer { .. } => {
1703                 Size::from_bytes(0)
1704             }
1705
1706             Vector { element, count } => {
1707                 let element_size = element.size(dl);
1708                 let i = i as u64;
1709                 assert!(i < count);
1710                 Size::from_bytes(element_size.bytes() * i)
1711             }
1712
1713             Array { element_size, count, .. } => {
1714                 let i = i as u64;
1715                 assert!(i < count);
1716                 Size::from_bytes(element_size.bytes() * i)
1717             }
1718
1719             FatPointer { metadata, .. } => {
1720                 // Effectively a (ptr, meta) tuple.
1721                 assert!(i < 2);
1722                 if i == 0 {
1723                     Size::from_bytes(0)
1724                 } else {
1725                     Pointer.size(dl).abi_align(metadata.align(dl))
1726                 }
1727             }
1728
1729             Univariant { ref variant, .. } => variant.offsets[i],
1730
1731             General { ref variants, .. } => {
1732                 let v = variant_index.expect("variant index required");
1733                 variants[v].offsets[i + 1]
1734             }
1735
1736             StructWrappedNullablePointer { nndiscr, ref nonnull, .. } => {
1737                 if Some(nndiscr as usize) == variant_index {
1738                     nonnull.offsets[i]
1739                 } else {
1740                     Size::from_bytes(0)
1741                 }
1742             }
1743         }
1744     }
1745
1746     /// This is invoked by the `layout_raw` query to record the final
1747     /// layout of each type.
1748     #[inline]
1749     pub fn record_layout_for_printing(tcx: TyCtxt<'a, 'tcx, 'tcx>,
1750                                       ty: Ty<'tcx>,
1751                                       param_env: ty::ParamEnv<'tcx>,
1752                                       layout: &Layout) {
1753         // If we are running with `-Zprint-type-sizes`, record layouts for
1754         // dumping later. Ignore layouts computed in non-empty parameter
1755         // environments or for non-monomorphic types, as the user only wants
1756         // to see the sizes resulting from the final trans session.
1757         if
1758             !tcx.sess.opts.debugging_opts.print_type_sizes ||
1759             ty.has_param_types() ||
1760             ty.has_self_ty() ||
1761             !param_env.caller_bounds.is_empty()
1762         {
1763             return;
1764         }
1765
1766         Self::record_layout_for_printing_outlined(tcx, ty, param_env, layout)
1767     }
1768
1769     fn record_layout_for_printing_outlined(tcx: TyCtxt<'a, 'tcx, 'tcx>,
1770                                            ty: Ty<'tcx>,
1771                                            param_env: ty::ParamEnv<'tcx>,
1772                                            layout: &Layout) {
1773         // (delay format until we actually need it)
1774         let record = |kind, opt_discr_size, variants| {
1775             let type_desc = format!("{:?}", ty);
1776             let overall_size = layout.size(tcx);
1777             let align = layout.align(tcx);
1778             tcx.sess.code_stats.borrow_mut().record_type_size(kind,
1779                                                               type_desc,
1780                                                               align,
1781                                                               overall_size,
1782                                                               opt_discr_size,
1783                                                               variants);
1784         };
1785
1786         let (adt_def, substs) = match ty.sty {
1787             ty::TyAdt(ref adt_def, substs) => {
1788                 debug!("print-type-size t: `{:?}` process adt", ty);
1789                 (adt_def, substs)
1790             }
1791
1792             ty::TyClosure(..) => {
1793                 debug!("print-type-size t: `{:?}` record closure", ty);
1794                 record(DataTypeKind::Closure, None, vec![]);
1795                 return;
1796             }
1797
1798             _ => {
1799                 debug!("print-type-size t: `{:?}` skip non-nominal", ty);
1800                 return;
1801             }
1802         };
1803
1804         let adt_kind = adt_def.adt_kind();
1805
1806         let build_field_info = |(field_name, field_ty): (ast::Name, Ty<'tcx>), offset: &Size| {
1807             let layout = field_ty.layout(tcx, param_env);
1808             match layout {
1809                 Err(_) => bug!("no layout found for field {} type: `{:?}`", field_name, field_ty),
1810                 Ok(field_layout) => {
1811                     session::FieldInfo {
1812                         name: field_name.to_string(),
1813                         offset: offset.bytes(),
1814                         size: field_layout.size(tcx).bytes(),
1815                         align: field_layout.align(tcx).abi(),
1816                     }
1817                 }
1818             }
1819         };
1820
1821         let build_primitive_info = |name: ast::Name, value: &Primitive| {
1822             session::VariantInfo {
1823                 name: Some(name.to_string()),
1824                 kind: session::SizeKind::Exact,
1825                 align: value.align(tcx).abi(),
1826                 size: value.size(tcx).bytes(),
1827                 fields: vec![],
1828             }
1829         };
1830
1831         enum Fields<'a> {
1832             WithDiscrim(&'a Struct),
1833             NoDiscrim(&'a Struct),
1834         }
1835
1836         let build_variant_info = |n: Option<ast::Name>,
1837                                   flds: &[(ast::Name, Ty<'tcx>)],
1838                                   layout: Fields| {
1839             let (s, field_offsets) = match layout {
1840                 Fields::WithDiscrim(s) => (s, &s.offsets[1..]),
1841                 Fields::NoDiscrim(s) => (s, &s.offsets[0..]),
1842             };
1843             let field_info: Vec<_> =
1844                 flds.iter()
1845                     .zip(field_offsets.iter())
1846                     .map(|(&field_name_ty, offset)| build_field_info(field_name_ty, offset))
1847                     .collect();
1848
1849             session::VariantInfo {
1850                 name: n.map(|n| n.to_string()),
1851                 kind: if s.sized {
1852                     session::SizeKind::Exact
1853                 } else {
1854                     session::SizeKind::Min
1855                 },
1856                 align: s.align.abi(),
1857                 size: s.min_size.bytes(),
1858                 fields: field_info,
1859             }
1860         };
1861
1862         match *layout {
1863             Layout::StructWrappedNullablePointer { nonnull: ref variant_layout,
1864                                                    nndiscr,
1865                                                    discrfield: _,
1866                                                    discrfield_source: _ } => {
1867                 debug!("print-type-size t: `{:?}` adt struct-wrapped nullable nndiscr {} is {:?}",
1868                        ty, nndiscr, variant_layout);
1869                 let variant_def = &adt_def.variants[nndiscr as usize];
1870                 let fields: Vec<_> =
1871                     variant_def.fields.iter()
1872                                       .map(|field_def| (field_def.name, field_def.ty(tcx, substs)))
1873                                       .collect();
1874                 record(adt_kind.into(),
1875                        None,
1876                        vec![build_variant_info(Some(variant_def.name),
1877                                                &fields,
1878                                                Fields::NoDiscrim(variant_layout))]);
1879             }
1880             Layout::RawNullablePointer { nndiscr, value } => {
1881                 debug!("print-type-size t: `{:?}` adt raw nullable nndiscr {} is {:?}",
1882                        ty, nndiscr, value);
1883                 let variant_def = &adt_def.variants[nndiscr as usize];
1884                 record(adt_kind.into(), None,
1885                        vec![build_primitive_info(variant_def.name, &value)]);
1886             }
1887             Layout::Univariant { variant: ref variant_layout, non_zero: _ } => {
1888                 let variant_names = || {
1889                     adt_def.variants.iter().map(|v| format!("{}", v.name)).collect::<Vec<_>>()
1890                 };
1891                 debug!("print-type-size t: `{:?}` adt univariant {:?} variants: {:?}",
1892                        ty, variant_layout, variant_names());
1893                 assert!(adt_def.variants.len() <= 1,
1894                         "univariant with variants {:?}", variant_names());
1895                 if adt_def.variants.len() == 1 {
1896                     let variant_def = &adt_def.variants[0];
1897                     let fields: Vec<_> =
1898                         variant_def.fields.iter()
1899                                           .map(|f| (f.name, f.ty(tcx, substs)))
1900                                           .collect();
1901                     record(adt_kind.into(),
1902                            None,
1903                            vec![build_variant_info(Some(variant_def.name),
1904                                                    &fields,
1905                                                    Fields::NoDiscrim(variant_layout))]);
1906                 } else {
1907                     // (This case arises for *empty* enums, so give it
1908                     // zero variants.)
1909                     record(adt_kind.into(), None, vec![]);
1910                 }
1911             }
1912
1913             Layout::General { ref variants, discr, .. } => {
1914                 debug!("print-type-size t: `{:?}` adt general variants def {} layouts {} {:?}",
1915                        ty, adt_def.variants.len(), variants.len(), variants);
1916                 let variant_infos: Vec<_> =
1917                     adt_def.variants.iter()
1918                                     .zip(variants.iter())
1919                                     .map(|(variant_def, variant_layout)| {
1920                                         let fields: Vec<_> =
1921                                             variant_def.fields
1922                                                        .iter()
1923                                                        .map(|f| (f.name, f.ty(tcx, substs)))
1924                                                        .collect();
1925                                         build_variant_info(Some(variant_def.name),
1926                                                            &fields,
1927                                                            Fields::WithDiscrim(variant_layout))
1928                                     })
1929                                     .collect();
1930                 record(adt_kind.into(), Some(discr.size()), variant_infos);
1931             }
1932
1933             Layout::UntaggedUnion { ref variants } => {
1934                 debug!("print-type-size t: `{:?}` adt union variants {:?}",
1935                        ty, variants);
1936                 // layout does not currently store info about each
1937                 // variant...
1938                 record(adt_kind.into(), None, Vec::new());
1939             }
1940
1941             Layout::CEnum { discr, .. } => {
1942                 debug!("print-type-size t: `{:?}` adt c-like enum", ty);
1943                 let variant_infos: Vec<_> =
1944                     adt_def.variants.iter()
1945                                     .map(|variant_def| {
1946                                         build_primitive_info(variant_def.name,
1947                                                              &Primitive::Int(discr))
1948                                     })
1949                                     .collect();
1950                 record(adt_kind.into(), Some(discr.size()), variant_infos);
1951             }
1952
1953             // Other cases provide little size info that is interesting (i.e.
1954             // adjustable via representation tweaks) beyond the total size.
1955             Layout::Scalar { .. } |
1956             Layout::Vector { .. } |
1957             Layout::Array { .. } |
1958             Layout::FatPointer { .. } => {
1959                 debug!("print-type-size t: `{:?}` adt other", ty);
1960                 record(adt_kind.into(), None, Vec::new())
1961             }
1962         }
1963     }
1964 }
1965
1966 /// Type size "skeleton", i.e. the only information determining a type's size.
1967 /// While this is conservative (aside from constant sizes, only pointers,
1968 /// newtypes thereof, and null-pointer-optimized enums are allowed), it is
1969 /// enough to statically check common use cases of transmute.
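/// For example, `&T` and `Option<&T>`, where `T: ?Sized` is a type parameter,
/// both reduce to `SizeSkeleton::Pointer { tail: T, .. }`, so a transmute
/// between them can be accepted even though `T`'s concrete layout is unknown.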
1970 #[derive(Copy, Clone, Debug)]
1971 pub enum SizeSkeleton<'tcx> {
1972     /// Any statically computable Layout.
1973     Known(Size),
1974
1975     /// A potentially-fat pointer.
1976     Pointer {
1977         /// If true, this pointer is never null.
1978         non_zero: bool,
1979         /// The type which determines the unsized metadata, if any,
1980         /// of this pointer. Either a type parameter or a projection
1981         /// depending on one, with regions erased.
1982         tail: Ty<'tcx>
1983     }
1984 }
1985
1986 impl<'a, 'tcx> SizeSkeleton<'tcx> {
1987     pub fn compute(ty: Ty<'tcx>,
1988                    tcx: TyCtxt<'a, 'tcx, 'tcx>,
1989                    param_env: ty::ParamEnv<'tcx>)
1990                    -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
1991         assert!(!ty.has_infer_types());
1992
1993         // First try computing a static layout.
1994         let err = match ty.layout(tcx, param_env) {
1995             Ok(layout) => {
1996                 return Ok(SizeSkeleton::Known(layout.size(tcx)));
1997             }
1998             Err(err) => err
1999         };
2000
2001         let ptr_skeleton = |pointee: Ty<'tcx>| {
2002             let non_zero = !ty.is_unsafe_ptr();
2003             let tail = tcx.struct_tail(pointee);
2004             match tail.sty {
2005                 ty::TyParam(_) | ty::TyProjection(_) => {
2006                     assert!(tail.has_param_types() || tail.has_self_ty());
2007                     Ok(SizeSkeleton::Pointer {
2008                         non_zero,
2009                         tail: tcx.erase_regions(&tail)
2010                     })
2011                 }
2012                 _ => {
2013                     bug!("SizeSkeleton::compute({}): layout errored ({}), yet \
2014                             tail `{}` is not a type parameter or a projection",
2015                             ty, err, tail)
2016                 }
2017             }
2018         };
2019
2020         match ty.sty {
2021             ty::TyRef(_, ty::TypeAndMut { ty: pointee, .. }) |
2022             ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
2023                 ptr_skeleton(pointee)
2024             }
2025             ty::TyAdt(def, _) if def.is_box() => {
2026                 ptr_skeleton(ty.boxed_ty())
2027             }
2028
2029             ty::TyAdt(def, substs) => {
2030                 // Only newtypes and enums w/ nullable pointer optimization.
2031                 if def.is_union() || def.variants.is_empty() || def.variants.len() > 2 {
2032                     return Err(err);
2033                 }
2034
2035                 // Get a zero-sized variant or a pointer newtype.
2036                 let zero_or_ptr_variant = |i: usize| {
2037                     let fields = def.variants[i].fields.iter().map(|field| {
2038                         SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env)
2039                     });
2040                     let mut ptr = None;
2041                     for field in fields {
2042                         let field = field?;
2043                         match field {
2044                             SizeSkeleton::Known(size) => {
2045                                 if size.bytes() > 0 {
2046                                     return Err(err);
2047                                 }
2048                             }
2049                             SizeSkeleton::Pointer {..} => {
2050                                 if ptr.is_some() {
2051                                     return Err(err);
2052                                 }
2053                                 ptr = Some(field);
2054                             }
2055                         }
2056                     }
2057                     Ok(ptr)
2058                 };
2059
2060                 let v0 = zero_or_ptr_variant(0)?;
2061                 // Newtype.
2062                 if def.variants.len() == 1 {
2063                     if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
2064                         return Ok(SizeSkeleton::Pointer {
2065                             non_zero: non_zero ||
2066                                 Some(def.did) == tcx.lang_items().non_zero(),
2067                             tail,
2068                         });
2069                     } else {
2070                         return Err(err);
2071                     }
2072                 }
2073
2074                 let v1 = zero_or_ptr_variant(1)?;
2075                 // Nullable pointer enum optimization.
2076                 match (v0, v1) {
2077                     (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None) |
2078                     (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
2079                         Ok(SizeSkeleton::Pointer {
2080                             non_zero: false,
2081                             tail,
2082                         })
2083                     }
2084                     _ => Err(err)
2085                 }
2086             }
2087
2088             ty::TyProjection(_) | ty::TyAnon(..) => {
2089                 let normalized = tcx.normalize_associated_type_in_env(&ty, param_env);
2090                 if ty == normalized {
2091                     Err(err)
2092                 } else {
2093                     SizeSkeleton::compute(normalized, tcx, param_env)
2094                 }
2095             }
2096
2097             _ => Err(err)
2098         }
2099     }
2100
2101     pub fn same_size(self, other: SizeSkeleton) -> bool {
2102         match (self, other) {
2103             (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
2104             (SizeSkeleton::Pointer { tail: a, .. },
2105              SizeSkeleton::Pointer { tail: b, .. }) => a == b,
2106             _ => false
2107         }
2108     }
2109 }
2110
2111 /// A pair of a type and its layout. Implements various
2112 /// type traversal APIs (e.g. recursing into fields).
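/// An illustrative sketch of field traversal (assuming `cx` is some
/// `LayoutTyper`, e.g. a `LayoutCx`, and `ty` is a monomorphic type):
///
/// ```ignore
/// let layout = cx.layout_of(ty)?;
/// for i in 0..layout.field_count() {
///     let field = layout.field(cx, i)?;        // layout of the i-th field
///     let offset = layout.field_offset(cx, i); // its byte offset
/// }
/// ```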
2113 #[derive(Copy, Clone, Debug)]
2114 pub struct TyLayout<'tcx> {
2115     pub ty: Ty<'tcx>,
2116     pub layout: &'tcx Layout,
2117     pub variant_index: Option<usize>,
2118 }
2119
2120 impl<'tcx> Deref for TyLayout<'tcx> {
2121     type Target = Layout;
2122     fn deref(&self) -> &Layout {
2123         self.layout
2124     }
2125 }
2126
2127 pub trait LayoutTyper<'tcx>: HasDataLayout {
2128     type TyLayout;
2129
2130     fn tcx<'a>(&'a self) -> TyCtxt<'a, 'tcx, 'tcx>;
2131     fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout;
2132     fn normalize_projections(self, ty: Ty<'tcx>) -> Ty<'tcx>;
2133 }
2134
2135 /// Combines a tcx with the parameter environment so that you can
2136 /// compute layouts for types in that environment.
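/// An illustrative sketch (assuming `tcx`, `param_env` and `ty` are in scope):
///
/// ```ignore
/// let cx = LayoutCx::new(tcx, param_env);
/// let layout = cx.layout_of(ty)?; // Result<TyLayout<'tcx>, LayoutError<'tcx>>
/// ```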
2137 #[derive(Copy, Clone)]
2138 pub struct LayoutCx<'a, 'tcx: 'a> {
2139     tcx: TyCtxt<'a, 'tcx, 'tcx>,
2140     param_env: ty::ParamEnv<'tcx>,
2141 }
2142
2143 impl<'a, 'tcx> LayoutCx<'a, 'tcx> {
2144     pub fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>, param_env: ty::ParamEnv<'tcx>) -> Self {
2145         LayoutCx { tcx, param_env }
2146     }
2147 }
2148
2149 impl<'a, 'tcx> HasDataLayout for LayoutCx<'a, 'tcx> {
2150     fn data_layout(&self) -> &TargetDataLayout {
2151         &self.tcx.data_layout
2152     }
2153 }
2154
2155 impl<'a, 'tcx> LayoutTyper<'tcx> for LayoutCx<'a, 'tcx> {
2156     type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;
2157
2158     fn tcx<'b>(&'b self) -> TyCtxt<'b, 'tcx, 'tcx> {
2159         self.tcx
2160     }
2161
2162     fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
2163         let ty = self.normalize_projections(ty);
2164
2165         Ok(TyLayout {
2166             ty,
2167             layout: ty.layout(self.tcx, self.param_env)?,
2168             variant_index: None
2169         })
2170     }
2171
2172     fn normalize_projections(self, ty: Ty<'tcx>) -> Ty<'tcx> {
2173         self.tcx.normalize_associated_type_in_env(&ty, self.param_env)
2174     }
2175 }
2176
2177 impl<'a, 'tcx> TyLayout<'tcx> {
2178     pub fn for_variant(&self, variant_index: usize) -> Self {
2179         TyLayout {
2180             variant_index: Some(variant_index),
2181             ..*self
2182         }
2183     }
2184
2185     pub fn field_offset<C: HasDataLayout>(&self, cx: C, i: usize) -> Size {
2186         self.layout.field_offset(cx, i, self.variant_index)
2187     }
2188
2189     pub fn field_count(&self) -> usize {
2190         // Handle enum/union through the type rather than Layout.
2191         if let ty::TyAdt(def, _) = self.ty.sty {
2192             let v = self.variant_index.unwrap_or(0);
2193             if def.variants.is_empty() {
2194                 assert_eq!(v, 0);
2195                 return 0;
2196             } else {
2197                 return def.variants[v].fields.len();
2198             }
2199         }
2200
2201         match *self.layout {
2202             Scalar { .. } => {
2203                 bug!("TyLayout::field_count({:?}): not applicable", self)
2204             }
2205
2206             // Handled above (the TyAdt case).
2207             CEnum { .. } |
2208             General { .. } |
2209             UntaggedUnion { .. } |
2210             RawNullablePointer { .. } |
2211             StructWrappedNullablePointer { .. } => bug!(),
2212
2213             FatPointer { .. } => 2,
2214
2215             Vector { count, .. } |
2216             Array { count, .. } => {
2217                 let usize_count = count as usize;
2218                 assert_eq!(usize_count as u64, count);
2219                 usize_count
2220             }
2221
2222             Univariant { ref variant, .. } => variant.offsets.len(),
2223         }
2224     }
2225
2226     pub fn field_type<C: LayoutTyper<'tcx>>(&self, cx: C, i: usize) -> Ty<'tcx> {
2227         let tcx = cx.tcx();
2228
2229         let ptr_field_type = |pointee: Ty<'tcx>| {
2230             assert!(i < 2);
2231             let slice = |element: Ty<'tcx>| {
2232                 if i == 0 {
2233                     tcx.mk_mut_ptr(element)
2234                 } else {
2235                     tcx.types.usize
2236                 }
2237             };
2238             match tcx.struct_tail(pointee).sty {
2239                 ty::TySlice(element) => slice(element),
2240                 ty::TyStr => slice(tcx.types.u8),
2241                 ty::TyDynamic(..) => tcx.mk_mut_ptr(tcx.mk_nil()),
2242                 _ => bug!("TyLayout::field_type({:?}): not applicable", self)
2243             }
2244         };
2245
2246         match self.ty.sty {
2247             ty::TyBool |
2248             ty::TyChar |
2249             ty::TyInt(_) |
2250             ty::TyUint(_) |
2251             ty::TyFloat(_) |
2252             ty::TyFnPtr(_) |
2253             ty::TyNever |
2254             ty::TyFnDef(..) |
2255             ty::TyDynamic(..) => {
2256                 bug!("TyLayout::field_type({:?}): not applicable", self)
2257             }
2258
2259             // Potentially-fat pointers.
2260             ty::TyRef(_, ty::TypeAndMut { ty: pointee, .. }) |
2261             ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
2262                 ptr_field_type(pointee)
2263             }
2264             ty::TyAdt(def, _) if def.is_box() => {
2265                 ptr_field_type(self.ty.boxed_ty())
2266             }
2267
2268             // Arrays and slices.
2269             ty::TyArray(element, _) |
2270             ty::TySlice(element) => element,
2271             ty::TyStr => tcx.types.u8,
2272
2273             // Tuples, generators and closures.
2274             ty::TyClosure(def_id, ref substs) => {
2275                 substs.upvar_tys(def_id, tcx).nth(i).unwrap()
2276             }
2277
2278             ty::TyGenerator(def_id, ref substs, _) => {
2279                 substs.field_tys(def_id, tcx).nth(i).unwrap()
2280             }
2281
2282             ty::TyTuple(tys, _) => tys[i],
2283
2284             // SIMD vector types.
2285             ty::TyAdt(def, ..) if def.repr.simd() => {
2286                 self.ty.simd_type(tcx)
2287             }
2288
2289             // ADTs.
2290             ty::TyAdt(def, substs) => {
2291                 def.variants[self.variant_index.unwrap_or(0)].fields[i].ty(tcx, substs)
2292             }
2293
2294             ty::TyProjection(_) | ty::TyAnon(..) | ty::TyParam(_) |
2295             ty::TyInfer(_) | ty::TyError => {
2296                 bug!("TyLayout::field_type: unexpected type `{}`", self.ty)
2297             }
2298         }
2299     }
2300
2301     pub fn field<C: LayoutTyper<'tcx>>(&self,
2302                                        cx: C,
2303                                        i: usize)
2304                                        -> C::TyLayout {
2305         cx.layout_of(cx.normalize_projections(self.field_type(cx, i)))
2306     }
2307 }
2308
2309 impl<'gcx> HashStable<StableHashingContext<'gcx>> for Layout
2310 {
2311     fn hash_stable<W: StableHasherResult>(&self,
2312                                           hcx: &mut StableHashingContext<'gcx>,
2313                                           hasher: &mut StableHasher<W>) {
2314         use ty::layout::Layout::*;
2315         mem::discriminant(self).hash_stable(hcx, hasher);
2316
2317         match *self {
2318             Scalar { value, non_zero } => {
2319                 value.hash_stable(hcx, hasher);
2320                 non_zero.hash_stable(hcx, hasher);
2321             }
2322             Vector { element, count } => {
2323                 element.hash_stable(hcx, hasher);
2324                 count.hash_stable(hcx, hasher);
2325             }
2326             Array { sized, align, primitive_align, element_size, count } => {
2327                 sized.hash_stable(hcx, hasher);
2328                 align.hash_stable(hcx, hasher);
2329                 primitive_align.hash_stable(hcx, hasher);
2330                 element_size.hash_stable(hcx, hasher);
2331                 count.hash_stable(hcx, hasher);
2332             }
2333             FatPointer { ref metadata, non_zero } => {
2334                 metadata.hash_stable(hcx, hasher);
2335                 non_zero.hash_stable(hcx, hasher);
2336             }
2337             CEnum { discr, signed, non_zero, min, max } => {
2338                 discr.hash_stable(hcx, hasher);
2339                 signed.hash_stable(hcx, hasher);
2340                 non_zero.hash_stable(hcx, hasher);
2341                 min.hash_stable(hcx, hasher);
2342                 max.hash_stable(hcx, hasher);
2343             }
2344             Univariant { ref variant, non_zero } => {
2345                 variant.hash_stable(hcx, hasher);
2346                 non_zero.hash_stable(hcx, hasher);
2347             }
2348             UntaggedUnion { ref variants } => {
2349                 variants.hash_stable(hcx, hasher);
2350             }
2351             General { discr, ref variants, size, align, primitive_align } => {
2352                 discr.hash_stable(hcx, hasher);
2353                 variants.hash_stable(hcx, hasher);
2354                 size.hash_stable(hcx, hasher);
2355                 align.hash_stable(hcx, hasher);
2356                 primitive_align.hash_stable(hcx, hasher);
2357             }
2358             RawNullablePointer { nndiscr, ref value } => {
2359                 nndiscr.hash_stable(hcx, hasher);
2360                 value.hash_stable(hcx, hasher);
2361             }
2362             StructWrappedNullablePointer {
2363                 nndiscr,
2364                 ref nonnull,
2365                 ref discrfield,
2366                 ref discrfield_source
2367             } => {
2368                 nndiscr.hash_stable(hcx, hasher);
2369                 nonnull.hash_stable(hcx, hasher);
2370                 discrfield.hash_stable(hcx, hasher);
2371                 discrfield_source.hash_stable(hcx, hasher);
2372             }
2373         }
2374     }
2375 }
2376
2377 impl_stable_hash_for!(enum ::ty::layout::Integer {
2378     I1,
2379     I8,
2380     I16,
2381     I32,
2382     I64,
2383     I128
2384 });
2385
2386 impl_stable_hash_for!(enum ::ty::layout::Primitive {
2387     Int(integer),
2388     F32,
2389     F64,
2390     Pointer
2391 });
2392
2393 impl_stable_hash_for!(struct ::ty::layout::Align {
2394     abi,
2395     pref
2396 });
2397
2398 impl_stable_hash_for!(struct ::ty::layout::Size {
2399     raw
2400 });
2401
2402 impl<'gcx> HashStable<StableHashingContext<'gcx>> for LayoutError<'gcx>
2403 {
2404     fn hash_stable<W: StableHasherResult>(&self,
2405                                           hcx: &mut StableHashingContext<'gcx>,
2406                                           hasher: &mut StableHasher<W>) {
2407         use ty::layout::LayoutError::*;
2408         mem::discriminant(self).hash_stable(hcx, hasher);
2409
2410         match *self {
2411             Unknown(t) |
2412             SizeOverflow(t) => t.hash_stable(hcx, hasher)
2413         }
2414     }
2415 }
2416
2417 impl_stable_hash_for!(struct ::ty::layout::Struct {
2418     align,
2419     primitive_align,
2420     packed,
2421     sized,
2422     offsets,
2423     memory_index,
2424     min_size
2425 });
2426
2427 impl_stable_hash_for!(struct ::ty::layout::Union {
2428     align,
2429     primitive_align,
2430     min_size,
2431     packed
2432 });