]> git.lizzy.rs Git - rust.git/blob - src/librustc/ty/layout.rs
Auto merge of #44375 - topecongiro:macrodef-span, r=petrochenkov
[rust.git] / src / librustc / ty / layout.rs
1 // Copyright 2016 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
4 //
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
10
11 pub use self::Integer::*;
12 pub use self::Layout::*;
13 pub use self::Primitive::*;
14
15 use session::{self, DataTypeKind, Session};
16 use ty::{self, Ty, TyCtxt, TypeFoldable, ReprOptions, ReprFlags};
17
18 use syntax::ast::{self, FloatTy, IntTy, UintTy};
19 use syntax::attr;
20 use syntax_pos::DUMMY_SP;
21
22 use std::cmp;
23 use std::fmt;
24 use std::i64;
25 use std::iter;
26 use std::ops::Deref;
27
28 /// Parsed [Data layout](http://llvm.org/docs/LangRef.html#data-layout)
29 /// for a target, which contains everything needed to compute layouts.
30 pub struct TargetDataLayout {
31     pub endian: Endian,
32     pub i1_align: Align,
33     pub i8_align: Align,
34     pub i16_align: Align,
35     pub i32_align: Align,
36     pub i64_align: Align,
37     pub i128_align: Align,
38     pub f32_align: Align,
39     pub f64_align: Align,
40     pub pointer_size: Size,
41     pub pointer_align: Align,
42     pub aggregate_align: Align,
43
44     /// Alignments for vector types.
45     pub vector_align: Vec<(Size, Align)>
46 }
47
48 impl Default for TargetDataLayout {
49     /// Creates an instance of `TargetDataLayout`.
50     fn default() -> TargetDataLayout {
51         TargetDataLayout {
52             endian: Endian::Big,
53             i1_align: Align::from_bits(8, 8).unwrap(),
54             i8_align: Align::from_bits(8, 8).unwrap(),
55             i16_align: Align::from_bits(16, 16).unwrap(),
56             i32_align: Align::from_bits(32, 32).unwrap(),
57             i64_align: Align::from_bits(32, 64).unwrap(),
58             i128_align: Align::from_bits(32, 64).unwrap(),
59             f32_align: Align::from_bits(32, 32).unwrap(),
60             f64_align: Align::from_bits(64, 64).unwrap(),
61             pointer_size: Size::from_bits(64),
62             pointer_align: Align::from_bits(64, 64).unwrap(),
63             aggregate_align: Align::from_bits(0, 64).unwrap(),
64             vector_align: vec![
65                 (Size::from_bits(64), Align::from_bits(64, 64).unwrap()),
66                 (Size::from_bits(128), Align::from_bits(128, 128).unwrap())
67             ]
68         }
69     }
70 }
71
72 impl TargetDataLayout {
73     pub fn parse(sess: &Session) -> TargetDataLayout {
74         // Parse a bit count from a string.
75         let parse_bits = |s: &str, kind: &str, cause: &str| {
76             s.parse::<u64>().unwrap_or_else(|err| {
77                 sess.err(&format!("invalid {} `{}` for `{}` in \"data-layout\": {}",
78                                   kind, s, cause, err));
79                 0
80             })
81         };
82
83         // Parse a size string.
84         let size = |s: &str, cause: &str| {
85             Size::from_bits(parse_bits(s, "size", cause))
86         };
87
88         // Parse an alignment string.
89         let align = |s: &[&str], cause: &str| {
90             if s.is_empty() {
91                 sess.err(&format!("missing alignment for `{}` in \"data-layout\"", cause));
92             }
93             let abi = parse_bits(s[0], "alignment", cause);
94             let pref = s.get(1).map_or(abi, |pref| parse_bits(pref, "alignment", cause));
95             Align::from_bits(abi, pref).unwrap_or_else(|err| {
96                 sess.err(&format!("invalid alignment for `{}` in \"data-layout\": {}",
97                                   cause, err));
98                 Align::from_bits(8, 8).unwrap()
99             })
100         };
101
102         let mut dl = TargetDataLayout::default();
103         let mut i128_align_src = 64;
104         for spec in sess.target.target.data_layout.split("-") {
105             match &spec.split(":").collect::<Vec<_>>()[..] {
106                 &["e"] => dl.endian = Endian::Little,
107                 &["E"] => dl.endian = Endian::Big,
108                 &["a", ref a..] => dl.aggregate_align = align(a, "a"),
109                 &["f32", ref a..] => dl.f32_align = align(a, "f32"),
110                 &["f64", ref a..] => dl.f64_align = align(a, "f64"),
111                 &[p @ "p", s, ref a..] | &[p @ "p0", s, ref a..] => {
112                     dl.pointer_size = size(s, p);
113                     dl.pointer_align = align(a, p);
114                 }
115                 &[s, ref a..] if s.starts_with("i") => {
116                     let bits = match s[1..].parse::<u64>() {
117                         Ok(bits) => bits,
118                         Err(_) => {
119                             size(&s[1..], "i"); // For the user error.
120                             continue;
121                         }
122                     };
123                     let a = align(a, s);
124                     match bits {
125                         1 => dl.i1_align = a,
126                         8 => dl.i8_align = a,
127                         16 => dl.i16_align = a,
128                         32 => dl.i32_align = a,
129                         64 => dl.i64_align = a,
130                         _ => {}
131                     }
132                     if bits >= i128_align_src && bits <= 128 {
133                         // Default alignment for i128 is decided by taking the alignment of
134                         // largest-sized i{64...128}.
135                         i128_align_src = bits;
136                         dl.i128_align = a;
137                     }
138                 }
139                 &[s, ref a..] if s.starts_with("v") => {
140                     let v_size = size(&s[1..], "v");
141                     let a = align(a, s);
142                     if let Some(v) = dl.vector_align.iter_mut().find(|v| v.0 == v_size) {
143                         v.1 = a;
144                         continue;
145                     }
146                     // No existing entry, add a new one.
147                     dl.vector_align.push((v_size, a));
148                 }
149                 _ => {} // Ignore everything else.
150             }
151         }
152
153         // Perform consistency checks against the Target information.
154         let endian_str = match dl.endian {
155             Endian::Little => "little",
156             Endian::Big => "big"
157         };
158         if endian_str != sess.target.target.target_endian {
159             sess.err(&format!("inconsistent target specification: \"data-layout\" claims \
160                                architecture is {}-endian, while \"target-endian\" is `{}`",
161                               endian_str, sess.target.target.target_endian));
162         }
163
164         if dl.pointer_size.bits().to_string() != sess.target.target.target_pointer_width {
165             sess.err(&format!("inconsistent target specification: \"data-layout\" claims \
166                                pointers are {}-bit, while \"target-pointer-width\" is `{}`",
167                               dl.pointer_size.bits(), sess.target.target.target_pointer_width));
168         }
169
170         dl
171     }
172
173     /// Return exclusive upper bound on object size.
174     ///
175     /// The theoretical maximum object size is defined as the maximum positive `isize` value.
176     /// This ensures that the `offset` semantics remain well-defined by allowing it to correctly
177     /// index every address within an object along with one byte past the end, along with allowing
178     /// `isize` to store the difference between any two pointers into an object.
179     ///
180     /// The upper bound on 64-bit currently needs to be lower because LLVM uses a 64-bit integer
181     /// to represent object size in bits. It would need to be 1 << 61 to account for this, but is
182     /// currently conservatively bounded to 1 << 47 as that is enough to cover the current usable
183     /// address space on 64-bit ARMv8 and x86_64.
184     pub fn obj_size_bound(&self) -> u64 {
185         match self.pointer_size.bits() {
186             16 => 1 << 15,
187             32 => 1 << 31,
188             64 => 1 << 47,
189             bits => bug!("obj_size_bound: unknown pointer bit size {}", bits)
190         }
191     }
192
193     pub fn ptr_sized_integer(&self) -> Integer {
194         match self.pointer_size.bits() {
195             16 => I16,
196             32 => I32,
197             64 => I64,
198             bits => bug!("ptr_sized_integer: unknown pointer bit size {}", bits)
199         }
200     }
201 }
202
203 pub trait HasDataLayout: Copy {
204     fn data_layout(&self) -> &TargetDataLayout;
205 }
206
207 impl<'a> HasDataLayout for &'a TargetDataLayout {
208     fn data_layout(&self) -> &TargetDataLayout {
209         self
210     }
211 }
212
213 impl<'a, 'tcx> HasDataLayout for TyCtxt<'a, 'tcx, 'tcx> {
214     fn data_layout(&self) -> &TargetDataLayout {
215         &self.data_layout
216     }
217 }
218
219 /// Endianness of the target, which must match cfg(target-endian).
220 #[derive(Copy, Clone)]
221 pub enum Endian {
222     Little,
223     Big
224 }
225
226 /// Size of a type in bytes.
227 #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
228 pub struct Size {
229     raw: u64
230 }
231
232 impl Size {
233     pub fn from_bits(bits: u64) -> Size {
234         Size::from_bytes((bits + 7) / 8)
235     }
236
237     pub fn from_bytes(bytes: u64) -> Size {
238         if bytes >= (1 << 61) {
239             bug!("Size::from_bytes: {} bytes in bits doesn't fit in u64", bytes)
240         }
241         Size {
242             raw: bytes
243         }
244     }
245
246     pub fn bytes(self) -> u64 {
247         self.raw
248     }
249
250     pub fn bits(self) -> u64 {
251         self.bytes() * 8
252     }
253
254     pub fn abi_align(self, align: Align) -> Size {
255         let mask = align.abi() - 1;
256         Size::from_bytes((self.bytes() + mask) & !mask)
257     }
258
259     pub fn checked_add<C: HasDataLayout>(self, offset: Size, cx: C) -> Option<Size> {
260         let dl = cx.data_layout();
261
262         // Each Size is less than dl.obj_size_bound(), so the sum is
263         // also less than 1 << 62 (and therefore can't overflow).
264         let bytes = self.bytes() + offset.bytes();
265
266         if bytes < dl.obj_size_bound() {
267             Some(Size::from_bytes(bytes))
268         } else {
269             None
270         }
271     }
272
273     pub fn checked_mul<C: HasDataLayout>(self, count: u64, cx: C) -> Option<Size> {
274         let dl = cx.data_layout();
275
276         // Each Size is less than dl.obj_size_bound(), so the sum is
277         // also less than 1 << 62 (and therefore can't overflow).
278         match self.bytes().checked_mul(count) {
279             Some(bytes) if bytes < dl.obj_size_bound() => {
280                 Some(Size::from_bytes(bytes))
281             }
282             _ => None
283         }
284     }
285 }
286
287 /// Alignment of a type in bytes, both ABI-mandated and preferred.
288 /// Each field is a power of two, giving the alignment a maximum
289 /// value of 2^(2^8 - 1), which is limited by LLVM to a i32, with
290 /// a maximum capacity of 2^31 - 1 or 2147483647.
291 #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
292 pub struct Align {
293     abi: u8,
294     pref: u8,
295 }
296
297 impl Align {
298     pub fn from_bits(abi: u64, pref: u64) -> Result<Align, String> {
299         Align::from_bytes((abi + 7) / 8, (pref + 7) / 8)
300     }
301
302     pub fn from_bytes(abi: u64, pref: u64) -> Result<Align, String> {
303         let log2 = |align: u64| {
304             // Treat an alignment of 0 bytes like 1-byte alignment.
305             if align == 0 {
306                 return Ok(0);
307             }
308
309             let mut bytes = align;
310             let mut pow: u8 = 0;
311             while (bytes & 1) == 0 {
312                 pow += 1;
313                 bytes >>= 1;
314             }
315             if bytes != 1 {
316                 Err(format!("`{}` is not a power of 2", align))
317             } else if pow > 30 {
318                 Err(format!("`{}` is too large", align))
319             } else {
320                 Ok(pow)
321             }
322         };
323
324         Ok(Align {
325             abi: log2(abi)?,
326             pref: log2(pref)?,
327         })
328     }
329
330     pub fn abi(self) -> u64 {
331         1 << self.abi
332     }
333
334     pub fn pref(self) -> u64 {
335         1 << self.pref
336     }
337
338     pub fn min(self, other: Align) -> Align {
339         Align {
340             abi: cmp::min(self.abi, other.abi),
341             pref: cmp::min(self.pref, other.pref),
342         }
343     }
344
345     pub fn max(self, other: Align) -> Align {
346         Align {
347             abi: cmp::max(self.abi, other.abi),
348             pref: cmp::max(self.pref, other.pref),
349         }
350     }
351 }
352
353 /// Integers, also used for enum discriminants.
354 #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
355 pub enum Integer {
356     I1,
357     I8,
358     I16,
359     I32,
360     I64,
361     I128,
362 }
363
364 impl Integer {
365     pub fn size(&self) -> Size {
366         match *self {
367             I1 => Size::from_bits(1),
368             I8 => Size::from_bytes(1),
369             I16 => Size::from_bytes(2),
370             I32 => Size::from_bytes(4),
371             I64  => Size::from_bytes(8),
372             I128  => Size::from_bytes(16),
373         }
374     }
375
376     pub fn align<C: HasDataLayout>(&self, cx: C) -> Align {
377         let dl = cx.data_layout();
378
379         match *self {
380             I1 => dl.i1_align,
381             I8 => dl.i8_align,
382             I16 => dl.i16_align,
383             I32 => dl.i32_align,
384             I64 => dl.i64_align,
385             I128 => dl.i128_align,
386         }
387     }
388
389     pub fn to_ty<'a, 'tcx>(&self, tcx: &ty::TyCtxt<'a, 'tcx, 'tcx>,
390                            signed: bool) -> Ty<'tcx> {
391         match (*self, signed) {
392             (I1, false) => tcx.types.u8,
393             (I8, false) => tcx.types.u8,
394             (I16, false) => tcx.types.u16,
395             (I32, false) => tcx.types.u32,
396             (I64, false) => tcx.types.u64,
397             (I128, false) => tcx.types.u128,
398             (I1, true) => tcx.types.i8,
399             (I8, true) => tcx.types.i8,
400             (I16, true) => tcx.types.i16,
401             (I32, true) => tcx.types.i32,
402             (I64, true) => tcx.types.i64,
403             (I128, true) => tcx.types.i128,
404         }
405     }
406
407     /// Find the smallest Integer type which can represent the signed value.
408     pub fn fit_signed(x: i64) -> Integer {
409         match x {
410             -0x0000_0000_0000_0001...0x0000_0000_0000_0000 => I1,
411             -0x0000_0000_0000_0080...0x0000_0000_0000_007f => I8,
412             -0x0000_0000_0000_8000...0x0000_0000_0000_7fff => I16,
413             -0x0000_0000_8000_0000...0x0000_0000_7fff_ffff => I32,
414             -0x8000_0000_0000_0000...0x7fff_ffff_ffff_ffff => I64,
415             _ => I128
416         }
417     }
418
419     /// Find the smallest Integer type which can represent the unsigned value.
420     pub fn fit_unsigned(x: u64) -> Integer {
421         match x {
422             0...0x0000_0000_0000_0001 => I1,
423             0...0x0000_0000_0000_00ff => I8,
424             0...0x0000_0000_0000_ffff => I16,
425             0...0x0000_0000_ffff_ffff => I32,
426             0...0xffff_ffff_ffff_ffff => I64,
427             _ => I128,
428         }
429     }
430
431     /// Find the smallest integer with the given alignment.
432     pub fn for_abi_align<C: HasDataLayout>(cx: C, align: Align) -> Option<Integer> {
433         let dl = cx.data_layout();
434
435         let wanted = align.abi();
436         for &candidate in &[I8, I16, I32, I64] {
437             let ty = Int(candidate);
438             if wanted == ty.align(dl).abi() && wanted == ty.size(dl).bytes() {
439                 return Some(candidate);
440             }
441         }
442         None
443     }
444
445     /// Get the Integer type from an attr::IntType.
446     pub fn from_attr<C: HasDataLayout>(cx: C, ity: attr::IntType) -> Integer {
447         let dl = cx.data_layout();
448
449         match ity {
450             attr::SignedInt(IntTy::I8) | attr::UnsignedInt(UintTy::U8) => I8,
451             attr::SignedInt(IntTy::I16) | attr::UnsignedInt(UintTy::U16) => I16,
452             attr::SignedInt(IntTy::I32) | attr::UnsignedInt(UintTy::U32) => I32,
453             attr::SignedInt(IntTy::I64) | attr::UnsignedInt(UintTy::U64) => I64,
454             attr::SignedInt(IntTy::I128) | attr::UnsignedInt(UintTy::U128) => I128,
455             attr::SignedInt(IntTy::Is) | attr::UnsignedInt(UintTy::Us) => {
456                 dl.ptr_sized_integer()
457             }
458         }
459     }
460
461     /// Find the appropriate Integer type and signedness for the given
462     /// signed discriminant range and #[repr] attribute.
463     /// N.B.: u64 values above i64::MAX will be treated as signed, but
464     /// that shouldn't affect anything, other than maybe debuginfo.
465     fn repr_discr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
466                             ty: Ty<'tcx>,
467                             repr: &ReprOptions,
468                             min: i64,
469                             max: i64)
470                             -> (Integer, bool) {
471         // Theoretically, negative values could be larger in unsigned representation
472         // than the unsigned representation of the signed minimum. However, if there
473         // are any negative values, the only valid unsigned representation is u64
474         // which can fit all i64 values, so the result remains unaffected.
475         let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u64, max as u64));
476         let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));
477
478         let mut min_from_extern = None;
479         let min_default = I8;
480
481         if let Some(ity) = repr.int {
482             let discr = Integer::from_attr(tcx, ity);
483             let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
484             if discr < fit {
485                 bug!("Integer::repr_discr: `#[repr]` hint too small for \
486                   discriminant range of enum `{}", ty)
487             }
488             return (discr, ity.is_signed());
489         }
490
491         if repr.c() {
492             match &tcx.sess.target.target.arch[..] {
493                 // WARNING: the ARM EABI has two variants; the one corresponding
494                 // to `at_least == I32` appears to be used on Linux and NetBSD,
495                 // but some systems may use the variant corresponding to no
496                 // lower bound.  However, we don't run on those yet...?
497                 "arm" => min_from_extern = Some(I32),
498                 _ => min_from_extern = Some(I32),
499             }
500         }
501
502         let at_least = min_from_extern.unwrap_or(min_default);
503
504         // If there are no negative values, we can use the unsigned fit.
505         if min >= 0 {
506             (cmp::max(unsigned_fit, at_least), false)
507         } else {
508             (cmp::max(signed_fit, at_least), true)
509         }
510     }
511 }
512
513 /// Fundamental unit of memory access and layout.
514 #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
515 pub enum Primitive {
516     Int(Integer),
517     F32,
518     F64,
519     Pointer
520 }
521
522 impl Primitive {
523     pub fn size<C: HasDataLayout>(self, cx: C) -> Size {
524         let dl = cx.data_layout();
525
526         match self {
527             Int(I1) | Int(I8) => Size::from_bits(8),
528             Int(I16) => Size::from_bits(16),
529             Int(I32) | F32 => Size::from_bits(32),
530             Int(I64) | F64 => Size::from_bits(64),
531             Int(I128) => Size::from_bits(128),
532             Pointer => dl.pointer_size
533         }
534     }
535
536     pub fn align<C: HasDataLayout>(self, cx: C) -> Align {
537         let dl = cx.data_layout();
538
539         match self {
540             Int(I1) => dl.i1_align,
541             Int(I8) => dl.i8_align,
542             Int(I16) => dl.i16_align,
543             Int(I32) => dl.i32_align,
544             Int(I64) => dl.i64_align,
545             Int(I128) => dl.i128_align,
546             F32 => dl.f32_align,
547             F64 => dl.f64_align,
548             Pointer => dl.pointer_align
549         }
550     }
551 }
552
553 /// Path through fields of nested structures.
554 // FIXME(eddyb) use small vector optimization for the common case.
555 pub type FieldPath = Vec<u32>;
556
557 /// A structure, a product type in ADT terms.
558 #[derive(PartialEq, Eq, Hash, Debug)]
559 pub struct Struct {
560     /// Maximum alignment of fields and repr alignment.
561     pub align: Align,
562
563     /// Primitive alignment of fields without repr alignment.
564     pub primitive_align: Align,
565
566     /// If true, no alignment padding is used.
567     pub packed: bool,
568
569     /// If true, the size is exact, otherwise it's only a lower bound.
570     pub sized: bool,
571
572     /// Offsets for the first byte of each field, ordered to match the source definition order.
573     /// This vector does not go in increasing order.
574     /// FIXME(eddyb) use small vector optimization for the common case.
575     pub offsets: Vec<Size>,
576
577     /// Maps source order field indices to memory order indices, depending how fields were permuted.
578     /// FIXME (camlorn) also consider small vector  optimization here.
579     pub memory_index: Vec<u32>,
580
581     pub min_size: Size,
582 }
583
584 /// Info required to optimize struct layout.
585 #[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Debug)]
586 enum StructKind {
587     /// A tuple, closure, or univariant which cannot be coerced to unsized.
588     AlwaysSizedUnivariant,
589     /// A univariant, the last field of which may be coerced to unsized.
590     MaybeUnsizedUnivariant,
591     /// A univariant, but part of an enum.
592     EnumVariant,
593 }
594
595 impl<'a, 'tcx> Struct {
596     fn new(dl: &TargetDataLayout,
597            fields: &Vec<&'a Layout>,
598            repr: &ReprOptions,
599            kind: StructKind,
600            scapegoat: Ty<'tcx>)
601            -> Result<Struct, LayoutError<'tcx>> {
602         if repr.packed() && repr.align > 0 {
603             bug!("Struct cannot be packed and aligned");
604         }
605
606         let align = if repr.packed() {
607             dl.i8_align
608         } else {
609             dl.aggregate_align
610         };
611
612         let mut ret = Struct {
613             align,
614             primitive_align: align,
615             packed: repr.packed(),
616             sized: true,
617             offsets: vec![],
618             memory_index: vec![],
619             min_size: Size::from_bytes(0),
620         };
621
622         // Anything with repr(C) or repr(packed) doesn't optimize.
623         // Neither do  1-member and 2-member structs.
624         // In addition, code in trans assume that 2-element structs can become pairs.
625         // It's easier to just short-circuit here.
626         let can_optimize = (fields.len() > 2 || StructKind::EnumVariant == kind)
627             && (repr.flags & ReprFlags::IS_UNOPTIMISABLE).is_empty();
628
629         let (optimize, sort_ascending) = match kind {
630             StructKind::AlwaysSizedUnivariant => (can_optimize, false),
631             StructKind::MaybeUnsizedUnivariant => (can_optimize, false),
632             StructKind::EnumVariant => {
633                 assert!(fields.len() >= 1, "Enum variants must have discriminants.");
634                 (can_optimize && fields[0].size(dl).bytes() == 1, true)
635             }
636         };
637
638         ret.offsets = vec![Size::from_bytes(0); fields.len()];
639         let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();
640
641         if optimize {
642             let start = if let StructKind::EnumVariant = kind { 1 } else { 0 };
643             let end = if let StructKind::MaybeUnsizedUnivariant = kind {
644                 fields.len() - 1
645             } else {
646                 fields.len()
647             };
648             if end > start {
649                 let optimizing  = &mut inverse_memory_index[start..end];
650                 if sort_ascending {
651                     optimizing.sort_by_key(|&x| fields[x as usize].align(dl).abi());
652                 } else {
653                     optimizing.sort_by(| &a, &b | {
654                         let a = fields[a as usize].align(dl).abi();
655                         let b = fields[b as usize].align(dl).abi();
656                         b.cmp(&a)
657                     });
658                 }
659             }
660         }
661
662         // inverse_memory_index holds field indices by increasing memory offset.
663         // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
664         // We now write field offsets to the corresponding offset slot;
665         // field 5 with offset 0 puts 0 in offsets[5].
666         // At the bottom of this function, we use inverse_memory_index to produce memory_index.
667
668         if let StructKind::EnumVariant = kind {
669             assert_eq!(inverse_memory_index[0], 0,
670               "Enum variant discriminants must have the lowest offset.");
671         }
672
673         let mut offset = Size::from_bytes(0);
674
675         for i in inverse_memory_index.iter() {
676             let field = fields[*i as usize];
677             if !ret.sized {
678                 bug!("Struct::new: field #{} of `{}` comes after unsized field",
679                      ret.offsets.len(), scapegoat);
680             }
681
682             if field.is_unsized() {
683                 ret.sized = false;
684             }
685
686             // Invariant: offset < dl.obj_size_bound() <= 1<<61
687             if !ret.packed {
688                 let align = field.align(dl);
689                 let primitive_align = field.primitive_align(dl);
690                 ret.align = ret.align.max(align);
691                 ret.primitive_align = ret.primitive_align.max(primitive_align);
692                 offset = offset.abi_align(align);
693             }
694
695             debug!("Struct::new offset: {:?} field: {:?} {:?}", offset, field, field.size(dl));
696             ret.offsets[*i as usize] = offset;
697
698             offset = offset.checked_add(field.size(dl), dl)
699                            .map_or(Err(LayoutError::SizeOverflow(scapegoat)), Ok)?;
700         }
701
702         if repr.align > 0 {
703             let repr_align = repr.align as u64;
704             ret.align = ret.align.max(Align::from_bytes(repr_align, repr_align).unwrap());
705             debug!("Struct::new repr_align: {:?}", repr_align);
706         }
707
708         debug!("Struct::new min_size: {:?}", offset);
709         ret.min_size = offset;
710
711         // As stated above, inverse_memory_index holds field indices by increasing offset.
712         // This makes it an already-sorted view of the offsets vec.
713         // To invert it, consider:
714         // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
715         // Field 5 would be the first element, so memory_index is i:
716         // Note: if we didn't optimize, it's already right.
717
718         if optimize {
719             ret.memory_index = vec![0; inverse_memory_index.len()];
720
721             for i in 0..inverse_memory_index.len() {
722                 ret.memory_index[inverse_memory_index[i] as usize]  = i as u32;
723             }
724         } else {
725             ret.memory_index = inverse_memory_index;
726         }
727
728         Ok(ret)
729     }
730
731     /// Get the size with trailing alignment padding.
732     pub fn stride(&self) -> Size {
733         self.min_size.abi_align(self.align)
734     }
735
736     /// Determine whether a structure would be zero-sized, given its fields.
737     fn would_be_zero_sized<I>(dl: &TargetDataLayout, fields: I)
738                               -> Result<bool, LayoutError<'tcx>>
739     where I: Iterator<Item=Result<&'a Layout, LayoutError<'tcx>>> {
740         for field in fields {
741             let field = field?;
742             if field.is_unsized() || field.size(dl).bytes() > 0 {
743                 return Ok(false);
744             }
745         }
746         Ok(true)
747     }
748
749     /// Get indices of the tys that made this struct by increasing offset.
750     #[inline]
751     pub fn field_index_by_increasing_offset<'b>(&'b self) -> impl iter::Iterator<Item=usize>+'b {
752         let mut inverse_small = [0u8; 64];
753         let mut inverse_big = vec![];
754         let use_small = self.memory_index.len() <= inverse_small.len();
755
756         // We have to write this logic twice in order to keep the array small.
757         if use_small {
758             for i in 0..self.memory_index.len() {
759                 inverse_small[self.memory_index[i] as usize] = i as u8;
760             }
761         } else {
762             inverse_big = vec![0; self.memory_index.len()];
763             for i in 0..self.memory_index.len() {
764                 inverse_big[self.memory_index[i] as usize] = i as u32;
765             }
766         }
767
768         (0..self.memory_index.len()).map(move |i| {
769             if use_small { inverse_small[i] as usize }
770             else { inverse_big[i] as usize }
771         })
772     }
773
774     /// Find the path leading to a non-zero leaf field, starting from
775     /// the given type and recursing through aggregates.
776     /// The tuple is `(path, source_path)`,
777     /// where `path` is in memory order and `source_path` in source order.
778     // FIXME(eddyb) track value ranges and traverse already optimized enums.
779     fn non_zero_field_in_type(tcx: TyCtxt<'a, 'tcx, 'tcx>,
780                               param_env: ty::ParamEnv<'tcx>,
781                               ty: Ty<'tcx>)
782                               -> Result<Option<(FieldPath, FieldPath)>, LayoutError<'tcx>> {
783         match (ty.layout(tcx, param_env)?, &ty.sty) {
784             (&Scalar { non_zero: true, .. }, _) |
785             (&CEnum { non_zero: true, .. }, _) => Ok(Some((vec![], vec![]))),
786             (&FatPointer { non_zero: true, .. }, _) => {
787                 Ok(Some((vec![FAT_PTR_ADDR as u32], vec![FAT_PTR_ADDR as u32])))
788             }
789
790             // Is this the NonZero lang item wrapping a pointer or integer type?
791             (&Univariant { non_zero: true, .. }, &ty::TyAdt(def, substs)) => {
792                 let fields = &def.struct_variant().fields;
793                 assert_eq!(fields.len(), 1);
794                 match *fields[0].ty(tcx, substs).layout(tcx, param_env)? {
795                     // FIXME(eddyb) also allow floating-point types here.
796                     Scalar { value: Int(_), non_zero: false } |
797                     Scalar { value: Pointer, non_zero: false } => {
798                         Ok(Some((vec![0], vec![0])))
799                     }
800                     FatPointer { non_zero: false, .. } => {
801                         let tmp = vec![FAT_PTR_ADDR as u32, 0];
802                         Ok(Some((tmp.clone(), tmp)))
803                     }
804                     _ => Ok(None)
805                 }
806             }
807
808             // Perhaps one of the fields of this struct is non-zero
809             // let's recurse and find out
810             (&Univariant { ref variant, .. }, &ty::TyAdt(def, substs)) if def.is_struct() => {
811                 Struct::non_zero_field_paths(
812                     tcx,
813                     param_env,
814                     def.struct_variant().fields.iter().map(|field| {
815                         field.ty(tcx, substs)
816                     }),
817                     Some(&variant.memory_index[..]))
818             }
819
820             // Perhaps one of the upvars of this closure is non-zero
821             (&Univariant { ref variant, .. }, &ty::TyClosure(def, substs)) => {
822                 let upvar_tys = substs.upvar_tys(def, tcx);
823                 Struct::non_zero_field_paths(
824                     tcx,
825                     param_env,
826                     upvar_tys,
827                     Some(&variant.memory_index[..]))
828             }
829             // Can we use one of the fields in this tuple?
830             (&Univariant { ref variant, .. }, &ty::TyTuple(tys, _)) => {
831                 Struct::non_zero_field_paths(
832                     tcx,
833                     param_env,
834                     tys.iter().cloned(),
835                     Some(&variant.memory_index[..]))
836             }
837
838             // Is this a fixed-size array of something non-zero
839             // with at least one element?
840             (_, &ty::TyArray(ety, d)) if d > 0 => {
841                 Struct::non_zero_field_paths(
842                     tcx,
843                     param_env,
844                     Some(ety).into_iter(),
845                     None)
846             }
847
848             (_, &ty::TyProjection(_)) | (_, &ty::TyAnon(..)) => {
849                 let normalized = tcx.normalize_associated_type_in_env(&ty, param_env);
850                 if ty == normalized {
851                     return Ok(None);
852                 }
853                 return Struct::non_zero_field_in_type(tcx, param_env, normalized);
854             }
855
856             // Anything else is not a non-zero type.
857             _ => Ok(None)
858         }
859     }
860
861     /// Find the path leading to a non-zero leaf field, starting from
862     /// the given set of fields and recursing through aggregates.
863     /// Returns Some((path, source_path)) on success.
864     /// `path` is translated to memory order. `source_path` is not.
865     fn non_zero_field_paths<I>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
866                                param_env: ty::ParamEnv<'tcx>,
867                                fields: I,
868                                permutation: Option<&[u32]>)
869                                -> Result<Option<(FieldPath, FieldPath)>, LayoutError<'tcx>>
870     where I: Iterator<Item=Ty<'tcx>> {
871         for (i, ty) in fields.enumerate() {
872             let r = Struct::non_zero_field_in_type(tcx, param_env, ty)?;
873             if let Some((mut path, mut source_path)) = r {
874                 source_path.push(i as u32);
875                 let index = if let Some(p) = permutation {
876                     p[i] as usize
877                 } else {
878                     i
879                 };
880                 path.push(index as u32);
881                 return Ok(Some((path, source_path)));
882             }
883         }
884         Ok(None)
885     }
886
887     pub fn over_align(&self) -> Option<u32> {
888         let align = self.align.abi();
889         let primitive_align = self.primitive_align.abi();
890         if align > primitive_align {
891             Some(align as u32)
892         } else {
893             None
894         }
895     }
896 }
897
898 /// An untagged union.
899 #[derive(PartialEq, Eq, Hash, Debug)]
900 pub struct Union {
901     pub align: Align,
902     pub primitive_align: Align,
903
904     pub min_size: Size,
905
906     /// If true, no alignment padding is used.
907     pub packed: bool,
908 }
909
910 impl<'a, 'tcx> Union {
911     fn new(dl: &TargetDataLayout, repr: &ReprOptions) -> Union {
912         if repr.packed() && repr.align > 0 {
913             bug!("Union cannot be packed and aligned");
914         }
915
916         let primitive_align = if repr.packed() {
917             dl.i8_align
918         } else {
919             dl.aggregate_align
920         };
921
922         let align = if repr.align > 0 {
923             let repr_align = repr.align as u64;
924             debug!("Union::new repr_align: {:?}", repr_align);
925             primitive_align.max(Align::from_bytes(repr_align, repr_align).unwrap())
926         } else {
927             primitive_align
928         };
929
930         Union {
931             align,
932             primitive_align,
933             min_size: Size::from_bytes(0),
934             packed: repr.packed(),
935         }
936     }
937
938     /// Extend the Struct with more fields.
939     fn extend<I>(&mut self, dl: &TargetDataLayout,
940                  fields: I,
941                  scapegoat: Ty<'tcx>)
942                  -> Result<(), LayoutError<'tcx>>
943     where I: Iterator<Item=Result<&'a Layout, LayoutError<'tcx>>> {
944         for (index, field) in fields.enumerate() {
945             let field = field?;
946             if field.is_unsized() {
947                 bug!("Union::extend: field #{} of `{}` is unsized",
948                      index, scapegoat);
949             }
950
951             debug!("Union::extend field: {:?} {:?}", field, field.size(dl));
952
953             if !self.packed {
954                 self.align = self.align.max(field.align(dl));
955                 self.primitive_align = self.primitive_align.max(field.primitive_align(dl));
956             }
957             self.min_size = cmp::max(self.min_size, field.size(dl));
958         }
959
960         debug!("Union::extend min-size: {:?}", self.min_size);
961
962         Ok(())
963     }
964
965     /// Get the size with trailing alignment padding.
966     pub fn stride(&self) -> Size {
967         self.min_size.abi_align(self.align)
968     }
969
970     pub fn over_align(&self) -> Option<u32> {
971         let align = self.align.abi();
972         let primitive_align = self.primitive_align.abi();
973         if align > primitive_align {
974             Some(align as u32)
975         } else {
976             None
977         }
978     }
979 }
980
981 /// The first half of a fat pointer.
982 /// - For a trait object, this is the address of the box.
983 /// - For a slice, this is the base address.
984 pub const FAT_PTR_ADDR: usize = 0;
985
986 /// The second half of a fat pointer.
987 /// - For a trait object, this is the address of the vtable.
988 /// - For a slice, this is the length.
989 pub const FAT_PTR_EXTRA: usize = 1;
990
991 /// Type layout, from which size and alignment can be cheaply computed.
992 /// For ADTs, it also includes field placement and enum optimizations.
993 /// NOTE: Because Layout is interned, redundant information should be
994 /// kept to a minimum, e.g. it includes no sub-component Ty or Layout.
995 #[derive(Debug, PartialEq, Eq, Hash)]
996 pub enum Layout {
997     /// TyBool, TyChar, TyInt, TyUint, TyFloat, TyRawPtr, TyRef or TyFnPtr.
998     Scalar {
999         value: Primitive,
1000         // If true, the value cannot represent a bit pattern of all zeroes.
1001         non_zero: bool
1002     },
1003
1004     /// SIMD vectors, from structs marked with #[repr(simd)].
1005     Vector {
1006         element: Primitive,
1007         count: u64
1008     },
1009
1010     /// TyArray, TySlice or TyStr.
1011     Array {
1012         /// If true, the size is exact, otherwise it's only a lower bound.
1013         sized: bool,
1014         align: Align,
1015         primitive_align: Align,
1016         element_size: Size,
1017         count: u64
1018     },
1019
1020     /// TyRawPtr or TyRef with a !Sized pointee.
1021     FatPointer {
1022         metadata: Primitive,
1023         /// If true, the pointer cannot be null.
1024         non_zero: bool
1025     },
1026
1027     // Remaining variants are all ADTs such as structs, enums or tuples.
1028
1029     /// C-like enums; basically an integer.
1030     CEnum {
1031         discr: Integer,
1032         signed: bool,
1033         non_zero: bool,
1034         /// Inclusive discriminant range.
1035         /// If min > max, it represents min...u64::MAX followed by 0...max.
1036         // FIXME(eddyb) always use the shortest range, e.g. by finding
1037         // the largest space between two consecutive discriminants and
1038         // taking everything else as the (shortest) discriminant range.
1039         min: u64,
1040         max: u64
1041     },
1042
1043     /// Single-case enums, and structs/tuples.
1044     Univariant {
1045         variant: Struct,
1046         /// If true, the structure is NonZero.
1047         // FIXME(eddyb) use a newtype Layout kind for this.
1048         non_zero: bool
1049     },
1050
1051     /// Untagged unions.
1052     UntaggedUnion {
1053         variants: Union,
1054     },
1055
1056     /// General-case enums: for each case there is a struct, and they
1057     /// all start with a field for the discriminant.
1058     General {
1059         discr: Integer,
1060         variants: Vec<Struct>,
1061         size: Size,
1062         align: Align,
1063         primitive_align: Align,
1064     },
1065
1066     /// Two cases distinguished by a nullable pointer: the case with discriminant
1067     /// `nndiscr` must have single field which is known to be nonnull due to its type.
1068     /// The other case is known to be zero sized. Hence we represent the enum
1069     /// as simply a nullable pointer: if not null it indicates the `nndiscr` variant,
1070     /// otherwise it indicates the other case.
1071     ///
1072     /// For example, `std::option::Option` instantiated at a safe pointer type
1073     /// is represented such that `None` is a null pointer and `Some` is the
1074     /// identity function.
1075     RawNullablePointer {
1076         nndiscr: u64,
1077         value: Primitive
1078     },
1079
1080     /// Two cases distinguished by a nullable pointer: the case with discriminant
1081     /// `nndiscr` is represented by the struct `nonnull`, where the `discrfield`th
1082     /// field is known to be nonnull due to its type; if that field is null, then
1083     /// it represents the other case, which is known to be zero sized.
1084     StructWrappedNullablePointer {
1085         nndiscr: u64,
1086         nonnull: Struct,
1087         /// N.B. There is a 0 at the start, for LLVM GEP through a pointer.
1088         discrfield: FieldPath,
1089         /// Like discrfield, but in source order. For debuginfo.
1090         discrfield_source: FieldPath
1091     }
1092 }
1093
1094 #[derive(Copy, Clone, Debug)]
1095 pub enum LayoutError<'tcx> {
1096     Unknown(Ty<'tcx>),
1097     SizeOverflow(Ty<'tcx>)
1098 }
1099
1100 impl<'tcx> fmt::Display for LayoutError<'tcx> {
1101     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
1102         match *self {
1103             LayoutError::Unknown(ty) => {
1104                 write!(f, "the type `{:?}` has an unknown layout", ty)
1105             }
1106             LayoutError::SizeOverflow(ty) => {
1107                 write!(f, "the type `{:?}` is too big for the current architecture", ty)
1108             }
1109         }
1110     }
1111 }
1112
1113 impl<'a, 'tcx> Layout {
1114     pub fn compute_uncached(tcx: TyCtxt<'a, 'tcx, 'tcx>,
1115                             param_env: ty::ParamEnv<'tcx>,
1116                             ty: Ty<'tcx>)
1117                             -> Result<&'tcx Layout, LayoutError<'tcx>> {
1118         let success = |layout| Ok(tcx.intern_layout(layout));
1119         let dl = &tcx.data_layout;
1120         assert!(!ty.has_infer_types());
1121
1122         let ptr_layout = |pointee: Ty<'tcx>| {
1123             let non_zero = !ty.is_unsafe_ptr();
1124             let pointee = tcx.normalize_associated_type_in_env(&pointee, param_env);
1125             if pointee.is_sized(tcx, param_env, DUMMY_SP) {
1126                 Ok(Scalar { value: Pointer, non_zero: non_zero })
1127             } else {
1128                 let unsized_part = tcx.struct_tail(pointee);
1129                 let meta = match unsized_part.sty {
1130                     ty::TySlice(_) | ty::TyStr => {
1131                         Int(dl.ptr_sized_integer())
1132                     }
1133                     ty::TyDynamic(..) => Pointer,
1134                     _ => return Err(LayoutError::Unknown(unsized_part))
1135                 };
1136                 Ok(FatPointer { metadata: meta, non_zero: non_zero })
1137             }
1138         };
1139
1140         let layout = match ty.sty {
1141             // Basic scalars.
1142             ty::TyBool => Scalar { value: Int(I1), non_zero: false },
1143             ty::TyChar => Scalar { value: Int(I32), non_zero: false },
1144             ty::TyInt(ity) => {
1145                 Scalar {
1146                     value: Int(Integer::from_attr(dl, attr::SignedInt(ity))),
1147                     non_zero: false
1148                 }
1149             }
1150             ty::TyUint(ity) => {
1151                 Scalar {
1152                     value: Int(Integer::from_attr(dl, attr::UnsignedInt(ity))),
1153                     non_zero: false
1154                 }
1155             }
1156             ty::TyFloat(FloatTy::F32) => Scalar { value: F32, non_zero: false },
1157             ty::TyFloat(FloatTy::F64) => Scalar { value: F64, non_zero: false },
1158             ty::TyFnPtr(_) => Scalar { value: Pointer, non_zero: true },
1159
1160             // The never type.
1161             ty::TyNever => Univariant {
1162                 variant: Struct::new(dl, &vec![], &ReprOptions::default(),
1163                   StructKind::AlwaysSizedUnivariant, ty)?,
1164                 non_zero: false
1165             },
1166
1167             // Potentially-fat pointers.
1168             ty::TyRef(_, ty::TypeAndMut { ty: pointee, .. }) |
1169             ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
1170                 ptr_layout(pointee)?
1171             }
1172             ty::TyAdt(def, _) if def.is_box() => {
1173                 ptr_layout(ty.boxed_ty())?
1174             }
1175
1176             // Arrays and slices.
1177             ty::TyArray(element, count) => {
1178                 let element = element.layout(tcx, param_env)?;
1179                 let element_size = element.size(dl);
1180                 // FIXME(eddyb) Don't use host `usize` for array lengths.
1181                 let usize_count: usize = count;
1182                 let count = usize_count as u64;
1183                 if element_size.checked_mul(count, dl).is_none() {
1184                     return Err(LayoutError::SizeOverflow(ty));
1185                 }
1186                 Array {
1187                     sized: true,
1188                     align: element.align(dl),
1189                     primitive_align: element.primitive_align(dl),
1190                     element_size,
1191                     count,
1192                 }
1193             }
1194             ty::TySlice(element) => {
1195                 let element = element.layout(tcx, param_env)?;
1196                 Array {
1197                     sized: false,
1198                     align: element.align(dl),
1199                     primitive_align: element.primitive_align(dl),
1200                     element_size: element.size(dl),
1201                     count: 0
1202                 }
1203             }
1204             ty::TyStr => {
1205                 Array {
1206                     sized: false,
1207                     align: dl.i8_align,
1208                     primitive_align: dl.i8_align,
1209                     element_size: Size::from_bytes(1),
1210                     count: 0
1211                 }
1212             }
1213
1214             // Odd unit types.
1215             ty::TyFnDef(..) => {
1216                 Univariant {
1217                     variant: Struct::new(dl, &vec![],
1218                       &ReprOptions::default(), StructKind::AlwaysSizedUnivariant, ty)?,
1219                     non_zero: false
1220                 }
1221             }
1222             ty::TyDynamic(..) => {
1223                 let mut unit = Struct::new(dl, &vec![], &ReprOptions::default(),
1224                   StructKind::AlwaysSizedUnivariant, ty)?;
1225                 unit.sized = false;
1226                 Univariant { variant: unit, non_zero: false }
1227             }
1228
1229             // Tuples, generators and closures.
1230             ty::TyGenerator(def_id, ref substs, _) => {
1231                 let tys = substs.field_tys(def_id, tcx);
1232                 let st = Struct::new(dl,
1233                     &tys.map(|ty| ty.layout(tcx, param_env))
1234                       .collect::<Result<Vec<_>, _>>()?,
1235                     &ReprOptions::default(),
1236                     StructKind::AlwaysSizedUnivariant, ty)?;
1237                 Univariant { variant: st, non_zero: false }
1238             }
1239
1240             ty::TyClosure(def_id, ref substs) => {
1241                 let tys = substs.upvar_tys(def_id, tcx);
1242                 let st = Struct::new(dl,
1243                     &tys.map(|ty| ty.layout(tcx, param_env))
1244                       .collect::<Result<Vec<_>, _>>()?,
1245                     &ReprOptions::default(),
1246                     StructKind::AlwaysSizedUnivariant, ty)?;
1247                 Univariant { variant: st, non_zero: false }
1248             }
1249
1250             ty::TyTuple(tys, _) => {
1251                 let kind = if tys.len() == 0 {
1252                     StructKind::AlwaysSizedUnivariant
1253                 } else {
1254                     StructKind::MaybeUnsizedUnivariant
1255                 };
1256
1257                 let st = Struct::new(dl,
1258                     &tys.iter().map(|ty| ty.layout(tcx, param_env))
1259                       .collect::<Result<Vec<_>, _>>()?,
1260                     &ReprOptions::default(), kind, ty)?;
1261                 Univariant { variant: st, non_zero: false }
1262             }
1263
1264             // SIMD vector types.
1265             ty::TyAdt(def, ..) if def.repr.simd() => {
1266                 let element = ty.simd_type(tcx);
1267                 match *element.layout(tcx, param_env)? {
1268                     Scalar { value, .. } => {
1269                         return success(Vector {
1270                             element: value,
1271                             count: ty.simd_size(tcx) as u64
1272                         });
1273                     }
1274                     _ => {
1275                         tcx.sess.fatal(&format!("monomorphising SIMD type `{}` with \
1276                                                 a non-machine element type `{}`",
1277                                                 ty, element));
1278                     }
1279                 }
1280             }
1281
1282             // ADTs.
1283             ty::TyAdt(def, substs) => {
1284                 if def.variants.is_empty() {
1285                     // Uninhabitable; represent as unit
1286                     // (Typechecking will reject discriminant-sizing attrs.)
1287
1288                     return success(Univariant {
1289                         variant: Struct::new(dl, &vec![],
1290                           &def.repr, StructKind::AlwaysSizedUnivariant, ty)?,
1291                         non_zero: false
1292                     });
1293                 }
1294
1295                 if def.is_enum() && def.variants.iter().all(|v| v.fields.is_empty()) {
1296                     // All bodies empty -> intlike
1297                     let (mut min, mut max, mut non_zero) = (i64::max_value(),
1298                                                             i64::min_value(),
1299                                                             true);
1300                     for discr in def.discriminants(tcx) {
1301                         let x = discr.to_u128_unchecked() as i64;
1302                         if x == 0 { non_zero = false; }
1303                         if x < min { min = x; }
1304                         if x > max { max = x; }
1305                     }
1306
1307                     // FIXME: should handle i128? signed-value based impl is weird and hard to
1308                     // grok.
1309                     let (discr, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max);
1310                     return success(CEnum {
1311                         discr,
1312                         signed,
1313                         non_zero,
1314                         // FIXME: should be u128?
1315                         min: min as u64,
1316                         max: max as u64
1317                     });
1318                 }
1319
1320                 if !def.is_enum() || (def.variants.len() == 1 &&
1321                                       !def.repr.inhibit_enum_layout_opt()) {
1322                     // Struct, or union, or univariant enum equivalent to a struct.
1323                     // (Typechecking will reject discriminant-sizing attrs.)
1324
1325                     let kind = if def.is_enum() || def.variants[0].fields.len() == 0{
1326                         StructKind::AlwaysSizedUnivariant
1327                     } else {
1328                         let param_env = tcx.param_env(def.did);
1329                         let fields = &def.variants[0].fields;
1330                         let last_field = &fields[fields.len()-1];
1331                         let always_sized = tcx.type_of(last_field.did)
1332                           .is_sized(tcx, param_env, DUMMY_SP);
1333                         if !always_sized { StructKind::MaybeUnsizedUnivariant }
1334                         else { StructKind::AlwaysSizedUnivariant }
1335                     };
1336
1337                     let fields = def.variants[0].fields.iter().map(|field| {
1338                         field.ty(tcx, substs).layout(tcx, param_env)
1339                     }).collect::<Result<Vec<_>, _>>()?;
1340                     let layout = if def.is_union() {
1341                         let mut un = Union::new(dl, &def.repr);
1342                         un.extend(dl, fields.iter().map(|&f| Ok(f)), ty)?;
1343                         UntaggedUnion { variants: un }
1344                     } else {
1345                         let st = Struct::new(dl, &fields, &def.repr,
1346                           kind, ty)?;
1347                         let non_zero = Some(def.did) == tcx.lang_items().non_zero();
1348                         Univariant { variant: st, non_zero: non_zero }
1349                     };
1350                     return success(layout);
1351                 }
1352
1353                 // Since there's at least one
1354                 // non-empty body, explicit discriminants should have
1355                 // been rejected by a checker before this point.
1356                 for (i, v) in def.variants.iter().enumerate() {
1357                     if v.discr != ty::VariantDiscr::Relative(i) {
1358                         bug!("non-C-like enum {} with specified discriminants",
1359                             tcx.item_path_str(def.did));
1360                     }
1361                 }
1362
1363                 // Cache the substituted and normalized variant field types.
1364                 let variants = def.variants.iter().map(|v| {
1365                     v.fields.iter().map(|field| field.ty(tcx, substs)).collect::<Vec<_>>()
1366                 }).collect::<Vec<_>>();
1367
1368                 if variants.len() == 2 && !def.repr.inhibit_enum_layout_opt() {
1369                     // Nullable pointer optimization
1370                     for discr in 0..2 {
1371                         let other_fields = variants[1 - discr].iter().map(|ty| {
1372                             ty.layout(tcx, param_env)
1373                         });
1374                         if !Struct::would_be_zero_sized(dl, other_fields)? {
1375                             continue;
1376                         }
1377                         let paths = Struct::non_zero_field_paths(tcx,
1378                                                                  param_env,
1379                                                                  variants[discr].iter().cloned(),
1380                                                                  None)?;
1381                         let (mut path, mut path_source) = if let Some(p) = paths { p }
1382                           else { continue };
1383
1384                         // FIXME(eddyb) should take advantage of a newtype.
1385                         if path == &[0] && variants[discr].len() == 1 {
1386                             let value = match *variants[discr][0].layout(tcx, param_env)? {
1387                                 Scalar { value, .. } => value,
1388                                 CEnum { discr, .. } => Int(discr),
1389                                 _ => bug!("Layout::compute: `{}`'s non-zero \
1390                                            `{}` field not scalar?!",
1391                                            ty, variants[discr][0])
1392                             };
1393                             return success(RawNullablePointer {
1394                                 nndiscr: discr as u64,
1395                                 value,
1396                             });
1397                         }
1398
1399                         let st = Struct::new(dl,
1400                             &variants[discr].iter().map(|ty| ty.layout(tcx, param_env))
1401                               .collect::<Result<Vec<_>, _>>()?,
1402                             &def.repr, StructKind::AlwaysSizedUnivariant, ty)?;
1403
1404                         // We have to fix the last element of path here.
1405                         let mut i = *path.last().unwrap();
1406                         i = st.memory_index[i as usize];
1407                         *path.last_mut().unwrap() = i;
1408                         path.push(0); // For GEP through a pointer.
1409                         path.reverse();
1410                         path_source.push(0);
1411                         path_source.reverse();
1412
1413                         return success(StructWrappedNullablePointer {
1414                             nndiscr: discr as u64,
1415                             nonnull: st,
1416                             discrfield: path,
1417                             discrfield_source: path_source
1418                         });
1419                     }
1420                 }
1421
1422                 // The general case.
1423                 let discr_max = (variants.len() - 1) as i64;
1424                 assert!(discr_max >= 0);
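                     // Pick the smallest integer able to hold every discriminant in
                     // 0..=discr_max, taking `#[repr]` hints into account (see
                     // `Integer::repr_discr`).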
1425                 let (min_ity, _) = Integer::repr_discr(tcx, ty, &def.repr, 0, discr_max);
1426                 let mut align = dl.aggregate_align;
1427                 let mut primitive_align = dl.aggregate_align;
1428                 let mut size = Size::from_bytes(0);
1429
1430                 // We're interested in the smallest alignment, so start large.
1431                 let mut start_align = Align::from_bytes(256, 256).unwrap();
1432
1433                 // Create the set of structs that represent each variant
1434                 // Use the minimum integer type we figured out above
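                     // The discriminant is prepended as field 0 of each variant's struct;
                     // if a wider discriminant type is chosen below, those offsets get
                     // patched up afterwards.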
1435                 let discr = Scalar { value: Int(min_ity), non_zero: false };
1436                 let mut variants = variants.into_iter().map(|fields| {
1437                     let mut fields = fields.into_iter().map(|field| {
1438                         field.layout(tcx, param_env)
1439                     }).collect::<Result<Vec<_>, _>>()?;
1440                     fields.insert(0, &discr);
1441                     let st = Struct::new(dl,
1442                         &fields,
1443                         &def.repr, StructKind::EnumVariant, ty)?;
1444                     // Find the first field we can't move later
1445                     // to make room for a larger discriminant.
1446                     // It is important to skip the first field.
1447                     for i in st.field_index_by_increasing_offset().skip(1) {
1448                         let field = fields[i];
1449                         let field_align = field.align(dl);
1450                         if field.size(dl).bytes() != 0 || field_align.abi() != 1 {
1451                             start_align = start_align.min(field_align);
1452                             break;
1453                         }
1454                     }
1455                     size = cmp::max(size, st.min_size);
1456                     align = align.max(st.align);
1457                     primitive_align = primitive_align.max(st.primitive_align);
1458                     Ok(st)
1459                 }).collect::<Result<Vec<_>, _>>()?;
1460
1461                 // Align the maximum variant size to the largest alignment.
1462                 size = size.abi_align(align);
1463
1464                 if size.bytes() >= dl.obj_size_bound() {
1465                     return Err(LayoutError::SizeOverflow(ty));
1466                 }
1467
1468                 let typeck_ity = Integer::from_attr(dl, def.repr.discr_type());
1469                 if typeck_ity < min_ity {
1470                     // It is a bug if layout decided on a larger discriminant size than
1471                     // typeck did at this point (based on the values the discriminant can
1472                     // take on), because the discriminant will be loaded and then stored
1473                     // into a variable of the type computed by typeck. Consider this case
1474                     // (a bug): typeck decided on a byte-sized discriminant, but layout
1475                     // thinks 16 bits are needed to hold all the discriminant values. Then,
1476                     // in trans, storing that 16-bit discriminant into an 8-bit temporary
1477                     // would have to discard part of the value (or layout is wrong in
1478                     // thinking it needs 16 bits).
1479                     bug!("layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
1480                          min_ity, typeck_ity);
1481                     // However, it is fine to make the discr type larger (as an optimisation)
1482                     // after this point - we'll just truncate the value we load in trans.
1483                 }
1484
1485                 // Check to see if we should use a different type for the
1486                 // discriminant. We can safely use a type with the same size
1487                 // as the alignment of the first field of each variant.
1488                 // We increase the size of the discriminant to avoid LLVM copying
1489                 // padding when it doesn't need to. This normally causes unaligned
1490                 // load/stores and excessive memcpy/memset operations. By using a
1491                 // bigger integer size, LLVM can be sure about its contents and
1492                 // won't be so conservative.
1493
1494                 // Use the initial field alignment
1495                 let mut ity = Integer::for_abi_align(dl, start_align).unwrap_or(min_ity);
1496
1497                 // If the alignment is not larger than the chosen discriminant size,
1498                 // don't use the alignment as the final size.
1499                 if ity <= min_ity {
1500                     ity = min_ity;
1501                 } else {
1502                     // Patch up the variants' first few fields.
1503                     let old_ity_size = Int(min_ity).size(dl);
1504                     let new_ity_size = Int(ity).size(dl);
1505                     for variant in &mut variants {
1506                         for i in variant.offsets.iter_mut() {
1507                             // The first field is the discriminant, at offset 0.
1508                             // The offsets aren't in field order, so skip it via the `> 0` check.
1509                             if *i <= old_ity_size && *i > Size::from_bytes(0) {
1510                                 *i = new_ity_size;
1511                             }
1512                         }
1513                         // We might be making the struct larger.
1514                         if variant.min_size <= old_ity_size {
1515                             variant.min_size = new_ity_size;
1516                         }
1517                     }
1518                 }
1519
1520                 General {
1521                     discr: ity,
1522                     variants,
1523                     size,
1524                     align,
1525                     primitive_align,
1526                 }
1527             }
1528
1529             // Types with no meaningful known layout.
1530             ty::TyProjection(_) | ty::TyAnon(..) => {
1531                 let normalized = tcx.normalize_associated_type_in_env(&ty, param_env);
1532                 if ty == normalized {
1533                     return Err(LayoutError::Unknown(ty));
1534                 }
1535                 return normalized.layout(tcx, param_env);
1536             }
1537             ty::TyParam(_) => {
1538                 return Err(LayoutError::Unknown(ty));
1539             }
1540             ty::TyInfer(_) | ty::TyError => {
1541                 bug!("Layout::compute: unexpected type `{}`", ty)
1542             }
1543         };
1544
1545         success(layout)
1546     }
1547
1548     /// Returns true if the layout corresponds to an unsized type.
1549     pub fn is_unsized(&self) -> bool {
1550         match *self {
1551             Scalar {..} | Vector {..} | FatPointer {..} |
1552             CEnum {..} | UntaggedUnion {..} | General {..} |
1553             RawNullablePointer {..} |
1554             StructWrappedNullablePointer {..} => false,
1555
1556             Array { sized, .. } |
1557             Univariant { variant: Struct { sized, .. }, .. } => !sized
1558         }
1559     }
1560
1561     pub fn size<C: HasDataLayout>(&self, cx: C) -> Size {
1562         let dl = cx.data_layout();
1563
1564         match *self {
1565             Scalar { value, .. } | RawNullablePointer { value, .. } => {
1566                 value.size(dl)
1567             }
1568
1569             Vector { element, count } => {
1570                 let element_size = element.size(dl);
1571                 let vec_size = match element_size.checked_mul(count, dl) {
1572                     Some(size) => size,
1573                     None => bug!("Layout::size({:?}): {} * {} overflowed",
1574                                  self, element_size.bytes(), count)
1575                 };
1576                 vec_size.abi_align(self.align(dl))
1577             }
1578
1579             Array { element_size, count, .. } => {
1580                 match element_size.checked_mul(count, dl) {
1581                     Some(size) => size,
1582                     None => bug!("Layout::size({:?}): {} * {} overflowed",
1583                                  self, element_size.bytes(), count)
1584                 }
1585             }
1586
1587             FatPointer { metadata, .. } => {
1588                 // Effectively a (ptr, meta) tuple.
1589                 Pointer.size(dl).abi_align(metadata.align(dl))
1590                        .checked_add(metadata.size(dl), dl).unwrap()
1591                        .abi_align(self.align(dl))
1592             }
1593
1594             CEnum { discr, .. } => Int(discr).size(dl),
1595             General { size, .. } => size,
1596             UntaggedUnion { ref variants } => variants.stride(),
1597
1598             Univariant { ref variant, .. } |
1599             StructWrappedNullablePointer { nonnull: ref variant, .. } => {
1600                 variant.stride()
1601             }
1602         }
1603     }
1604
1605     pub fn align<C: HasDataLayout>(&self, cx: C) -> Align {
1606         let dl = cx.data_layout();
1607
1608         match *self {
1609             Scalar { value, .. } | RawNullablePointer { value, .. } => {
1610                 value.align(dl)
1611             }
1612
1613             Vector { element, count } => {
1614                 let elem_size = element.size(dl);
1615                 let vec_size = match elem_size.checked_mul(count, dl) {
1616                     Some(size) => size,
1617                     None => bug!("Layout::align({:?}): {} * {} overflowed",
1618                                  self, elem_size.bytes(), count)
1619                 };
1620                 for &(size, align) in &dl.vector_align {
1621                     if size == vec_size {
1622                         return align;
1623                     }
1624                 }
1625                 // Default to natural alignment, which is what LLVM does.
1626                 // That is, use the size, rounded up to a power of 2.
1627                 let align = vec_size.bytes().next_power_of_two();
1628                 Align::from_bytes(align, align).unwrap()
1629             }
1630
1631             FatPointer { metadata, .. } => {
1632                 // Effectively a (ptr, meta) tuple.
1633                 Pointer.align(dl).max(metadata.align(dl))
1634             }
1635
1636             CEnum { discr, .. } => Int(discr).align(dl),
1637             Array { align, .. } | General { align, .. } => align,
1638             UntaggedUnion { ref variants } => variants.align,
1639
1640             Univariant { ref variant, .. } |
1641             StructWrappedNullablePointer { nonnull: ref variant, .. } => {
1642                 variant.align
1643             }
1644         }
1645     }
1646
1647     /// Returns alignment before repr alignment is applied
1648     pub fn primitive_align(&self, dl: &TargetDataLayout) -> Align {
1649         match *self {
1650             Array { primitive_align, .. } | General { primitive_align, .. } => primitive_align,
1651             Univariant { ref variant, .. } |
1652             StructWrappedNullablePointer { nonnull: ref variant, .. } => {
1653                 variant.primitive_align
1654             },
1655
1656             _ => self.align(dl)
1657         }
1658     }
1659
1660     /// Returns repr alignment if it is greater than the primitive alignment.
1661     pub fn over_align(&self, dl: &TargetDataLayout) -> Option<u32> {
1662         let align = self.align(dl);
1663         let primitive_align = self.primitive_align(dl);
1664         if align.abi() > primitive_align.abi() {
1665             Some(align.abi() as u32)
1666         } else {
1667             None
1668         }
1669     }
1670
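         /// Returns the byte offset of field `i`. For `General` enum layouts the
         /// offsets are stored per variant, so `variant_index` must be provided.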
1671     pub fn field_offset<C: HasDataLayout>(&self,
1672                                           cx: C,
1673                                           i: usize,
1674                                           variant_index: Option<usize>)
1675                                           -> Size {
1676         let dl = cx.data_layout();
1677
1678         match *self {
1679             Scalar { .. } |
1680             CEnum { .. } |
1681             UntaggedUnion { .. } |
1682             RawNullablePointer { .. } => {
1683                 Size::from_bytes(0)
1684             }
1685
1686             Vector { element, count } => {
1687                 let element_size = element.size(dl);
1688                 let i = i as u64;
1689                 assert!(i < count);
1690                 Size::from_bytes(element_size.bytes() * i)
1691             }
1692
1693             Array { element_size, count, .. } => {
1694                 let i = i as u64;
1695                 assert!(i < count);
1696                 Size::from_bytes(element_size.bytes() * i)
1697             }
1698
1699             FatPointer { metadata, .. } => {
1700                 // Effectively a (ptr, meta) tuple.
1701                 assert!(i < 2);
1702                 if i == 0 {
1703                     Size::from_bytes(0)
1704                 } else {
1705                     Pointer.size(dl).abi_align(metadata.align(dl))
1706                 }
1707             }
1708
1709             Univariant { ref variant, .. } => variant.offsets[i],
1710
1711             General { ref variants, .. } => {
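                     // Offsets are shifted by one because field 0 of every variant's
                     // struct is the discriminant.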
1712                 let v = variant_index.expect("variant index required");
1713                 variants[v].offsets[i + 1]
1714             }
1715
1716             StructWrappedNullablePointer { nndiscr, ref nonnull, .. } => {
1717                 if Some(nndiscr as usize) == variant_index {
1718                     nonnull.offsets[i]
1719                 } else {
1720                     Size::from_bytes(0)
1721                 }
1722             }
1723         }
1724     }
1725
1726     /// This is invoked by the `layout_raw` query to record the final
1727     /// layout of each type for `-Zprint-type-sizes`.
1728     #[inline]
1729     pub fn record_layout_for_printing(tcx: TyCtxt<'a, 'tcx, 'tcx>,
1730                                       ty: Ty<'tcx>,
1731                                       param_env: ty::ParamEnv<'tcx>,
1732                                       layout: &Layout) {
1733         // If we are running with `-Zprint-type-sizes`, record layouts for
1734         // dumping later. Ignore layouts computed in non-empty parameter
1735         // environments or for non-monomorphic types, as the user only wants
1736         // to see the results of the final trans session.
1737         if
1738             !tcx.sess.opts.debugging_opts.print_type_sizes ||
1739             ty.has_param_types() ||
1740             ty.has_self_ty() ||
1741             !param_env.caller_bounds.is_empty()
1742         {
1743             return;
1744         }
1745
1746         Self::record_layout_for_printing_outlined(tcx, ty, param_env, layout)
1747     }
1748
1749     fn record_layout_for_printing_outlined(tcx: TyCtxt<'a, 'tcx, 'tcx>,
1750                                            ty: Ty<'tcx>,
1751                                            param_env: ty::ParamEnv<'tcx>,
1752                                            layout: &Layout) {
1753         // (delay format until we actually need it)
1754         let record = |kind, opt_discr_size, variants| {
1755             let type_desc = format!("{:?}", ty);
1756             let overall_size = layout.size(tcx);
1757             let align = layout.align(tcx);
1758             tcx.sess.code_stats.borrow_mut().record_type_size(kind,
1759                                                               type_desc,
1760                                                               align,
1761                                                               overall_size,
1762                                                               opt_discr_size,
1763                                                               variants);
1764         };
1765
1766         let (adt_def, substs) = match ty.sty {
1767             ty::TyAdt(ref adt_def, substs) => {
1768                 debug!("print-type-size t: `{:?}` process adt", ty);
1769                 (adt_def, substs)
1770             }
1771
1772             ty::TyClosure(..) => {
1773                 debug!("print-type-size t: `{:?}` record closure", ty);
1774                 record(DataTypeKind::Closure, None, vec![]);
1775                 return;
1776             }
1777
1778             _ => {
1779                 debug!("print-type-size t: `{:?}` skip non-nominal", ty);
1780                 return;
1781             }
1782         };
1783
1784         let adt_kind = adt_def.adt_kind();
1785
1786         let build_field_info = |(field_name, field_ty): (ast::Name, Ty<'tcx>), offset: &Size| {
1787             let layout = field_ty.layout(tcx, param_env);
1788             match layout {
1789                 Err(_) => bug!("no layout found for field {} type: `{:?}`", field_name, field_ty),
1790                 Ok(field_layout) => {
1791                     session::FieldInfo {
1792                         name: field_name.to_string(),
1793                         offset: offset.bytes(),
1794                         size: field_layout.size(tcx).bytes(),
1795                         align: field_layout.align(tcx).abi(),
1796                     }
1797                 }
1798             }
1799         };
1800
1801         let build_primitive_info = |name: ast::Name, value: &Primitive| {
1802             session::VariantInfo {
1803                 name: Some(name.to_string()),
1804                 kind: session::SizeKind::Exact,
1805                 align: value.align(tcx).abi(),
1806                 size: value.size(tcx).bytes(),
1807                 fields: vec![],
1808             }
1809         };
1810
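             // For `General` enum variants the first recorded offset belongs to the
             // discriminant, so `WithDiscrim` skips it when reporting field info.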
1811         enum Fields<'a> {
1812             WithDiscrim(&'a Struct),
1813             NoDiscrim(&'a Struct),
1814         }
1815
1816         let build_variant_info = |n: Option<ast::Name>,
1817                                   flds: &[(ast::Name, Ty<'tcx>)],
1818                                   layout: Fields| {
1819             let (s, field_offsets) = match layout {
1820                 Fields::WithDiscrim(s) => (s, &s.offsets[1..]),
1821                 Fields::NoDiscrim(s) => (s, &s.offsets[0..]),
1822             };
1823             let field_info: Vec<_> =
1824                 flds.iter()
1825                     .zip(field_offsets.iter())
1826                     .map(|(&field_name_ty, offset)| build_field_info(field_name_ty, offset))
1827                     .collect();
1828
1829             session::VariantInfo {
1830                 name: n.map(|n|n.to_string()),
1831                 kind: if s.sized {
1832                     session::SizeKind::Exact
1833                 } else {
1834                     session::SizeKind::Min
1835                 },
1836                 align: s.align.abi(),
1837                 size: s.min_size.bytes(),
1838                 fields: field_info,
1839             }
1840         };
1841
1842         match *layout {
1843             Layout::StructWrappedNullablePointer { nonnull: ref variant_layout,
1844                                                    nndiscr,
1845                                                    discrfield: _,
1846                                                    discrfield_source: _ } => {
1847                 debug!("print-type-size t: `{:?}` adt struct-wrapped nullable nndiscr {} is {:?}",
1848                        ty, nndiscr, variant_layout);
1849                 let variant_def = &adt_def.variants[nndiscr as usize];
1850                 let fields: Vec<_> =
1851                     variant_def.fields.iter()
1852                                       .map(|field_def| (field_def.name, field_def.ty(tcx, substs)))
1853                                       .collect();
1854                 record(adt_kind.into(),
1855                        None,
1856                        vec![build_variant_info(Some(variant_def.name),
1857                                                &fields,
1858                                                Fields::NoDiscrim(variant_layout))]);
1859             }
1860             Layout::RawNullablePointer { nndiscr, value } => {
1861                 debug!("print-type-size t: `{:?}` adt raw nullable nndiscr {} is {:?}",
1862                        ty, nndiscr, value);
1863                 let variant_def = &adt_def.variants[nndiscr as usize];
1864                 record(adt_kind.into(), None,
1865                        vec![build_primitive_info(variant_def.name, &value)]);
1866             }
1867             Layout::Univariant { variant: ref variant_layout, non_zero: _ } => {
1868                 let variant_names = || {
1869                     adt_def.variants.iter().map(|v|format!("{}", v.name)).collect::<Vec<_>>()
1870                 };
1871                 debug!("print-type-size t: `{:?}` adt univariant {:?} variants: {:?}",
1872                        ty, variant_layout, variant_names());
1873                 assert!(adt_def.variants.len() <= 1,
1874                         "univariant with variants {:?}", variant_names());
1875                 if adt_def.variants.len() == 1 {
1876                     let variant_def = &adt_def.variants[0];
1877                     let fields: Vec<_> =
1878                         variant_def.fields.iter()
1879                                           .map(|f| (f.name, f.ty(tcx, substs)))
1880                                           .collect();
1881                     record(adt_kind.into(),
1882                            None,
1883                            vec![build_variant_info(Some(variant_def.name),
1884                                                    &fields,
1885                                                    Fields::NoDiscrim(variant_layout))]);
1886                 } else {
1887                     // (This case arises for *empty* enums; so give it
1888                     // zero variants.)
1889                     record(adt_kind.into(), None, vec![]);
1890                 }
1891             }
1892
1893             Layout::General { ref variants, discr, .. } => {
1894                 debug!("print-type-size t: `{:?}` adt general variants def {} layouts {} {:?}",
1895                        ty, adt_def.variants.len(), variants.len(), variants);
1896                 let variant_infos: Vec<_> =
1897                     adt_def.variants.iter()
1898                                     .zip(variants.iter())
1899                                     .map(|(variant_def, variant_layout)| {
1900                                         let fields: Vec<_> =
1901                                             variant_def.fields
1902                                                        .iter()
1903                                                        .map(|f| (f.name, f.ty(tcx, substs)))
1904                                                        .collect();
1905                                         build_variant_info(Some(variant_def.name),
1906                                                            &fields,
1907                                                            Fields::WithDiscrim(variant_layout))
1908                                     })
1909                                     .collect();
1910                 record(adt_kind.into(), Some(discr.size()), variant_infos);
1911             }
1912
1913             Layout::UntaggedUnion { ref variants } => {
1914                 debug!("print-type-size t: `{:?}` adt union variants {:?}",
1915                        ty, variants);
1916                 // layout does not currently store info about each
1917                 // variant...
1918                 record(adt_kind.into(), None, Vec::new());
1919             }
1920
1921             Layout::CEnum { discr, .. } => {
1922                 debug!("print-type-size t: `{:?}` adt c-like enum", ty);
1923                 let variant_infos: Vec<_> =
1924                     adt_def.variants.iter()
1925                                     .map(|variant_def| {
1926                                         build_primitive_info(variant_def.name,
1927                                                              &Primitive::Int(discr))
1928                                     })
1929                                     .collect();
1930                 record(adt_kind.into(), Some(discr.size()), variant_infos);
1931             }
1932
1933             // Other cases provide little size info that is interesting
1934             // (i.e. adjustable via representation tweaks) beyond the total size.
1935             Layout::Scalar { .. } |
1936             Layout::Vector { .. } |
1937             Layout::Array { .. } |
1938             Layout::FatPointer { .. } => {
1939                 debug!("print-type-size t: `{:?}` adt other", ty);
1940                 record(adt_kind.into(), None, Vec::new())
1941             }
1942         }
1943     }
1944 }
1945
1946 /// Type size "skeleton", i.e. the only information determining a type's size.
1947 /// While this is conservative (aside from constant sizes, only pointers,
1948 /// newtypes thereof, and null-pointer-optimized enums are allowed), it is
1949 /// enough to statically check common use cases of transmute.
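     /// For example, `&'a T` and `Option<&'b T>` both reduce to
     /// `SizeSkeleton::Pointer { tail: T, .. }` when `T` is a type parameter,
     /// so a transmute between them can be accepted even though `T`'s size is unknown.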
1950 #[derive(Copy, Clone, Debug)]
1951 pub enum SizeSkeleton<'tcx> {
1952     /// Any statically computable Layout.
1953     Known(Size),
1954
1955     /// A potentially-fat pointer.
1956     Pointer {
1957         /// If true, this pointer is never null.
1958         non_zero: bool,
1959         /// The type which determines the unsized metadata, if any,
1960         /// of this pointer. Either a type parameter or a projection
1961         /// depending on one, with regions erased.
1962         tail: Ty<'tcx>
1963     }
1964 }
1965
1966 impl<'a, 'tcx> SizeSkeleton<'tcx> {
1967     pub fn compute(ty: Ty<'tcx>,
1968                    tcx: TyCtxt<'a, 'tcx, 'tcx>,
1969                    param_env: ty::ParamEnv<'tcx>)
1970                    -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
1971         assert!(!ty.has_infer_types());
1972
1973         // First try computing a static layout.
1974         let err = match ty.layout(tcx, param_env) {
1975             Ok(layout) => {
1976                 return Ok(SizeSkeleton::Known(layout.size(tcx)));
1977             }
1978             Err(err) => err
1979         };
1980
1981         let ptr_skeleton = |pointee: Ty<'tcx>| {
1982             let non_zero = !ty.is_unsafe_ptr();
1983             let tail = tcx.struct_tail(pointee);
1984             match tail.sty {
1985                 ty::TyParam(_) | ty::TyProjection(_) => {
1986                     assert!(tail.has_param_types() || tail.has_self_ty());
1987                     Ok(SizeSkeleton::Pointer {
1988                         non_zero,
1989                         tail: tcx.erase_regions(&tail)
1990                     })
1991                 }
1992                 _ => {
1993                     bug!("SizeSkeleton::compute({}): layout errored ({}), yet \
1994                             tail `{}` is not a type parameter or a projection",
1995                             ty, err, tail)
1996                 }
1997             }
1998         };
1999
2000         match ty.sty {
2001             ty::TyRef(_, ty::TypeAndMut { ty: pointee, .. }) |
2002             ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
2003                 ptr_skeleton(pointee)
2004             }
2005             ty::TyAdt(def, _) if def.is_box() => {
2006                 ptr_skeleton(ty.boxed_ty())
2007             }
2008
2009             ty::TyAdt(def, substs) => {
2010                 // Only newtypes and enums w/ nullable pointer optimization.
2011                 if def.is_union() || def.variants.is_empty() || def.variants.len() > 2 {
2012                     return Err(err);
2013                 }
2014
2015                 // Get a zero-sized variant or a pointer newtype.
2016                 let zero_or_ptr_variant = |i: usize| {
2017                     let fields = def.variants[i].fields.iter().map(|field| {
2018                         SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env)
2019                     });
2020                     let mut ptr = None;
2021                     for field in fields {
2022                         let field = field?;
2023                         match field {
2024                             SizeSkeleton::Known(size) => {
2025                                 if size.bytes() > 0 {
2026                                     return Err(err);
2027                                 }
2028                             }
2029                             SizeSkeleton::Pointer {..} => {
2030                                 if ptr.is_some() {
2031                                     return Err(err);
2032                                 }
2033                                 ptr = Some(field);
2034                             }
2035                         }
2036                     }
2037                     Ok(ptr)
2038                 };
2039
2040                 let v0 = zero_or_ptr_variant(0)?;
2041                 // Newtype.
2042                 if def.variants.len() == 1 {
2043                     if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
2044                         return Ok(SizeSkeleton::Pointer {
2045                             non_zero: non_zero ||
2046                                 Some(def.did) == tcx.lang_items().non_zero(),
2047                             tail,
2048                         });
2049                     } else {
2050                         return Err(err);
2051                     }
2052                 }
2053
2054                 let v1 = zero_or_ptr_variant(1)?;
2055                 // Nullable pointer enum optimization.
2056                 match (v0, v1) {
2057                     (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None) |
2058                     (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
2059                         Ok(SizeSkeleton::Pointer {
2060                             non_zero: false,
2061                             tail,
2062                         })
2063                     }
2064                     _ => Err(err)
2065                 }
2066             }
2067
2068             ty::TyProjection(_) | ty::TyAnon(..) => {
2069                 let normalized = tcx.normalize_associated_type_in_env(&ty, param_env);
2070                 if ty == normalized {
2071                     Err(err)
2072                 } else {
2073                     SizeSkeleton::compute(normalized, tcx, param_env)
2074                 }
2075             }
2076
2077             _ => Err(err)
2078         }
2079     }
2080
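         /// Whether two skeletons are guaranteed to have the same size: either both
         /// are the same statically known size, or both are pointers with the same
         /// tail type.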
2081     pub fn same_size(self, other: SizeSkeleton) -> bool {
2082         match (self, other) {
2083             (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
2084             (SizeSkeleton::Pointer { tail: a, .. },
2085              SizeSkeleton::Pointer { tail: b, .. }) => a == b,
2086             _ => false
2087         }
2088     }
2089 }
2090
2091 /// A pair of a type and its layout. Implements various
2092 /// type traversal APIs (e.g. recursing into fields).
2093 #[derive(Copy, Clone, Debug)]
2094 pub struct TyLayout<'tcx> {
2095     pub ty: Ty<'tcx>,
2096     pub layout: &'tcx Layout,
2097     pub variant_index: Option<usize>,
2098 }
2099
2100 impl<'tcx> Deref for TyLayout<'tcx> {
2101     type Target = Layout;
2102     fn deref(&self) -> &Layout {
2103         self.layout
2104     }
2105 }
2106
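     /// A context able to compute type layouts: provides access to the type
     /// context, projection normalization, and the layout computation itself.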
2107 pub trait LayoutTyper<'tcx>: HasDataLayout {
2108     type TyLayout;
2109
2110     fn tcx<'a>(&'a self) -> TyCtxt<'a, 'tcx, 'tcx>;
2111     fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout;
2112     fn normalize_projections(self, ty: Ty<'tcx>) -> Ty<'tcx>;
2113 }
2114
2115 /// Combines a tcx with the parameter environment so that you can
2116 /// compute layout operations.
2117 #[derive(Copy, Clone)]
2118 pub struct LayoutCx<'a, 'tcx: 'a> {
2119     tcx: TyCtxt<'a, 'tcx, 'tcx>,
2120     param_env: ty::ParamEnv<'tcx>,
2121 }
2122
2123 impl<'a, 'tcx> LayoutCx<'a, 'tcx> {
2124     pub fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>, param_env: ty::ParamEnv<'tcx>) -> Self {
2125         LayoutCx { tcx, param_env }
2126     }
2127 }
2128
2129 impl<'a, 'tcx> HasDataLayout for LayoutCx<'a, 'tcx> {
2130     fn data_layout(&self) -> &TargetDataLayout {
2131         &self.tcx.data_layout
2132     }
2133 }
2134
2135 impl<'a, 'tcx> LayoutTyper<'tcx> for LayoutCx<'a, 'tcx> {
2136     type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;
2137
2138     fn tcx<'b>(&'b self) -> TyCtxt<'b, 'tcx, 'tcx> {
2139         self.tcx
2140     }
2141
2142     fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
2143         let ty = self.normalize_projections(ty);
2144
2145         Ok(TyLayout {
2146             ty,
2147             layout: ty.layout(self.tcx, self.param_env)?,
2148             variant_index: None
2149         })
2150     }
2151
2152     fn normalize_projections(self, ty: Ty<'tcx>) -> Ty<'tcx> {
2153         self.tcx.normalize_associated_type_in_env(&ty, self.param_env)
2154     }
2155 }
2156
2157 impl<'a, 'tcx> TyLayout<'tcx> {
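         /// Returns this layout with `variant_index` selected, so that field
         /// offsets, counts and types are resolved against that enum variant.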
2158     pub fn for_variant(&self, variant_index: usize) -> Self {
2159         TyLayout {
2160             variant_index: Some(variant_index),
2161             ..*self
2162         }
2163     }
2164
2165     pub fn field_offset<C: HasDataLayout>(&self, cx: C, i: usize) -> Size {
2166         self.layout.field_offset(cx, i, self.variant_index)
2167     }
2168
2169     pub fn field_count(&self) -> usize {
2170         // Handle enum/union through the type rather than Layout.
2171         if let ty::TyAdt(def, _) = self.ty.sty {
2172             let v = self.variant_index.unwrap_or(0);
2173             if def.variants.is_empty() {
2174                 assert_eq!(v, 0);
2175                 return 0;
2176             } else {
2177                 return def.variants[v].fields.len();
2178             }
2179         }
2180
2181         match *self.layout {
2182             Scalar { .. } => {
2183                 bug!("TyLayout::field_count({:?}): not applicable", self)
2184             }
2185
2186             // Handled above (the TyAdt case).
2187             CEnum { .. } |
2188             General { .. } |
2189             UntaggedUnion { .. } |
2190             RawNullablePointer { .. } |
2191             StructWrappedNullablePointer { .. } => bug!(),
2192
2193             FatPointer { .. } => 2,
2194
2195             Vector { count, .. } |
2196             Array { count, .. } => {
2197                 let usize_count = count as usize;
2198                 assert_eq!(usize_count as u64, count);
2199                 usize_count
2200             }
2201
2202             Univariant { ref variant, .. } => variant.offsets.len(),
2203         }
2204     }
2205
2206     pub fn field_type<C: LayoutTyper<'tcx>>(&self, cx: C, i: usize) -> Ty<'tcx> {
2207         let tcx = cx.tcx();
2208
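             // For (potentially fat) pointers, field 0 is the data pointer and
             // field 1 is the unsized metadata: the length for slices and `str`,
             // or the vtable pointer for trait objects.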
2209         let ptr_field_type = |pointee: Ty<'tcx>| {
2210             assert!(i < 2);
2211             let slice = |element: Ty<'tcx>| {
2212                 if i == 0 {
2213                     tcx.mk_mut_ptr(element)
2214                 } else {
2215                     tcx.types.usize
2216                 }
2217             };
2218             match tcx.struct_tail(pointee).sty {
2219                 ty::TySlice(element) => slice(element),
2220                 ty::TyStr => slice(tcx.types.u8),
2221                 ty::TyDynamic(..) => tcx.mk_mut_ptr(tcx.mk_nil()),
2222                 _ => bug!("TyLayout::field_type({:?}): not applicable", self)
2223             }
2224         };
2225
2226         match self.ty.sty {
2227             ty::TyBool |
2228             ty::TyChar |
2229             ty::TyInt(_) |
2230             ty::TyUint(_) |
2231             ty::TyFloat(_) |
2232             ty::TyFnPtr(_) |
2233             ty::TyNever |
2234             ty::TyFnDef(..) |
2235             ty::TyDynamic(..) => {
2236                 bug!("TyLayout::field_type({:?}): not applicable", self)
2237             }
2238
2239             // Potentially-fat pointers.
2240             ty::TyRef(_, ty::TypeAndMut { ty: pointee, .. }) |
2241             ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
2242                 ptr_field_type(pointee)
2243             }
2244             ty::TyAdt(def, _) if def.is_box() => {
2245                 ptr_field_type(self.ty.boxed_ty())
2246             }
2247
2248             // Arrays and slices.
2249             ty::TyArray(element, _) |
2250             ty::TySlice(element) => element,
2251             ty::TyStr => tcx.types.u8,
2252
2253             // Tuples, generators and closures.
2254             ty::TyClosure(def_id, ref substs) => {
2255                 substs.upvar_tys(def_id, tcx).nth(i).unwrap()
2256             }
2257
2258             ty::TyGenerator(def_id, ref substs, _) => {
2259                 substs.field_tys(def_id, tcx).nth(i).unwrap()
2260             }
2261
2262             ty::TyTuple(tys, _) => tys[i],
2263
2264             // SIMD vector types.
2265             ty::TyAdt(def, ..) if def.repr.simd() => {
2266                 self.ty.simd_type(tcx)
2267             }
2268
2269             // ADTs.
2270             ty::TyAdt(def, substs) => {
2271                 def.variants[self.variant_index.unwrap_or(0)].fields[i].ty(tcx, substs)
2272             }
2273
2274             ty::TyProjection(_) | ty::TyAnon(..) | ty::TyParam(_) |
2275             ty::TyInfer(_) | ty::TyError => {
2276                 bug!("TyLayout::field_type: unexpected type `{}`", self.ty)
2277             }
2278         }
2279     }
2280
2281     pub fn field<C: LayoutTyper<'tcx>>(&self,
2282                                        cx: C,
2283                                        i: usize)
2284                                        -> C::TyLayout {
2285         cx.layout_of(cx.normalize_projections(self.field_type(cx, i)))
2286     }
2287 }