1 // Copyright 2016 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 pub use self::Integer::*;
12 pub use self::Layout::*;
13 pub use self::Primitive::*;
18 use ty::{self, Ty, TyCtxt, TypeFoldable};
20 use syntax::ast::{FloatTy, IntTy, UintTy};
22 use syntax_pos::DUMMY_SP;
23 use rustc_const_math::ConstInt;
30 /// Parsed [Data layout](http://llvm.org/docs/LangRef.html#data-layout)
31 /// for a target, which contains everything needed to compute layouts.
32 pub struct TargetDataLayout {
39 pub i128_align: Align,
42 pub pointer_size: Size,
43 pub pointer_align: Align,
44 pub aggregate_align: Align,
46 /// Alignments for vector types.
47 pub vector_align: Vec<(Size, Align)>
50 impl Default for TargetDataLayout {
51 /// Creates an instance of `TargetDataLayout`.
52 fn default() -> TargetDataLayout {
55 i1_align: Align::from_bits(8, 8).unwrap(),
56 i8_align: Align::from_bits(8, 8).unwrap(),
57 i16_align: Align::from_bits(16, 16).unwrap(),
58 i32_align: Align::from_bits(32, 32).unwrap(),
59 i64_align: Align::from_bits(32, 64).unwrap(),
60 i128_align: Align::from_bits(32, 64).unwrap(),
61 f32_align: Align::from_bits(32, 32).unwrap(),
62 f64_align: Align::from_bits(64, 64).unwrap(),
63 pointer_size: Size::from_bits(64),
64 pointer_align: Align::from_bits(64, 64).unwrap(),
65 aggregate_align: Align::from_bits(0, 64).unwrap(),
67 (Size::from_bits(64), Align::from_bits(64, 64).unwrap()),
68 (Size::from_bits(128), Align::from_bits(128, 128).unwrap())
74 impl TargetDataLayout {
75 pub fn parse(sess: &Session) -> TargetDataLayout {
76 // Parse a bit count from a string.
77 let parse_bits = |s: &str, kind: &str, cause: &str| {
78 s.parse::<u64>().unwrap_or_else(|err| {
79 sess.err(&format!("invalid {} `{}` for `{}` in \"data-layout\": {}",
80 kind, s, cause, err));
85 // Parse a size string.
86 let size = |s: &str, cause: &str| {
87 Size::from_bits(parse_bits(s, "size", cause))
90 // Parse an alignment string.
91 let align = |s: &[&str], cause: &str| {
93 sess.err(&format!("missing alignment for `{}` in \"data-layout\"", cause));
95 let abi = parse_bits(s[0], "alignment", cause);
96 let pref = s.get(1).map_or(abi, |pref| parse_bits(pref, "alignment", cause));
97 Align::from_bits(abi, pref).unwrap_or_else(|err| {
98 sess.err(&format!("invalid alignment for `{}` in \"data-layout\": {}",
100 Align::from_bits(8, 8).unwrap()
104 let mut dl = TargetDataLayout::default();
105 let mut i128_align_src = 64;
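// A data-layout string is a sequence of dash-separated specs; for example, a typical
// x86_64 target uses "e-m:e-i64:64-f80:128-n8:16:32:64-S128" (illustrative). Each spec
// is handled below, and unrecognized specs are simply ignored.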
106 for spec in sess.target.target.data_layout.split("-") {
107 match &spec.split(":").collect::<Vec<_>>()[..] {
108 &["e"] => dl.endian = Endian::Little,
109 &["E"] => dl.endian = Endian::Big,
110 &["a", ref a..] => dl.aggregate_align = align(a, "a"),
111 &["f32", ref a..] => dl.f32_align = align(a, "f32"),
112 &["f64", ref a..] => dl.f64_align = align(a, "f64"),
113 &[p @ "p", s, ref a..] | &[p @ "p0", s, ref a..] => {
114 dl.pointer_size = size(s, p);
115 dl.pointer_align = align(a, p);
117 &[s, ref a..] if s.starts_with("i") => {
118 let bits = match s[1..].parse::<u64>() {
121 size(&s[1..], "i"); // For the user error.
127 1 => dl.i1_align = a,
128 8 => dl.i8_align = a,
129 16 => dl.i16_align = a,
130 32 => dl.i32_align = a,
131 64 => dl.i64_align = a,
134 if bits >= i128_align_src && bits <= 128 {
135                         // Default alignment for i128 is decided by taking the alignment of the
136                         // largest-sized i{64...128}.
137 i128_align_src = bits;
141 &[s, ref a..] if s.starts_with("v") => {
142 let v_size = size(&s[1..], "v");
144 if let Some(v) = dl.vector_align.iter_mut().find(|v| v.0 == v_size) {
148 // No existing entry, add a new one.
149 dl.vector_align.push((v_size, a));
151 _ => {} // Ignore everything else.
155 // Perform consistency checks against the Target information.
156 let endian_str = match dl.endian {
157 Endian::Little => "little",
160 if endian_str != sess.target.target.target_endian {
161 sess.err(&format!("inconsistent target specification: \"data-layout\" claims \
162 architecture is {}-endian, while \"target-endian\" is `{}`",
163 endian_str, sess.target.target.target_endian));
166 if dl.pointer_size.bits().to_string() != sess.target.target.target_pointer_width {
167 sess.err(&format!("inconsistent target specification: \"data-layout\" claims \
168 pointers are {}-bit, while \"target-pointer-width\" is `{}`",
169 dl.pointer_size.bits(), sess.target.target.target_pointer_width));
175     /// Returns the exclusive upper bound on object size.
177 /// The theoretical maximum object size is defined as the maximum positive `isize` value.
178     /// This ensures that the `offset` semantics remain well-defined by allowing it to correctly
179     /// index every address within an object, plus one byte past the end, while also allowing
180     /// `isize` to store the difference between any two pointers into an object.
182 /// The upper bound on 64-bit currently needs to be lower because LLVM uses a 64-bit integer
183 /// to represent object size in bits. It would need to be 1 << 61 to account for this, but is
184 /// currently conservatively bounded to 1 << 47 as that is enough to cover the current usable
185 /// address space on 64-bit ARMv8 and x86_64.
186 pub fn obj_size_bound(&self) -> u64 {
187 match self.pointer_size.bits() {
191 bits => bug!("obj_size_bound: unknown pointer bit size {}", bits)
195 pub fn ptr_sized_integer(&self) -> Integer {
196 match self.pointer_size.bits() {
200 bits => bug!("ptr_sized_integer: unknown pointer bit size {}", bits)
205 /// Endianness of the target, which must match cfg(target_endian).
206 #[derive(Copy, Clone)]
212 /// Size of a type in bytes.
213 #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
219 pub fn from_bits(bits: u64) -> Size {
220 Size::from_bytes((bits + 7) / 8)
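        // For example, from_bits(1) and from_bits(8) both yield a 1-byte Size,
        // while from_bits(9) rounds up to 2 bytes.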
223 pub fn from_bytes(bytes: u64) -> Size {
224 if bytes >= (1 << 61) {
225 bug!("Size::from_bytes: {} bytes in bits doesn't fit in u64", bytes)
232 pub fn bytes(self) -> u64 {
236 pub fn bits(self) -> u64 {
240 pub fn abi_align(self, align: Align) -> Size {
241 let mask = align.abi() - 1;
242 Size::from_bytes((self.bytes() + mask) & !mask)
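        // For example, a 5-byte size aligned to a 4-byte ABI alignment becomes 8 bytes:
        // mask = 3, and (5 + 3) & !3 == 8.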
245 pub fn checked_add(self, offset: Size, dl: &TargetDataLayout) -> Option<Size> {
246 // Each Size is less than dl.obj_size_bound(), so the sum is
247 // also less than 1 << 62 (and therefore can't overflow).
248 let bytes = self.bytes() + offset.bytes();
250 if bytes < dl.obj_size_bound() {
251 Some(Size::from_bytes(bytes))
257 pub fn checked_mul(self, count: u64, dl: &TargetDataLayout) -> Option<Size> {
258         // Unlike addition, the product here can overflow a u64, so use a checked
259         // multiplication and then separately enforce the object size bound.
260 match self.bytes().checked_mul(count) {
261 Some(bytes) if bytes < dl.obj_size_bound() => {
262 Some(Size::from_bytes(bytes))
269 /// Alignment of a type in bytes, both ABI-mandated and preferred.
270 /// Since alignments are always powers of 2, we can pack both in one byte,
271 /// giving each a nibble (4 bits) for a maximum alignment of 2^15 = 32768.
272 #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
278 pub fn from_bits(abi: u64, pref: u64) -> Result<Align, String> {
279 Align::from_bytes((abi + 7) / 8, (pref + 7) / 8)
282 pub fn from_bytes(abi: u64, pref: u64) -> Result<Align, String> {
283 let pack = |align: u64| {
284 // Treat an alignment of 0 bytes like 1-byte alignment.
289 let mut bytes = align;
291 while (bytes & 1) == 0 {
296 Err(format!("`{}` is not a power of 2", align))
297 } else if pow > 0x0f {
298 Err(format!("`{}` is too large", align))
305 raw: pack(abi)? | (pack(pref)? << 4)
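            // For example, Align::from_bytes(4, 8) packs raw = 0x32: log2(4) = 2 in the
            // low (ABI) nibble and log2(8) = 3 in the high (preferred) nibble.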
309 pub fn abi(self) -> u64 {
310 1 << (self.raw & 0xf)
313 pub fn pref(self) -> u64 {
317 pub fn min(self, other: Align) -> Align {
318 let abi = cmp::min(self.raw & 0x0f, other.raw & 0x0f);
319 let pref = cmp::min(self.raw & 0xf0, other.raw & 0xf0);
325 pub fn max(self, other: Align) -> Align {
326 let abi = cmp::max(self.raw & 0x0f, other.raw & 0x0f);
327 let pref = cmp::max(self.raw & 0xf0, other.raw & 0xf0);
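        // Each nibble stores the log2 of an alignment, so comparing the masked nibbles
        // compares the alignments themselves: the min/max of the exponents is the
        // min/max of the corresponding alignments.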
334 /// Integers, also used for enum discriminants.
335 #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
346 pub fn size(&self) -> Size {
348 I1 => Size::from_bits(1),
349 I8 => Size::from_bytes(1),
350 I16 => Size::from_bytes(2),
351 I32 => Size::from_bytes(4),
352 I64 => Size::from_bytes(8),
353 I128 => Size::from_bytes(16),
357 pub fn align(&self, dl: &TargetDataLayout)-> Align {
364 I128 => dl.i128_align,
368 pub fn to_ty<'a, 'tcx>(&self, tcx: &ty::TyCtxt<'a, 'tcx, 'tcx>,
369 signed: bool) -> Ty<'tcx> {
370 match (*self, signed) {
371 (I1, false) => tcx.types.u8,
372 (I8, false) => tcx.types.u8,
373 (I16, false) => tcx.types.u16,
374 (I32, false) => tcx.types.u32,
375 (I64, false) => tcx.types.u64,
376 (I128, false) => tcx.types.u128,
377 (I1, true) => tcx.types.i8,
378 (I8, true) => tcx.types.i8,
379 (I16, true) => tcx.types.i16,
380 (I32, true) => tcx.types.i32,
381 (I64, true) => tcx.types.i64,
382 (I128, true) => tcx.types.i128,
386 /// Find the smallest Integer type which can represent the signed value.
387 pub fn fit_signed(x: i64) -> Integer {
389 -0x0000_0000_0000_0001...0x0000_0000_0000_0000 => I1,
390 -0x0000_0000_0000_0080...0x0000_0000_0000_007f => I8,
391 -0x0000_0000_0000_8000...0x0000_0000_0000_7fff => I16,
392 -0x0000_0000_8000_0000...0x0000_0000_7fff_ffff => I32,
393 -0x8000_0000_0000_0000...0x7fff_ffff_ffff_ffff => I64,
398 /// Find the smallest Integer type which can represent the unsigned value.
399 pub fn fit_unsigned(x: u64) -> Integer {
401 0...0x0000_0000_0000_0001 => I1,
402 0...0x0000_0000_0000_00ff => I8,
403 0...0x0000_0000_0000_ffff => I16,
404 0...0x0000_0000_ffff_ffff => I32,
405 0...0xffff_ffff_ffff_ffff => I64,
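        // For example, fit_unsigned(300) is I16, and fit_signed(-129) is also I16.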
410 /// Find the smallest integer with the given alignment.
411 pub fn for_abi_align(dl: &TargetDataLayout, align: Align) -> Option<Integer> {
412 let wanted = align.abi();
413 for &candidate in &[I8, I16, I32, I64] {
414 let ty = Int(candidate);
415 if wanted == ty.align(dl).abi() && wanted == ty.size(dl).bytes() {
416 return Some(candidate);
422 /// Get the Integer type from an attr::IntType.
423 pub fn from_attr(dl: &TargetDataLayout, ity: attr::IntType) -> Integer {
425 attr::SignedInt(IntTy::I8) | attr::UnsignedInt(UintTy::U8) => I8,
426 attr::SignedInt(IntTy::I16) | attr::UnsignedInt(UintTy::U16) => I16,
427 attr::SignedInt(IntTy::I32) | attr::UnsignedInt(UintTy::U32) => I32,
428 attr::SignedInt(IntTy::I64) | attr::UnsignedInt(UintTy::U64) => I64,
429 attr::SignedInt(IntTy::I128) | attr::UnsignedInt(UintTy::U128) => I128,
430 attr::SignedInt(IntTy::Is) | attr::UnsignedInt(UintTy::Us) => {
431 dl.ptr_sized_integer()
436 /// Find the appropriate Integer type and signedness for the given
437 /// signed discriminant range and #[repr] attribute.
438 /// N.B.: u64 values above i64::MAX will be treated as signed, but
439 /// that shouldn't affect anything, other than maybe debuginfo.
440 fn repr_discr(tcx: TyCtxt, ty: Ty, hints: &[attr::ReprAttr], min: i64, max: i64)
442 // Theoretically, negative values could be larger in unsigned representation
443 // than the unsigned representation of the signed minimum. However, if there
444 // are any negative values, the only valid unsigned representation is u64
445 // which can fit all i64 values, so the result remains unaffected.
446 let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u64, max as u64));
447 let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));
449 let mut min_from_extern = None;
450 let min_default = I8;
452 for &r in hints.iter() {
454 attr::ReprInt(ity) => {
455 let discr = Integer::from_attr(&tcx.data_layout, ity);
456 let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
458 bug!("Integer::repr_discr: `#[repr]` hint too small for \
459                           discriminant range of enum `{}`", ty)
461 return (discr, ity.is_signed());
463 attr::ReprExtern => {
464 match &tcx.sess.target.target.arch[..] {
465 // WARNING: the ARM EABI has two variants; the one corresponding
466 // to `at_least == I32` appears to be used on Linux and NetBSD,
467 // but some systems may use the variant corresponding to no
468 // lower bound. However, we don't run on those yet...?
469 "arm" => min_from_extern = Some(I32),
470 _ => min_from_extern = Some(I32),
474 attr::ReprPacked => {
475 bug!("Integer::repr_discr: found #[repr(packed)] on enum `{}", ty);
478 bug!("Integer::repr_discr: found #[repr(simd)] on enum `{}", ty);
483 let at_least = min_from_extern.unwrap_or(min_default);
485 // If there are no negative values, we can use the unsigned fit.
487 (cmp::max(unsigned_fit, at_least), false)
489 (cmp::max(signed_fit, at_least), true)
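        // For example, with no #[repr] hints a discriminant range of 0..=200 yields
        // (I8, unsigned), while -1..=200 yields (I16, signed), since 200 does not fit in an i8.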
494 /// Fundamental unit of memory access and layout.
495 #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
504 pub fn size(self, dl: &TargetDataLayout) -> Size {
506 Int(I1) | Int(I8) => Size::from_bits(8),
507 Int(I16) => Size::from_bits(16),
508 Int(I32) | F32 => Size::from_bits(32),
509 Int(I64) | F64 => Size::from_bits(64),
510 Int(I128) => Size::from_bits(128),
511 Pointer => dl.pointer_size
515 pub fn align(self, dl: &TargetDataLayout) -> Align {
517 Int(I1) => dl.i1_align,
518 Int(I8) => dl.i8_align,
519 Int(I16) => dl.i16_align,
520 Int(I32) => dl.i32_align,
521 Int(I64) => dl.i64_align,
522 Int(I128) => dl.i128_align,
525 Pointer => dl.pointer_align
530 /// Path through fields of nested structures.
531 // FIXME(eddyb) use small vector optimization for the common case.
532 pub type FieldPath = Vec<u32>;
534 /// A structure, a product type in ADT terms.
535 #[derive(PartialEq, Eq, Hash, Debug)]
539 /// If true, no alignment padding is used.
542 /// If true, the size is exact, otherwise it's only a lower bound.
545 /// Offsets for the first byte of each field, ordered to match the source definition order.
546     /// This vector is not necessarily in increasing order.
547 /// FIXME(eddyb) use small vector optimization for the common case.
548 pub offsets: Vec<Size>,
550     /// Maps source order field indices to memory order indices, depending on how fields were permuted.
551 /// FIXME (camlorn) also consider small vector optimization here.
552 pub memory_index: Vec<u32>,
557 // Info required to optimize struct layout.
558 #[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Debug)]
560 // A tuple, closure, or univariant which cannot be coerced to unsized.
561 AlwaysSizedUnivariant,
562 // A univariant, the last field of which may be coerced to unsized.
563 MaybeUnsizedUnivariant,
564 // A univariant, but part of an enum.
568 impl<'a, 'gcx, 'tcx> Struct {
569 // FIXME(camlorn): reprs need a better representation to deal with multiple reprs on one type.
570 fn new(dl: &TargetDataLayout, fields: &Vec<&'a Layout>,
571 reprs: &[attr::ReprAttr], kind: StructKind,
572 scapegoat: Ty<'gcx>) -> Result<Struct, LayoutError<'gcx>> {
573 let packed = reprs.contains(&attr::ReprPacked);
574 let mut ret = Struct {
575 align: if packed { dl.i8_align } else { dl.aggregate_align },
579 memory_index: vec![],
580 min_size: Size::from_bytes(0),
583 // Anything with ReprExtern or ReprPacked doesn't optimize.
584 // Neither do 1-member and 2-member structs.
585         // In addition, code in trans assumes that 2-element structs can become pairs.
586 // It's easier to just short-circuit here.
587 let mut can_optimize = fields.len() > 2 || StructKind::EnumVariant == kind;
589             // This exhaustive match forces anyone adding a new repr to update this function.
590             // Otherwise, things can silently break.
591             // Note the inversion: the closure returns true to stop optimizing.
592 can_optimize = !reprs.iter().any(|r| {
594 attr::ReprAny | attr::ReprInt(_) => false,
595 attr::ReprExtern | attr::ReprPacked => true,
596 attr::ReprSimd => bug!("Simd vectors should be represented as layout::Vector")
601 // Disable field reordering until we can decide what to do.
602 // The odd pattern here avoids a warning about the value never being read.
603 if can_optimize { can_optimize = false }
605 let (optimize, sort_ascending) = match kind {
606 StructKind::AlwaysSizedUnivariant => (can_optimize, false),
607 StructKind::MaybeUnsizedUnivariant => (can_optimize, false),
608 StructKind::EnumVariant => {
609 assert!(fields.len() >= 1, "Enum variants must have discriminants.");
610 (can_optimize && fields[0].size(dl).bytes() == 1, true)
614 ret.offsets = vec![Size::from_bytes(0); fields.len()];
615 let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();
618 let start = if let StructKind::EnumVariant = kind { 1 } else { 0 };
619 let end = if let StructKind::MaybeUnsizedUnivariant = kind {
625 let optimizing = &mut inverse_memory_index[start..end];
627 optimizing.sort_by_key(|&x| fields[x as usize].align(dl).abi());
629 optimizing.sort_by(| &a, &b | {
630 let a = fields[a as usize].align(dl).abi();
631 let b = fields[b as usize].align(dl).abi();
638 // inverse_memory_index holds field indices by increasing memory offset.
639 // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
640 // We now write field offsets to the corresponding offset slot;
641 // field 5 with offset 0 puts 0 in offsets[5].
642 // At the bottom of this function, we use inverse_memory_index to produce memory_index.
644 if let StructKind::EnumVariant = kind {
645 assert_eq!(inverse_memory_index[0], 0,
646 "Enum variant discriminants must have the lowest offset.");
649 let mut offset = Size::from_bytes(0);
651 for i in inverse_memory_index.iter() {
652 let field = fields[*i as usize];
654 bug!("Struct::new: field #{} of `{}` comes after unsized field",
655 ret.offsets.len(), scapegoat);
658 if field.is_unsized() {
662 // Invariant: offset < dl.obj_size_bound() <= 1<<61
664 let align = field.align(dl);
665 ret.align = ret.align.max(align);
666 offset = offset.abi_align(align);
669 debug!("Struct::new offset: {:?} field: {:?} {:?}", offset, field, field.size(dl));
670 ret.offsets[*i as usize] = offset;
672 offset = offset.checked_add(field.size(dl), dl)
673 .map_or(Err(LayoutError::SizeOverflow(scapegoat)), Ok)?;
677 debug!("Struct::new min_size: {:?}", offset);
678 ret.min_size = offset;
680 // As stated above, inverse_memory_index holds field indices by increasing offset.
681 // This makes it an already-sorted view of the offsets vec.
682 // To invert it, consider:
683         // If field 5 has offset 0, then inverse_memory_index[0] is 5, and memory_index[5] should be 0.
684         // The loop below therefore sets memory_index[inverse_memory_index[i]] = i for each i.
685         // Note: if we didn't optimize, inverse_memory_index is still the identity permutation and is already correct.
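        // For example, if the sort produced inverse_memory_index = [2, 0, 1] (field 2 is
        // first in memory), the loop yields memory_index = [1, 2, 0].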
688 ret.memory_index = vec![0; inverse_memory_index.len()];
690 for i in 0..inverse_memory_index.len() {
691 ret.memory_index[inverse_memory_index[i] as usize] = i as u32;
694 ret.memory_index = inverse_memory_index;
700 /// Get the size with trailing alignment padding.
701 pub fn stride(&self) -> Size {
702 self.min_size.abi_align(self.align)
705 /// Determine whether a structure would be zero-sized, given its fields.
706 pub fn would_be_zero_sized<I>(dl: &TargetDataLayout, fields: I)
707 -> Result<bool, LayoutError<'gcx>>
708 where I: Iterator<Item=Result<&'a Layout, LayoutError<'gcx>>> {
709 for field in fields {
711 if field.is_unsized() || field.size(dl).bytes() > 0 {
718     /// Get the indices of the types that make up this struct, ordered by increasing offset.
720 pub fn field_index_by_increasing_offset<'b>(&'b self) -> impl iter::Iterator<Item=usize>+'b {
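        // For example, with memory_index = [1, 2, 0] this returns an iterator yielding
        // 2, 0, 1: field 2 is laid out first in memory, then field 0, then field 1.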
721 let mut inverse_small = [0u8; 64];
722 let mut inverse_big = vec![];
723 let use_small = self.memory_index.len() <= inverse_small.len();
725         // The logic is written twice so that the common case can use a small fixed-size
726         // stack array instead of allocating a Vec.
727 for i in 0..self.memory_index.len() {
728 inverse_small[self.memory_index[i] as usize] = i as u8;
731 inverse_big = vec![0; self.memory_index.len()];
732 for i in 0..self.memory_index.len() {
733 inverse_big[self.memory_index[i] as usize] = i as u32;
737 (0..self.memory_index.len()).map(move |i| {
738 if use_small { inverse_small[i] as usize }
739 else { inverse_big[i] as usize }
743 /// Find the path leading to a non-zero leaf field, starting from
744 /// the given type and recursing through aggregates.
745 /// The tuple is `(path, source_path)`,
746 /// where `path` is in memory order and `source_path` in source order.
747 // FIXME(eddyb) track value ranges and traverse already optimized enums.
748 fn non_zero_field_in_type(infcx: &InferCtxt<'a, 'gcx, 'tcx>,
750 -> Result<Option<(FieldPath, FieldPath)>, LayoutError<'gcx>> {
751 let tcx = infcx.tcx.global_tcx();
752 match (ty.layout(infcx)?, &ty.sty) {
753 (&Scalar { non_zero: true, .. }, _) |
754 (&CEnum { non_zero: true, .. }, _) => Ok(Some((vec![], vec![]))),
755 (&FatPointer { non_zero: true, .. }, _) => {
756 Ok(Some((vec![FAT_PTR_ADDR as u32], vec![FAT_PTR_ADDR as u32])))
759 // Is this the NonZero lang item wrapping a pointer or integer type?
760 (&Univariant { non_zero: true, .. }, &ty::TyAdt(def, substs)) => {
761 let fields = &def.struct_variant().fields;
762 assert_eq!(fields.len(), 1);
763 match *fields[0].ty(tcx, substs).layout(infcx)? {
764 // FIXME(eddyb) also allow floating-point types here.
765 Scalar { value: Int(_), non_zero: false } |
766 Scalar { value: Pointer, non_zero: false } => {
767 Ok(Some((vec![0], vec![0])))
769 FatPointer { non_zero: false, .. } => {
770 let tmp = vec![FAT_PTR_ADDR as u32, 0];
771 Ok(Some((tmp.clone(), tmp)))
777             // Perhaps one of the fields of this struct is non-zero;
778             // let's recurse and find out.
779 (&Univariant { ref variant, .. }, &ty::TyAdt(def, substs)) if def.is_struct() => {
780 Struct::non_zero_field_paths(infcx, def.struct_variant().fields
781 .iter().map(|field| {
782 field.ty(tcx, substs)
784 Some(&variant.memory_index[..]))
787 // Perhaps one of the upvars of this closure is non-zero
788 (&Univariant { ref variant, .. }, &ty::TyClosure(def, substs)) => {
789 let upvar_tys = substs.upvar_tys(def, tcx);
790 Struct::non_zero_field_paths(infcx, upvar_tys,
791 Some(&variant.memory_index[..]))
793 // Can we use one of the fields in this tuple?
794 (&Univariant { ref variant, .. }, &ty::TyTuple(tys, _)) => {
795 Struct::non_zero_field_paths(infcx, tys.iter().cloned(),
796 Some(&variant.memory_index[..]))
799 // Is this a fixed-size array of something non-zero
800 // with at least one element?
801 (_, &ty::TyArray(ety, d)) if d > 0 => {
802 Struct::non_zero_field_paths(infcx, Some(ety).into_iter(), None)
805 (_, &ty::TyProjection(_)) | (_, &ty::TyAnon(..)) => {
806 let normalized = normalize_associated_type(infcx, ty);
807 if ty == normalized {
810 return Struct::non_zero_field_in_type(infcx, normalized);
813 // Anything else is not a non-zero type.
818 /// Find the path leading to a non-zero leaf field, starting from
819 /// the given set of fields and recursing through aggregates.
820 /// Returns Some((path, source_path)) on success.
821 /// `path` is translated to memory order. `source_path` is not.
822 fn non_zero_field_paths<I>(infcx: &InferCtxt<'a, 'gcx, 'tcx>,
824 permutation: Option<&[u32]>)
825 -> Result<Option<(FieldPath, FieldPath)>, LayoutError<'gcx>>
826 where I: Iterator<Item=Ty<'gcx>> {
827 for (i, ty) in fields.enumerate() {
828 if let Some((mut path, mut source_path)) = Struct::non_zero_field_in_type(infcx, ty)? {
829 source_path.push(i as u32);
830 let index = if let Some(p) = permutation {
835 path.push(index as u32);
836 return Ok(Some((path, source_path)));
843 /// An untagged union.
844 #[derive(PartialEq, Eq, Hash, Debug)]
850 /// If true, no alignment padding is used.
854 impl<'a, 'gcx, 'tcx> Union {
855 pub fn new(dl: &TargetDataLayout, packed: bool) -> Union {
857 align: if packed { dl.i8_align } else { dl.aggregate_align },
858 min_size: Size::from_bytes(0),
863     /// Extend the Union with more fields.
864 pub fn extend<I>(&mut self, dl: &TargetDataLayout,
867 -> Result<(), LayoutError<'gcx>>
868 where I: Iterator<Item=Result<&'a Layout, LayoutError<'gcx>>> {
869 for (index, field) in fields.enumerate() {
871 if field.is_unsized() {
872 bug!("Union::extend: field #{} of `{}` is unsized",
876 debug!("Union::extend field: {:?} {:?}", field, field.size(dl));
879 self.align = self.align.max(field.align(dl));
881 self.min_size = cmp::max(self.min_size, field.size(dl));
884 debug!("Union::extend min-size: {:?}", self.min_size);
889 /// Get the size with trailing alignment padding.
890 pub fn stride(&self) -> Size {
891 self.min_size.abi_align(self.align)
895 /// The first half of a fat pointer.
896 /// - For a trait object, this is the address of the box.
897 /// - For a slice, this is the base address.
898 pub const FAT_PTR_ADDR: usize = 0;
900 /// The second half of a fat pointer.
901 /// - For a trait object, this is the address of the vtable.
902 /// - For a slice, this is the length.
903 pub const FAT_PTR_EXTRA: usize = 1;
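// For example, a `&[u8]` fat pointer stores the data pointer at index FAT_PTR_ADDR
// and the slice length at index FAT_PTR_EXTRA.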
905 /// Type layout, from which size and alignment can be cheaply computed.
906 /// For ADTs, it also includes field placement and enum optimizations.
907 /// NOTE: Because Layout is interned, redundant information should be
908 /// kept to a minimum, e.g. it includes no sub-component Ty or Layout.
909 #[derive(Debug, PartialEq, Eq, Hash)]
911 /// TyBool, TyChar, TyInt, TyUint, TyFloat, TyRawPtr, TyRef or TyFnPtr.
914 // If true, the value cannot represent a bit pattern of all zeroes.
918 /// SIMD vectors, from structs marked with #[repr(simd)].
924 /// TyArray, TySlice or TyStr.
926 /// If true, the size is exact, otherwise it's only a lower bound.
932 /// TyRawPtr or TyRef with a !Sized pointee.
935 // If true, the pointer cannot be null.
939 // Remaining variants are all ADTs such as structs, enums or tuples.
941 /// C-like enums; basically an integer.
946 // Inclusive discriminant range.
947 // If min > max, it represents min...u64::MAX followed by 0...max.
948 // FIXME(eddyb) always use the shortest range, e.g. by finding
949 // the largest space between two consecutive discriminants and
950 // taking everything else as the (shortest) discriminant range.
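        // For example, an enum with discriminants -1 and 0 produces a wrapped range:
        // min is the u64 bit pattern of -1 (u64::MAX), which is greater than max (0).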
955 /// Single-case enums, and structs/tuples.
958 // If true, the structure is NonZero.
959 // FIXME(eddyb) use a newtype Layout kind for this.
968 /// General-case enums: for each case there is a struct, and they
969 /// all start with a field for the discriminant.
972 variants: Vec<Struct>,
977 /// Two cases distinguished by a nullable pointer: the case with discriminant
978     /// `nndiscr` must have a single field which is known to be nonnull due to its type.
979 /// The other case is known to be zero sized. Hence we represent the enum
980 /// as simply a nullable pointer: if not null it indicates the `nndiscr` variant,
981 /// otherwise it indicates the other case.
983 /// For example, `std::option::Option` instantiated at a safe pointer type
984 /// is represented such that `None` is a null pointer and `Some` is the
985 /// identity function.
991 /// Two cases distinguished by a nullable pointer: the case with discriminant
992 /// `nndiscr` is represented by the struct `nonnull`, where the `discrfield`th
993 /// field is known to be nonnull due to its type; if that field is null, then
994 /// it represents the other case, which is known to be zero sized.
995 StructWrappedNullablePointer {
998 // N.B. There is a 0 at the start, for LLVM GEP through a pointer.
999 discrfield: FieldPath,
1000 // Like discrfield, but in source order. For debuginfo.
1001 discrfield_source: FieldPath
1005 #[derive(Copy, Clone, Debug)]
1006 pub enum LayoutError<'tcx> {
1008 SizeOverflow(Ty<'tcx>)
1011 impl<'tcx> fmt::Display for LayoutError<'tcx> {
1012 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
1014 LayoutError::Unknown(ty) => {
1015 write!(f, "the type `{:?}` has an unknown layout", ty)
1017 LayoutError::SizeOverflow(ty) => {
1018 write!(f, "the type `{:?}` is too big for the current architecture", ty)
1024 /// Helper function for normalizing associated types in an inference context.
1025 fn normalize_associated_type<'a, 'gcx, 'tcx>(infcx: &InferCtxt<'a, 'gcx, 'tcx>,
1028 if !ty.has_projection_types() {
1032 let mut selcx = traits::SelectionContext::new(infcx);
1033 let cause = traits::ObligationCause::dummy();
1034 let traits::Normalized { value: result, obligations } =
1035 traits::normalize(&mut selcx, cause, &ty);
1037 let mut fulfill_cx = traits::FulfillmentContext::new();
1039 for obligation in obligations {
1040 fulfill_cx.register_predicate_obligation(infcx, obligation);
1043 infcx.drain_fulfillment_cx_or_panic(DUMMY_SP, &mut fulfill_cx, &result)
1046 impl<'a, 'gcx, 'tcx> Layout {
1047 pub fn compute_uncached(ty: Ty<'gcx>,
1048 infcx: &InferCtxt<'a, 'gcx, 'tcx>)
1049 -> Result<&'gcx Layout, LayoutError<'gcx>> {
1050 let tcx = infcx.tcx.global_tcx();
1051 let success = |layout| Ok(tcx.intern_layout(layout));
1052 let dl = &tcx.data_layout;
1053 assert!(!ty.has_infer_types());
1055 let ptr_layout = |pointee: Ty<'gcx>| {
1056 let non_zero = !ty.is_unsafe_ptr();
1057 let pointee = normalize_associated_type(infcx, pointee);
1058 if pointee.is_sized(tcx, &infcx.parameter_environment, DUMMY_SP) {
1059 Ok(Scalar { value: Pointer, non_zero: non_zero })
1061 let unsized_part = tcx.struct_tail(pointee);
1062 let meta = match unsized_part.sty {
1063 ty::TySlice(_) | ty::TyStr => {
1064 Int(dl.ptr_sized_integer())
1066 ty::TyDynamic(..) => Pointer,
1067 _ => return Err(LayoutError::Unknown(unsized_part))
1069 Ok(FatPointer { metadata: meta, non_zero: non_zero })
1073 let layout = match ty.sty {
1075 ty::TyBool => Scalar { value: Int(I1), non_zero: false },
1076 ty::TyChar => Scalar { value: Int(I32), non_zero: false },
1079 value: Int(Integer::from_attr(dl, attr::SignedInt(ity))),
1083 ty::TyUint(ity) => {
1085 value: Int(Integer::from_attr(dl, attr::UnsignedInt(ity))),
1089 ty::TyFloat(FloatTy::F32) => Scalar { value: F32, non_zero: false },
1090 ty::TyFloat(FloatTy::F64) => Scalar { value: F64, non_zero: false },
1091 ty::TyFnPtr(_) => Scalar { value: Pointer, non_zero: true },
1094 ty::TyNever => Univariant {
1095 variant: Struct::new(dl, &vec![], &[],
1096 StructKind::AlwaysSizedUnivariant, ty)?,
1100 // Potentially-fat pointers.
1101 ty::TyRef(_, ty::TypeAndMut { ty: pointee, .. }) |
1102 ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
1103 ptr_layout(pointee)?
1105 ty::TyAdt(def, _) if def.is_box() => {
1106 ptr_layout(ty.boxed_ty())?
1109 // Arrays and slices.
1110 ty::TyArray(element, count) => {
1111 let element = element.layout(infcx)?;
1114 align: element.align(dl),
1115 size: element.size(dl).checked_mul(count as u64, dl)
1116 .map_or(Err(LayoutError::SizeOverflow(ty)), Ok)?
1119 ty::TySlice(element) => {
1122 align: element.layout(infcx)?.align(dl),
1123 size: Size::from_bytes(0)
1130 size: Size::from_bytes(0)
1135 ty::TyFnDef(..) => {
1137 variant: Struct::new(dl, &vec![],
1138 &[], StructKind::AlwaysSizedUnivariant, ty)?,
1142 ty::TyDynamic(..) => {
1143 let mut unit = Struct::new(dl, &vec![], &[],
1144 StructKind::AlwaysSizedUnivariant, ty)?;
1146 Univariant { variant: unit, non_zero: false }
1149 // Tuples and closures.
1150 ty::TyClosure(def_id, ref substs) => {
1151 let tys = substs.upvar_tys(def_id, tcx);
1152 let st = Struct::new(dl,
1153 &tys.map(|ty| ty.layout(infcx))
1154 .collect::<Result<Vec<_>, _>>()?,
1156 StructKind::AlwaysSizedUnivariant, ty)?;
1157 Univariant { variant: st, non_zero: false }
1160 ty::TyTuple(tys, _) => {
1161 // FIXME(camlorn): if we ever allow unsized tuples, this needs to be checked.
1162 // See the univariant case below to learn how.
1163 let st = Struct::new(dl,
1164 &tys.iter().map(|ty| ty.layout(infcx))
1165 .collect::<Result<Vec<_>, _>>()?,
1166 &[], StructKind::AlwaysSizedUnivariant, ty)?;
1167 Univariant { variant: st, non_zero: false }
1170 // SIMD vector types.
1171 ty::TyAdt(def, ..) if def.is_simd() => {
1172 let element = ty.simd_type(tcx);
1173 match *element.layout(infcx)? {
1174 Scalar { value, .. } => {
1175 return success(Vector {
1177 count: ty.simd_size(tcx) as u64
1181 tcx.sess.fatal(&format!("monomorphising SIMD type `{}` with \
1182 a non-machine element type `{}`",
1189 ty::TyAdt(def, substs) => {
1190 let hints = &tcx.lookup_repr_hints(def.did)[..];
1192 if def.variants.is_empty() {
1193                     // Uninhabited; represent as unit
1194 // (Typechecking will reject discriminant-sizing attrs.)
1195 assert_eq!(hints.len(), 0);
1197 return success(Univariant {
1198 variant: Struct::new(dl, &vec![],
1199 &hints[..], StructKind::AlwaysSizedUnivariant, ty)?,
1204 if def.is_enum() && def.variants.iter().all(|v| v.fields.is_empty()) {
1205 // All bodies empty -> intlike
1206 let (mut min, mut max, mut non_zero) = (i64::max_value(),
1209 for v in &def.variants {
1210 let x = match v.disr_val.erase_type() {
1211 ConstInt::InferSigned(i) => i as i64,
1212 ConstInt::Infer(i) => i as u64 as i64,
1215 if x == 0 { non_zero = false; }
1216 if x < min { min = x; }
1217 if x > max { max = x; }
1220 // FIXME: should handle i128? signed-value based impl is weird and hard to
1222 let (discr, signed) = Integer::repr_discr(tcx, ty, &hints[..],
1225 return success(CEnum {
1229 // FIXME: should be u128?
1235 if !def.is_enum() || def.variants.len() == 1 && hints.is_empty() {
1236 // Struct, or union, or univariant enum equivalent to a struct.
1237 // (Typechecking will reject discriminant-sizing attrs.)
1239                     let kind = if def.is_enum() || def.variants[0].fields.len() == 0 {
1240 StructKind::AlwaysSizedUnivariant
1242 use middle::region::ROOT_CODE_EXTENT;
1243 let param_env = tcx.construct_parameter_environment(DUMMY_SP,
1244 def.did, ROOT_CODE_EXTENT);
1245 let fields = &def.variants[0].fields;
1246 let last_field = &fields[fields.len()-1];
1247 let always_sized = last_field.ty(tcx, param_env.free_substs)
1248                             .is_sized(tcx, &param_env, DUMMY_SP);
1249 if !always_sized { StructKind::MaybeUnsizedUnivariant }
1250 else { StructKind::AlwaysSizedUnivariant }
1253 let fields = def.variants[0].fields.iter().map(|field| {
1254 field.ty(tcx, substs).layout(infcx)
1255 }).collect::<Result<Vec<_>, _>>()?;
1256 let packed = tcx.lookup_packed(def.did);
1257 let layout = if def.is_union() {
1258 let mut un = Union::new(dl, packed);
1259 un.extend(dl, fields.iter().map(|&f| Ok(f)), ty)?;
1260 UntaggedUnion { variants: un }
1262 let st = Struct::new(dl, &fields, &hints[..],
1264 let non_zero = Some(def.did) == tcx.lang_items.non_zero();
1265 Univariant { variant: st, non_zero: non_zero }
1267 return success(layout);
1270 // Since there's at least one
1271 // non-empty body, explicit discriminants should have
1272 // been rejected by a checker before this point.
1273 for (i, v) in def.variants.iter().enumerate() {
1274 if i as u128 != v.disr_val.to_u128_unchecked() {
1275 bug!("non-C-like enum {} with specified discriminants",
1276 tcx.item_path_str(def.did));
1280 // Cache the substituted and normalized variant field types.
1281 let variants = def.variants.iter().map(|v| {
1282 v.fields.iter().map(|field| field.ty(tcx, substs)).collect::<Vec<_>>()
1283 }).collect::<Vec<_>>();
1285 if variants.len() == 2 && hints.is_empty() {
1286 // Nullable pointer optimization
1288 let other_fields = variants[1 - discr].iter().map(|ty| {
1291 if !Struct::would_be_zero_sized(dl, other_fields)? {
1294 let paths = Struct::non_zero_field_paths(infcx,
1295 variants[discr].iter().cloned(),
1297 let (mut path, mut path_source) = if let Some(p) = paths { p }
1300 // FIXME(eddyb) should take advantage of a newtype.
1301 if path == &[0] && variants[discr].len() == 1 {
1302 let value = match *variants[discr][0].layout(infcx)? {
1303 Scalar { value, .. } => value,
1304 CEnum { discr, .. } => Int(discr),
1305 _ => bug!("Layout::compute: `{}`'s non-zero \
1306 `{}` field not scalar?!",
1307 ty, variants[discr][0])
1309 return success(RawNullablePointer {
1310 nndiscr: discr as u64,
1315 let st = Struct::new(dl,
1316 &variants[discr].iter().map(|ty| ty.layout(infcx))
1317 .collect::<Result<Vec<_>, _>>()?,
1318 &hints[..], StructKind::AlwaysSizedUnivariant, ty)?;
1320 // We have to fix the last element of path here.
1321 let mut i = *path.last().unwrap();
1322 i = st.memory_index[i as usize];
1323 *path.last_mut().unwrap() = i;
1324 path.push(0); // For GEP through a pointer.
1326 path_source.push(0);
1327 path_source.reverse();
1329 return success(StructWrappedNullablePointer {
1330 nndiscr: discr as u64,
1333 discrfield_source: path_source
1338 // The general case.
1339 let discr_max = (variants.len() - 1) as i64;
1340 assert!(discr_max >= 0);
1341 let (min_ity, _) = Integer::repr_discr(tcx, ty, &hints[..], 0, discr_max);
1343 let mut align = dl.aggregate_align;
1344 let mut size = Size::from_bytes(0);
1346 // We're interested in the smallest alignment, so start large.
1347 let mut start_align = Align::from_bytes(256, 256).unwrap();
1349 // Create the set of structs that represent each variant
1350 // Use the minimum integer type we figured out above
1351 let discr = Scalar { value: Int(min_ity), non_zero: false };
1352 let mut variants = variants.into_iter().map(|fields| {
1353 let mut fields = fields.into_iter().map(|field| {
1355 }).collect::<Result<Vec<_>, _>>()?;
1356 fields.insert(0, &discr);
1357 let st = Struct::new(dl,
1359 &hints[..], StructKind::EnumVariant, ty)?;
1360 // Find the first field we can't move later
1361 // to make room for a larger discriminant.
1362 // It is important to skip the first field.
1363 for i in st.field_index_by_increasing_offset().skip(1) {
1364 let field = fields[i];
1365 let field_align = field.align(dl);
1366 if field.size(dl).bytes() != 0 || field_align.abi() != 1 {
1367 start_align = start_align.min(field_align);
1371 size = cmp::max(size, st.min_size);
1372 align = align.max(st.align);
1374 }).collect::<Result<Vec<_>, _>>()?;
1376 // Align the maximum variant size to the largest alignment.
1377 size = size.abi_align(align);
1379 if size.bytes() >= dl.obj_size_bound() {
1380 return Err(LayoutError::SizeOverflow(ty));
1383 // Check to see if we should use a different type for the
1384 // discriminant. We can safely use a type with the same size
1385 // as the alignment of the first field of each variant.
1386 // We increase the size of the discriminant to avoid LLVM copying
1387 // padding when it doesn't need to. This normally causes unaligned
1388 // load/stores and excessive memcpy/memset operations. By using a
1389                 // bigger integer size, LLVM can be sure about its contents and
1390 // won't be so conservative.
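                // For example, if min_ity is I8 but every variant's first real field is at
                // least 4-byte aligned, the discriminant can grow to I32 for free: the bytes
                // it expands into were already alignment padding.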
1392 // Use the initial field alignment
1393 let mut ity = Integer::for_abi_align(dl, start_align).unwrap_or(min_ity);
1395 // If the alignment is not larger than the chosen discriminant size,
1396 // don't use the alignment as the final size.
1400 // Patch up the variants' first few fields.
1401 let old_ity_size = Int(min_ity).size(dl);
1402 let new_ity_size = Int(ity).size(dl);
1403 for variant in &mut variants {
1404 for i in variant.offsets.iter_mut() {
1405                         // The first field is the discriminant, at offset 0.
1406                         // The offsets are in source order, not memory order, so we skip
1407                         // the discriminant by leaving offsets of 0 untouched.
1407 if *i <= old_ity_size && *i > Size::from_bytes(0) {
1411 // We might be making the struct larger.
1412 if variant.min_size <= old_ity_size {
1413 variant.min_size = new_ity_size;
1426 // Types with no meaningful known layout.
1427 ty::TyProjection(_) | ty::TyAnon(..) => {
1428 let normalized = normalize_associated_type(infcx, ty);
1429 if ty == normalized {
1430 return Err(LayoutError::Unknown(ty));
1432 return normalized.layout(infcx);
1435 return Err(LayoutError::Unknown(ty));
1437 ty::TyInfer(_) | ty::TyError => {
1438 bug!("Layout::compute: unexpected type `{}`", ty)
1445 /// Returns true if the layout corresponds to an unsized type.
1446 pub fn is_unsized(&self) -> bool {
1448 Scalar {..} | Vector {..} | FatPointer {..} |
1449 CEnum {..} | UntaggedUnion {..} | General {..} |
1450 RawNullablePointer {..} |
1451 StructWrappedNullablePointer {..} => false,
1453 Array { sized, .. } |
1454 Univariant { variant: Struct { sized, .. }, .. } => !sized
1458 pub fn size(&self, dl: &TargetDataLayout) -> Size {
1460 Scalar { value, .. } | RawNullablePointer { value, .. } => {
1464 Vector { element, count } => {
1465 let elem_size = element.size(dl);
1466 let vec_size = match elem_size.checked_mul(count, dl) {
1468 None => bug!("Layout::size({:?}): {} * {} overflowed",
1469 self, elem_size.bytes(), count)
1471 vec_size.abi_align(self.align(dl))
1474 FatPointer { metadata, .. } => {
1475 // Effectively a (ptr, meta) tuple.
1476 Pointer.size(dl).abi_align(metadata.align(dl))
1477 .checked_add(metadata.size(dl), dl).unwrap()
1478 .abi_align(self.align(dl))
1481 CEnum { discr, .. } => Int(discr).size(dl),
1482 Array { size, .. } | General { size, .. } => size,
1483 UntaggedUnion { ref variants } => variants.stride(),
1485 Univariant { ref variant, .. } |
1486 StructWrappedNullablePointer { nonnull: ref variant, .. } => {
1492 pub fn align(&self, dl: &TargetDataLayout) -> Align {
1494 Scalar { value, .. } | RawNullablePointer { value, .. } => {
1498 Vector { element, count } => {
1499 let elem_size = element.size(dl);
1500 let vec_size = match elem_size.checked_mul(count, dl) {
1502 None => bug!("Layout::align({:?}): {} * {} overflowed",
1503 self, elem_size.bytes(), count)
1505 for &(size, align) in &dl.vector_align {
1506 if size == vec_size {
1510 // Default to natural alignment, which is what LLVM does.
1511 // That is, use the size, rounded up to a power of 2.
1512 let align = vec_size.bytes().next_power_of_two();
1513 Align::from_bytes(align, align).unwrap()
1516 FatPointer { metadata, .. } => {
1517 // Effectively a (ptr, meta) tuple.
1518 Pointer.align(dl).max(metadata.align(dl))
1521 CEnum { discr, .. } => Int(discr).align(dl),
1522 Array { align, .. } | General { align, .. } => align,
1523 UntaggedUnion { ref variants } => variants.align,
1525 Univariant { ref variant, .. } |
1526 StructWrappedNullablePointer { nonnull: ref variant, .. } => {
1533 /// Type size "skeleton", i.e. the only information determining a type's size.
1534 /// While this is conservative (aside from constant sizes, only pointers,
1535 /// newtypes thereof, and null-pointer-optimized enums are allowed), it is
1536 /// enough to statically check common use cases of transmute.
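/// For example, for a type parameter `T: ?Sized`, both `&T` and a newtype struct wrapping
/// `&T` reduce to `SizeSkeleton::Pointer { tail: T }`, so a transmute between them can be
/// accepted even though neither size is statically known.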
1537 #[derive(Copy, Clone, Debug)]
1538 pub enum SizeSkeleton<'tcx> {
1539 /// Any statically computable Layout.
1542 /// A potentially-fat pointer.
1544 // If true, this pointer is never null.
1546 // The type which determines the unsized metadata, if any,
1547 // of this pointer. Either a type parameter or a projection
1548 // depending on one, with regions erased.
1553 impl<'a, 'gcx, 'tcx> SizeSkeleton<'gcx> {
1554 pub fn compute(ty: Ty<'gcx>, infcx: &InferCtxt<'a, 'gcx, 'tcx>)
1555 -> Result<SizeSkeleton<'gcx>, LayoutError<'gcx>> {
1556 let tcx = infcx.tcx.global_tcx();
1557 assert!(!ty.has_infer_types());
1559 // First try computing a static layout.
1560 let err = match ty.layout(infcx) {
1562 return Ok(SizeSkeleton::Known(layout.size(&tcx.data_layout)));
1567 let ptr_skeleton = |pointee: Ty<'gcx>| {
1568 let non_zero = !ty.is_unsafe_ptr();
1569 let tail = tcx.struct_tail(pointee);
1571 ty::TyParam(_) | ty::TyProjection(_) => {
1572 assert!(tail.has_param_types() || tail.has_self_ty());
1573 Ok(SizeSkeleton::Pointer {
1575 tail: tcx.erase_regions(&tail)
1579 bug!("SizeSkeleton::compute({}): layout errored ({}), yet \
1580 tail `{}` is not a type parameter or a projection",
1587 ty::TyRef(_, ty::TypeAndMut { ty: pointee, .. }) |
1588 ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
1589 ptr_skeleton(pointee)
1591 ty::TyAdt(def, _) if def.is_box() => {
1592 ptr_skeleton(ty.boxed_ty())
1595 ty::TyAdt(def, substs) => {
1596 // Only newtypes and enums w/ nullable pointer optimization.
1597 if def.is_union() || def.variants.is_empty() || def.variants.len() > 2 {
1601 // Get a zero-sized variant or a pointer newtype.
1602 let zero_or_ptr_variant = |i: usize| {
1603 let fields = def.variants[i].fields.iter().map(|field| {
1604 SizeSkeleton::compute(field.ty(tcx, substs), infcx)
1607 for field in fields {
1610 SizeSkeleton::Known(size) => {
1611 if size.bytes() > 0 {
1615 SizeSkeleton::Pointer {..} => {
1626 let v0 = zero_or_ptr_variant(0)?;
1628 if def.variants.len() == 1 {
1629 if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
1630 return Ok(SizeSkeleton::Pointer {
1631 non_zero: non_zero ||
1632 Some(def.did) == tcx.lang_items.non_zero(),
1640 let v1 = zero_or_ptr_variant(1)?;
1641 // Nullable pointer enum optimization.
1643 (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None) |
1644 (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
1645 Ok(SizeSkeleton::Pointer {
1654 ty::TyProjection(_) | ty::TyAnon(..) => {
1655 let normalized = normalize_associated_type(infcx, ty);
1656 if ty == normalized {
1659 SizeSkeleton::compute(normalized, infcx)
1667 pub fn same_size(self, other: SizeSkeleton) -> bool {
1668 match (self, other) {
1669 (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
1670 (SizeSkeleton::Pointer { tail: a, .. },
1671 SizeSkeleton::Pointer { tail: b, .. }) => a == b,