1 // Copyright 2016 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 pub use self::Integer::*;
12 pub use self::Layout::*;
13 pub use self::Primitive::*;
18 use ty::{self, Ty, TyCtxt, TypeFoldable, ReprOptions};
20 use syntax::ast::{FloatTy, IntTy, UintTy};
22 use syntax_pos::DUMMY_SP;
30 /// Parsed [Data layout](http://llvm.org/docs/LangRef.html#data-layout)
31 /// for a target, which contains everything needed to compute layouts.
32 pub struct TargetDataLayout {
/// ABI/preferred alignment for the `i128` type.
39 pub i128_align: Align,
/// Size of pointers on the target.
42 pub pointer_size: Size,
/// ABI/preferred alignment of pointers.
43 pub pointer_align: Align,
/// Base alignment applied to aggregates (see its use in `Struct::new`/`Union::new`).
44 pub aggregate_align: Align,
46 /// Alignments for vector types.
47 pub vector_align: Vec<(Size, Align)>
50 impl Default for TargetDataLayout {
51 /// Creates an instance of `TargetDataLayout`.
// Defaults mirror LLVM's default data layout; note that `Align::from_bits`
// takes (abi, pref), so i64/i128 default to a 32-bit ABI alignment with a
// 64-bit preferred alignment — presumably matching 32-bit x86 conventions
// (TODO confirm against the target specs).
52 fn default() -> TargetDataLayout {
55 i1_align: Align::from_bits(8, 8).unwrap(),
56 i8_align: Align::from_bits(8, 8).unwrap(),
57 i16_align: Align::from_bits(16, 16).unwrap(),
58 i32_align: Align::from_bits(32, 32).unwrap(),
59 i64_align: Align::from_bits(32, 64).unwrap(),
60 i128_align: Align::from_bits(32, 64).unwrap(),
61 f32_align: Align::from_bits(32, 32).unwrap(),
62 f64_align: Align::from_bits(64, 64).unwrap(),
63 pointer_size: Size::from_bits(64),
64 pointer_align: Align::from_bits(64, 64).unwrap(),
65 aggregate_align: Align::from_bits(0, 64).unwrap(),
// Default vector alignments: 64-bit and 128-bit vectors align to their size.
67 (Size::from_bits(64), Align::from_bits(64, 64).unwrap()),
68 (Size::from_bits(128), Align::from_bits(128, 128).unwrap())
74 impl TargetDataLayout {
/// Parse the target's LLVM "data-layout" string into a `TargetDataLayout`,
/// reporting malformed entries and target-spec inconsistencies via `sess.err`
/// (parsing continues with fallback values rather than aborting).
75 pub fn parse(sess: &Session) -> TargetDataLayout {
76 // Parse a bit count from a string.
77 let parse_bits = |s: &str, kind: &str, cause: &str| {
78 s.parse::<u64>().unwrap_or_else(|err| {
79 sess.err(&format!("invalid {} `{}` for `{}` in \"data-layout\": {}",
80 kind, s, cause, err));
85 // Parse a size string.
86 let size = |s: &str, cause: &str| {
87 Size::from_bits(parse_bits(s, "size", cause))
90 // Parse an alignment string.
91 let align = |s: &[&str], cause: &str| {
93 sess.err(&format!("missing alignment for `{}` in \"data-layout\"", cause));
95 let abi = parse_bits(s[0], "alignment", cause);
// A single value means ABI and preferred alignment coincide.
96 let pref = s.get(1).map_or(abi, |pref| parse_bits(pref, "alignment", cause));
97 Align::from_bits(abi, pref).unwrap_or_else(|err| {
98 sess.err(&format!("invalid alignment for `{}` in \"data-layout\": {}",
// Fall back to byte alignment so parsing can continue after an error.
100 Align::from_bits(8, 8).unwrap()
// Start from the defaults and override with each dash-separated spec.
104 let mut dl = TargetDataLayout::default();
105 let mut i128_align_src = 64;
106 for spec in sess.target.target.data_layout.split("-") {
// Each spec is a colon-separated list, e.g. "i64:64" or "p:32:32".
107 match &spec.split(":").collect::<Vec<_>>()[..] {
108 &["e"] => dl.endian = Endian::Little,
109 &["E"] => dl.endian = Endian::Big,
110 &["a", ref a..] => dl.aggregate_align = align(a, "a"),
111 &["f32", ref a..] => dl.f32_align = align(a, "f32"),
112 &["f64", ref a..] => dl.f64_align = align(a, "f64"),
113 &[p @ "p", s, ref a..] | &[p @ "p0", s, ref a..] => {
114 dl.pointer_size = size(s, p);
115 dl.pointer_align = align(a, p);
// Integer specs: "i<bits>[:<abi>[:<pref>]]".
117 &[s, ref a..] if s.starts_with("i") => {
118 let bits = match s[1..].parse::<u64>() {
121 size(&s[1..], "i"); // For the user error.
127 1 => dl.i1_align = a,
128 8 => dl.i8_align = a,
129 16 => dl.i16_align = a,
130 32 => dl.i32_align = a,
131 64 => dl.i64_align = a,
134 if bits >= i128_align_src && bits <= 128 {
135 // Default alignment for i128 is decided by taking the alignment of
136 // largest-sized i{64...128}.
137 i128_align_src = bits;
// Vector specs: "v<size>[:<align>...]".
141 &[s, ref a..] if s.starts_with("v") => {
142 let v_size = size(&s[1..], "v");
144 if let Some(v) = dl.vector_align.iter_mut().find(|v| v.0 == v_size) {
148 // No existing entry, add a new one.
149 dl.vector_align.push((v_size, a));
151 _ => {} // Ignore everything else.
155 // Perform consistency checks against the Target information.
156 let endian_str = match dl.endian {
157 Endian::Little => "little",
160 if endian_str != sess.target.target.target_endian {
161 sess.err(&format!("inconsistent target specification: \"data-layout\" claims \
162 architecture is {}-endian, while \"target-endian\" is `{}`",
163 endian_str, sess.target.target.target_endian));
164 if dl.pointer_size.bits().to_string() != sess.target.target.target_pointer_width {
175 /// Return exclusive upper bound on object size.
177 /// The theoretical maximum object size is defined as the maximum positive `isize` value.
178 /// This ensures that the `offset` semantics remain well-defined by allowing it to correctly
179 /// index every address within an object along with one byte past the end, along with allowing
180 /// `isize` to store the difference between any two pointers into an object.
182 /// The upper bound on 64-bit currently needs to be lower because LLVM uses a 64-bit integer
183 /// to represent object size in bits. It would need to be 1 << 61 to account for this, but is
184 /// currently conservatively bounded to 1 << 47 as that is enough to cover the current usable
185 /// address space on 64-bit ARMv8 and x86_64.
186 pub fn obj_size_bound(&self) -> u64 {
// Only known pointer widths are supported; anything else is a compiler bug.
187 match self.pointer_size.bits() {
191 bits => bug!("obj_size_bound: unknown pointer bit size {}", bits)
/// The `Integer` with the same width as a pointer on this target
/// (i.e. what `isize`/`usize` lower to).
195 pub fn ptr_sized_integer(&self) -> Integer {
196 match self.pointer_size.bits() {
200 bits => bug!("ptr_sized_integer: unknown pointer bit size {}", bits)
/// Implemented by contexts from which a `TargetDataLayout` can be borrowed,
/// so layout helpers can be generic over their context parameter.
205 pub trait HasDataLayout: Copy {
206 fn data_layout(&self) -> &TargetDataLayout;
// Trivial impl: a reference to a layout provides itself.
209 impl<'a> HasDataLayout for &'a TargetDataLayout {
210 fn data_layout(&self) -> &TargetDataLayout {
215 /// Endianness of the target, which must match cfg(target-endian).
216 #[derive(Copy, Clone)]
222 /// Size of a type in bytes.
223 #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
// Rounds a bit count up to whole bytes.
229 pub fn from_bits(bits: u64) -> Size {
230 Size::from_bytes((bits + 7) / 8)
// Sizes are capped below 1 << 61 bytes so the size *in bits* still fits in u64.
233 pub fn from_bytes(bytes: u64) -> Size {
234 if bytes >= (1 << 61) {
235 bug!("Size::from_bytes: {} bytes in bits doesn't fit in u64", bytes)
242 pub fn bytes(self) -> u64 {
246 pub fn bits(self) -> u64 {
// Round this size up to the given ABI alignment (a power of 2, so the
// mask-and-add trick below is exact).
250 pub fn abi_align(self, align: Align) -> Size {
251 let mask = align.abi() - 1;
252 Size::from_bytes((self.bytes() + mask) & !mask)
// Overflow-checked addition; returns None once the result would exceed the
// target's object-size bound.
255 pub fn checked_add<C: HasDataLayout>(self, offset: Size, cx: C) -> Option<Size> {
256 let dl = cx.data_layout();
258 // Each Size is less than dl.obj_size_bound(), so the sum is
259 // also less than 1 << 62 (and therefore can't overflow).
260 let bytes = self.bytes() + offset.bytes();
262 if bytes < dl.obj_size_bound() {
263 Some(Size::from_bytes(bytes))
// Overflow-checked multiplication, bounded like checked_add.
269 pub fn checked_mul<C: HasDataLayout>(self, count: u64, cx: C) -> Option<Size> {
270 let dl = cx.data_layout();
272 // Each Size is less than dl.obj_size_bound(), so the sum is
273 // also less than 1 << 62 (and therefore can't overflow).
274 match self.bytes().checked_mul(count) {
275 Some(bytes) if bytes < dl.obj_size_bound() => {
276 Some(Size::from_bytes(bytes))
283 /// Alignment of a type in bytes, both ABI-mandated and preferred.
284 /// Since alignments are always powers of 2, we can pack both in one byte,
285 /// giving each a nibble (4 bits) for a maximum alignment of 2<sup>15</sup> = 32768.
286 #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
// Round bit counts up to whole bytes, then delegate to from_bytes.
292 pub fn from_bits(abi: u64, pref: u64) -> Result<Align, String> {
293 Align::from_bytes((abi + 7) / 8, (pref + 7) / 8)
296 pub fn from_bytes(abi: u64, pref: u64) -> Result<Align, String> {
// Encode one power-of-2 byte alignment as its log2, stored in a nibble.
297 let pack = |align: u64| {
298 // Treat an alignment of 0 bytes like 1-byte alignment.
303 let mut bytes = align;
305 while (bytes & 1) == 0 {
310 Err(format!("`{}` is not a power of 2", align))
311 } else if pow > 0x0f {
312 Err(format!("`{}` is too large", align))
// Low nibble holds the ABI alignment, high nibble the preferred one.
319 raw: pack(abi)? | (pack(pref)? << 4)
// Decode the ABI alignment back into a byte count.
323 pub fn abi(self) -> u64 {
324 1 << (self.raw & 0xf)
327 pub fn pref(self) -> u64 {
// Nibble-wise min/max is valid because each nibble stores the log2 of an
// alignment, and log2 is monotonic.
331 pub fn min(self, other: Align) -> Align {
332 let abi = cmp::min(self.raw & 0x0f, other.raw & 0x0f);
333 let pref = cmp::min(self.raw & 0xf0, other.raw & 0xf0);
339 pub fn max(self, other: Align) -> Align {
340 let abi = cmp::max(self.raw & 0x0f, other.raw & 0x0f);
341 let pref = cmp::max(self.raw & 0xf0, other.raw & 0xf0);
348 /// Integers, also used for enum discriminants.
349 #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
// Note: I1 is reported as 1 *bit* here; contrast Primitive::size, where it
// occupies a full byte.
360 pub fn size(&self) -> Size {
362 I1 => Size::from_bits(1),
363 I8 => Size::from_bytes(1),
364 I16 => Size::from_bytes(2),
365 I32 => Size::from_bytes(4),
366 I64 => Size::from_bytes(8),
367 I128 => Size::from_bytes(16),
// Alignment is target-dependent, so it is looked up in the data layout.
371 pub fn align<C: HasDataLayout>(&self, cx: C) -> Align {
372 let dl = cx.data_layout();
380 I128 => dl.i128_align,
// Map this abstract integer (plus signedness) to a concrete Rust type.
// I1 has no Rust type of its own, so it lowers to u8/i8.
384 pub fn to_ty<'a, 'tcx>(&self, tcx: &ty::TyCtxt<'a, 'tcx, 'tcx>,
385 signed: bool) -> Ty<'tcx> {
386 match (*self, signed) {
387 (I1, false) => tcx.types.u8,
388 (I8, false) => tcx.types.u8,
389 (I16, false) => tcx.types.u16,
390 (I32, false) => tcx.types.u32,
391 (I64, false) => tcx.types.u64,
392 (I128, false) => tcx.types.u128,
393 (I1, true) => tcx.types.i8,
394 (I8, true) => tcx.types.i8,
395 (I16, true) => tcx.types.i16,
396 (I32, true) => tcx.types.i32,
397 (I64, true) => tcx.types.i64,
398 (I128, true) => tcx.types.i128,
402 /// Find the smallest Integer type which can represent the signed value.
403 pub fn fit_signed(x: i64) -> Integer {
// Ranges overlap; `match` picks the first (smallest) arm that fits.
405 -0x0000_0000_0000_0001...0x0000_0000_0000_0000 => I1,
406 -0x0000_0000_0000_0080...0x0000_0000_0000_007f => I8,
407 -0x0000_0000_0000_8000...0x0000_0000_0000_7fff => I16,
408 -0x0000_0000_8000_0000...0x0000_0000_7fff_ffff => I32,
409 -0x8000_0000_0000_0000...0x7fff_ffff_ffff_ffff => I64,
414 /// Find the smallest Integer type which can represent the unsigned value.
415 pub fn fit_unsigned(x: u64) -> Integer {
// Same first-match-wins trick as fit_signed above.
417 0...0x0000_0000_0000_0001 => I1,
418 0...0x0000_0000_0000_00ff => I8,
419 0...0x0000_0000_0000_ffff => I16,
420 0...0x0000_0000_ffff_ffff => I32,
421 0...0xffff_ffff_ffff_ffff => I64,
426 /// Find the smallest integer with the given alignment.
427 pub fn for_abi_align<C: HasDataLayout>(cx: C, align: Align) -> Option<Integer> {
428 let dl = cx.data_layout()();
430 let wanted = align.abi();
// Candidates are tried smallest-first; a match must agree on both the
// ABI alignment and the byte size of the candidate type.
431 for &candidate in &[I8, I16, I32, I64] {
432 let ty = Int(candidate);
433 if wanted == ty.align(dl).abi() && wanted == ty.size(dl).bytes() {
434 return Some(candidate);
440 /// Get the Integer type from an attr::IntType.
441 pub fn from_attr<C: HasDataLayout>(cx: C, ity: attr::IntType) -> Integer {
442 let dl = cx.data_layout();
445 attr::SignedInt(IntTy::I8) | attr::UnsignedInt(UintTy::U8) => I8,
446 attr::SignedInt(IntTy::I16) | attr::UnsignedInt(UintTy::U16) => I16,
447 attr::SignedInt(IntTy::I32) | attr::UnsignedInt(UintTy::U32) => I32,
448 attr::SignedInt(IntTy::I64) | attr::UnsignedInt(UintTy::U64) => I64,
449 attr::SignedInt(IntTy::I128) | attr::UnsignedInt(UintTy::U128) => I128,
// isize/usize map to the target's pointer-sized integer.
450 attr::SignedInt(IntTy::Is) | attr::UnsignedInt(UintTy::Us) => {
451 dl.ptr_sized_integer()
456 /// Find the appropriate Integer type and signedness for the given
457 /// signed discriminant range and #[repr] attribute.
458 /// N.B.: u64 values above i64::MAX will be treated as signed, but
459 /// that shouldn't affect anything, other than maybe debuginfo.
460 fn repr_discr(tcx: TyCtxt, ty: Ty, repr: &ReprOptions, min: i64, max: i64)
462 // Theoretically, negative values could be larger in unsigned representation
463 // than the unsigned representation of the signed minimum. However, if there
464 // are any negative values, the only valid unsigned representation is u64
465 // which can fit all i64 values, so the result remains unaffected.
466 let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u64, max as u64));
467 let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));
469 let mut min_from_extern = None;
470 let min_default = I8;
// An explicit #[repr(int)] attribute wins outright, after sanity-checking
// that the requested type can actually hold the discriminant range.
472 if let Some(ity) = repr.int {
473 let discr = Integer::from_attr(tcx, ity);
474 let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
476 bug!("Integer::repr_discr: `#[repr]` hint too small for \
477 discriminant range of enum `{}", ty)
479 return (discr, ity.is_signed());
// NOTE(review): both arms below currently yield Some(I32); the "arm" arm is
// kept separate only for the EABI caveat documented inline — confirm before
// collapsing them.
483 match &tcx.sess.target.target.arch[..] {
484 // WARNING: the ARM EABI has two variants; the one corresponding
485 // to `at_least == I32` appears to be used on Linux and NetBSD,
486 // but some systems may use the variant corresponding to no
487 // lower bound. However, we don't run on those yet...?
488 "arm" => min_from_extern = Some(I32),
489 _ => min_from_extern = Some(I32),
493 let at_least = min_from_extern.unwrap_or(min_default);
495 // If there are no negative values, we can use the unsigned fit.
497 (cmp::max(unsigned_fit, at_least), false)
499 (cmp::max(signed_fit, at_least), true)
504 /// Fundamental unit of memory access and layout.
505 #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
514 pub fn size<C: HasDataLayout>(self, cx: C) -> Size {
515 let dl = cx.data_layout();
// As a memory-access primitive, I1 occupies a full byte
// (contrast Integer::size, which reports 1 bit for I1).
518 Int(I1) | Int(I8) => Size::from_bits(8),
519 Int(I16) => Size::from_bits(16),
520 Int(I32) | F32 => Size::from_bits(32),
521 Int(I64) | F64 => Size::from_bits(64),
522 Int(I128) => Size::from_bits(128),
523 Pointer => dl.pointer_size
// Alignment is always taken from the target data layout.
527 pub fn align<C: HasDataLayout>(self, cx: C) -> Align {
528 let dl = cx.data_layout();
531 Int(I1) => dl.i1_align,
532 Int(I8) => dl.i8_align,
533 Int(I16) => dl.i16_align,
534 Int(I32) => dl.i32_align,
535 Int(I64) => dl.i64_align,
536 Int(I128) => dl.i128_align,
539 Pointer => dl.pointer_align
544 /// Path through fields of nested structures.
545 // FIXME(eddyb) use small vector optimization for the common case.
546 pub type FieldPath = Vec<u32>;
548 /// A structure, a product type in ADT terms.
549 #[derive(PartialEq, Eq, Hash, Debug)]
553 /// If true, no alignment padding is used.
556 /// If true, the size is exact, otherwise it's only a lower bound.
559 /// Offsets for the first byte of each field, ordered to match the source definition order.
560 /// This vector does not go in increasing order.
561 /// FIXME(eddyb) use small vector optimization for the common case.
/// Indexed by source field order; see `memory_index` for memory order.
562 pub offsets: Vec<Size>,
564 /// Maps source order field indices to memory order indices, depending how fields were permuted.
565 /// FIXME (camlorn) also consider small vector optimization here.
566 pub memory_index: Vec<u32>,
571 // Info required to optimize struct layout.
// Passed to `Struct::new`; the kind decides whether fields may be reordered
// and whether the last field may be unsized.
572 #[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Debug)]
574 // A tuple, closure, or univariant which cannot be coerced to unsized.
575 AlwaysSizedUnivariant,
576 // A univariant, the last field of which may be coerced to unsized.
577 MaybeUnsizedUnivariant,
578 // A univariant, but part of an enum.
582 impl<'a, 'gcx, 'tcx> Struct {
583 // FIXME(camlorn): reprs need a better representation to deal with multiple reprs on one type.
// Compute the layout of a struct-like aggregate from its field layouts.
// `scapegoat` is only the Ty reported in any resulting LayoutError/bug message.
584 fn new(dl: &TargetDataLayout, fields: &Vec<&'a Layout>,
585 repr: &ReprOptions, kind: StructKind,
586 scapegoat: Ty<'gcx>) -> Result<Struct, LayoutError<'gcx>> {
587 let packed = repr.packed;
588 let mut ret = Struct {
// repr(packed) drops the aggregate alignment down to byte alignment.
589 align: if packed { dl.i8_align } else { dl.aggregate_align },
593 memory_index: vec![],
594 min_size: Size::from_bytes(0),
597 // Anything with repr(C) or repr(packed) doesn't optimize.
598 // Neither do 1-member and 2-member structs.
599 // In addition, code in trans assume that 2-element structs can become pairs.
600 // It's easier to just short-circuit here.
601 let mut can_optimize = (fields.len() > 2 || StructKind::EnumVariant == kind)
602 && ! (repr.c || repr.packed);
604 // Disable field reordering until we can decide what to do.
605 // The odd pattern here avoids a warning about the value never being read.
606 if can_optimize { can_optimize = false; }
608 let (optimize, sort_ascending) = match kind {
609 StructKind::AlwaysSizedUnivariant => (can_optimize, false),
610 StructKind::MaybeUnsizedUnivariant => (can_optimize, false),
611 StructKind::EnumVariant => {
612 assert!(fields.len() >= 1, "Enum variants must have discriminants.");
// Only sort ascending when the discriminant is a single byte.
613 (can_optimize && fields[0].size(dl).bytes() == 1, true)
617 ret.offsets = vec![Size::from_bytes(0); fields.len()];
618 let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();
// A leading discriminant and a trailing maybe-unsized field are pinned in
// place; only the indices in [start..end) participate in reordering.
621 let start = if let StructKind::EnumVariant = kind { 1 } else { 0 };
622 let end = if let StructKind::MaybeUnsizedUnivariant = kind {
628 let optimizing = &mut inverse_memory_index[start..end];
630 optimizing.sort_by_key(|&x| fields[x as usize].align(dl).abi());
// NOTE(review): the body of this comparator is elided in this view;
// presumably it sorts by descending ABI alignment — confirm in the full file.
632 optimizing.sort_by(| &a, &b | {
633 let a = fields[a as usize].align(dl).abi();
634 let b = fields[b as usize].align(dl).abi();
641 // inverse_memory_index holds field indices by increasing memory offset.
642 // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
643 // We now write field offsets to the corresponding offset slot;
644 // field 5 with offset 0 puts 0 in offsets[5].
645 // At the bottom of this function, we use inverse_memory_index to produce memory_index.
647 if let StructKind::EnumVariant = kind {
648 assert_eq!(inverse_memory_index[0], 0,
649 "Enum variant discriminants must have the lowest offset.");
652 let mut offset = Size::from_bytes(0);
654 for i in inverse_memory_index.iter() {
655 let field = fields[*i as usize];
656 bug!("Struct::new: field #{} of `{}` comes after unsized field",
658 ret.offsets.len(), scapegoat);
661 if field.is_unsized() {
665 // Invariant: offset < dl.obj_size_bound() <= 1<<61
667 let align = field.align(dl);
668 ret.align = ret.align.max(align);
669 offset = offset.abi_align(align);
672 debug!("Struct::new offset: {:?} field: {:?} {:?}", offset, field, field.size(dl));
673 ret.offsets[*i as usize] = offset;
// Advancing past this field may overflow the object-size bound.
675 offset = offset.checked_add(field.size(dl), dl)
676 .map_or(Err(LayoutError::SizeOverflow(scapegoat)), Ok)?;
680 debug!("Struct::new min_size: {:?}", offset);
681 ret.min_size = offset;
683 // As stated above, inverse_memory_index holds field indices by increasing offset.
684 // This makes it an already-sorted view of the offsets vec.
685 // To invert it, consider:
686 // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
687 // Field 5 would be the first element, so memory_index is i:
688 // Note: if we didn't optimize, it's already right.
691 ret.memory_index = vec![0; inverse_memory_index.len()];
693 for i in 0..inverse_memory_index.len() {
694 ret.memory_index[inverse_memory_index[i] as usize] = i as u32;
697 ret.memory_index = inverse_memory_index;
703 /// Get the size with trailing alignment padding.
704 pub fn stride(&self) -> Size {
705 self.min_size.abi_align(self.align)
708 /// Determine whether a structure would be zero-sized, given its fields.
709 fn would_be_zero_sized<I>(dl: &TargetDataLayout, fields: I)
710 -> Result<bool, LayoutError<'gcx>>
711 where I: Iterator<Item=Result<&'a Layout, LayoutError<'gcx>>> {
712 for field in fields {
// A single unsized or nonzero-sized field makes the struct non-zero-sized.
714 if field.is_unsized() || field.size(dl).bytes() > 0 {
721 /// Get indices of the tys that made this struct by increasing offset.
// For up to 64 fields, the inverse permutation is built in a u8 stack array;
// larger structs fall back to a heap-allocated Vec<u32>.
723 pub fn field_index_by_increasing_offset<'b>(&'b self) -> impl iter::Iterator<Item=usize>+'b {
724 let mut inverse_small = [0u8; 64];
725 let mut inverse_big = vec![];
726 let use_small = self.memory_index.len() <= inverse_small.len();
728 // We have to write this logic twice in order to keep the array small.
730 for i in 0..self.memory_index.len() {
731 inverse_small[self.memory_index[i] as usize] = i as u8;
734 inverse_big = vec![0; self.memory_index.len()];
735 for i in 0..self.memory_index.len() {
736 inverse_big[self.memory_index[i] as usize] = i as u32;
// The closure captures both buffers; only one was actually populated.
740 (0..self.memory_index.len()).map(move |i| {
741 if use_small { inverse_small[i] as usize }
742 else { inverse_big[i] as usize }
746 /// Find the path leading to a non-zero leaf field, starting from
747 /// the given type and recursing through aggregates.
748 /// The tuple is `(path, source_path)`,
749 /// where `path` is in memory order and `source_path` in source order.
750 // FIXME(eddyb) track value ranges and traverse already optimized enums.
751 fn non_zero_field_in_type(infcx: &InferCtxt<'a, 'gcx, 'tcx>,
753 -> Result<Option<(FieldPath, FieldPath)>, LayoutError<'gcx>> {
754 let tcx = infcx.tcx.global_tcx();
// Dispatch on (layout, type) pairs; empty paths mean "the value itself".
755 match (ty.layout(infcx)?, &ty.sty) {
756 (&Scalar { non_zero: true, .. }, _) |
757 (&CEnum { non_zero: true, .. }, _) => Ok(Some((vec![], vec![]))),
// A fat pointer is non-zero iff its data-pointer half is.
758 (&FatPointer { non_zero: true, .. }, _) => {
759 Ok(Some((vec![FAT_PTR_ADDR as u32], vec![FAT_PTR_ADDR as u32])))
762 // Is this the NonZero lang item wrapping a pointer or integer type?
763 (&Univariant { non_zero: true, .. }, &ty::TyAdt(def, substs)) => {
764 let fields = &def.struct_variant().fields;
765 assert_eq!(fields.len(), 1);
766 match *fields[0].ty(tcx, substs).layout(infcx)? {
767 // FIXME(eddyb) also allow floating-point types here.
768 Scalar { value: Int(_), non_zero: false } |
769 Scalar { value: Pointer, non_zero: false } => {
770 Ok(Some((vec![0], vec![0])))
772 FatPointer { non_zero: false, .. } => {
773 let tmp = vec![FAT_PTR_ADDR as u32, 0];
774 Ok(Some((tmp.clone(), tmp)))
780 // Perhaps one of the fields of this struct is non-zero
781 // let's recurse and find out
782 (&Univariant { ref variant, .. }, &ty::TyAdt(def, substs)) if def.is_struct() => {
783 Struct::non_zero_field_paths(infcx, def.struct_variant().fields
784 .iter().map(|field| {
785 field.ty(tcx, substs)
787 Some(&variant.memory_index[..]))
790 // Perhaps one of the upvars of this closure is non-zero
791 (&Univariant { ref variant, .. }, &ty::TyClosure(def, substs)) => {
792 let upvar_tys = substs.upvar_tys(def, tcx);
793 Struct::non_zero_field_paths(infcx, upvar_tys,
794 Some(&variant.memory_index[..]))
796 // Can we use one of the fields in this tuple?
797 (&Univariant { ref variant, .. }, &ty::TyTuple(tys, _)) => {
798 Struct::non_zero_field_paths(infcx, tys.iter().cloned(),
799 Some(&variant.memory_index[..]))
802 // Is this a fixed-size array of something non-zero
803 // with at least one element?
804 (_, &ty::TyArray(ety, d)) if d > 0 => {
805 Struct::non_zero_field_paths(infcx, Some(ety).into_iter(), None)
// Projections/anon types: normalize and retry, unless normalization was a no-op.
808 (_, &ty::TyProjection(_)) | (_, &ty::TyAnon(..)) => {
809 let normalized = normalize_associated_type(infcx, ty);
810 if ty == normalized {
813 return Struct::non_zero_field_in_type(infcx, normalized);
816 // Anything else is not a non-zero type.
821 /// Find the path leading to a non-zero leaf field, starting from
822 /// the given set of fields and recursing through aggregates.
823 /// Returns Some((path, source_path)) on success.
824 /// `path` is translated to memory order. `source_path` is not.
825 fn non_zero_field_paths<I>(infcx: &InferCtxt<'a, 'gcx, 'tcx>,
827 permutation: Option<&[u32]>)
828 -> Result<Option<(FieldPath, FieldPath)>, LayoutError<'gcx>>
829 where I: Iterator<Item=Ty<'gcx>> {
// Stop at the first field that contains a non-zero leaf.
830 for (i, ty) in fields.enumerate() {
831 if let Some((mut path, mut source_path)) = Struct::non_zero_field_in_type(infcx, ty)? {
832 source_path.push(i as u32);
// Translate the source-order index through the field permutation, if any.
833 let index = if let Some(p) = permutation {
838 path.push(index as u32);
839 return Ok(Some((path, source_path)));
846 /// An untagged union.
847 #[derive(PartialEq, Eq, Hash, Debug)]
853 /// If true, no alignment padding is used.
857 impl<'a, 'gcx, 'tcx> Union {
858 fn new(dl: &TargetDataLayout, packed: bool) -> Union {
// Same starting alignment rule as Struct::new: packed unions align to a byte.
860 align: if packed { dl.i8_align } else { dl.aggregate_align },
861 min_size: Size::from_bytes(0),
866 /// Extend the Union with more fields.
867 fn extend<I>(&mut self, dl: &TargetDataLayout,
870 -> Result<(), LayoutError<'gcx>>
871 where I: Iterator<Item=Result<&'a Layout, LayoutError<'gcx>>> {
872 for (index, field) in fields.enumerate() {
874 if field.is_unsized() {
875 bug!("Union::extend: field #{} of `{}` is unsized",
879 debug!("Union::extend field: {:?} {:?}", field, field.size(dl));
// All fields overlap at offset 0, so a union's alignment and size are the
// maxima over its fields.
882 self.align = self.align.max(field.align(dl));
884 self.min_size = cmp::max(self.min_size, field.size(dl));
887 debug!("Union::extend min-size: {:?}", self.min_size);
892 /// Get the size with trailing alignment padding.
893 pub fn stride(&self) -> Size {
894 self.min_size.abi_align(self.align)
898 /// The first half of a fat pointer.
899 /// - For a trait object, this is the address of the box.
900 /// - For a slice, this is the base address.
901 pub const FAT_PTR_ADDR: usize = 0;
903 /// The second half of a fat pointer.
904 /// - For a trait object, this is the address of the vtable.
905 /// - For a slice, this is the length.
906 pub const FAT_PTR_EXTRA: usize = 1;
908 /// Type layout, from which size and alignment can be cheaply computed.
909 /// For ADTs, it also includes field placement and enum optimizations.
910 /// NOTE: Because Layout is interned, redundant information should be
911 /// kept to a minimum, e.g. it includes no sub-component Ty or Layout.
912 #[derive(Debug, PartialEq, Eq, Hash)]
914 /// TyBool, TyChar, TyInt, TyUint, TyFloat, TyRawPtr, TyRef or TyFnPtr.
917 // If true, the value cannot represent a bit pattern of all zeroes.
921 /// SIMD vectors, from structs marked with #[repr(simd)].
927 /// TyArray, TySlice or TyStr.
929 /// If true, the size is exact, otherwise it's only a lower bound.
936 /// TyRawPtr or TyRef with a !Sized pointee.
939 // If true, the pointer cannot be null.
943 // Remaining variants are all ADTs such as structs, enums or tuples.
945 /// C-like enums; basically an integer.
950 // Inclusive discriminant range.
951 // If min > max, it represents min...u64::MAX followed by 0...max.
952 // FIXME(eddyb) always use the shortest range, e.g. by finding
953 // the largest space between two consecutive discriminants and
954 // taking everything else as the (shortest) discriminant range.
959 /// Single-case enums, and structs/tuples.
962 // If true, the structure is NonZero.
963 // FIXME(eddyb) use a newtype Layout kind for this.
972 /// General-case enums: for each case there is a struct, and they
973 /// all start with a field for the discriminant.
// One Struct layout per variant, each including its discriminant field.
976 variants: Vec<Struct>,
981 /// Two cases distinguished by a nullable pointer: the case with discriminant
982 /// `nndiscr` must have single field which is known to be nonnull due to its type.
983 /// The other case is known to be zero sized. Hence we represent the enum
984 /// as simply a nullable pointer: if not null it indicates the `nndiscr` variant,
985 /// otherwise it indicates the other case.
987 /// For example, `std::option::Option` instantiated at a safe pointer type
988 /// is represented such that `None` is a null pointer and `Some` is the
989 /// identity function.
995 /// Two cases distinguished by a nullable pointer: the case with discriminant
996 /// `nndiscr` is represented by the struct `nonnull`, where the `discrfield`th
997 /// field is known to be nonnull due to its type; if that field is null, then
998 /// it represents the other case, which is known to be zero sized.
999 StructWrappedNullablePointer {
1002 // N.B. There is a 0 at the start, for LLVM GEP through a pointer.
1003 discrfield: FieldPath,
1004 // Like discrfield, but in source order. For debuginfo.
1005 discrfield_source: FieldPath
1009 #[derive(Copy, Clone, Debug)]
/// Reasons layout computation can fail for a type.
1010 pub enum LayoutError<'tcx> {
// The computed size exceeded the target's object-size bound.
1012 SizeOverflow(Ty<'tcx>)
1015 impl<'tcx> fmt::Display for LayoutError<'tcx> {
1016 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
1018 LayoutError::Unknown(ty) => {
1019 write!(f, "the type `{:?}` has an unknown layout", ty)
1021 LayoutError::SizeOverflow(ty) => {
1022 write!(f, "the type `{:?}` is too big for the current architecture", ty)
1028 /// Helper function for normalizing associated types in an inference context.
1029 fn normalize_associated_type<'a, 'gcx, 'tcx>(infcx: &InferCtxt<'a, 'gcx, 'tcx>,
// Fast path: nothing to normalize.
1032 if !ty.has_projection_types() {
1036 let mut selcx = traits::SelectionContext::new(infcx);
1037 let cause = traits::ObligationCause::dummy();
1038 let traits::Normalized { value: result, obligations } =
1039 traits::normalize(&mut selcx, cause, &ty);
// Discharge the obligations produced by normalization before returning the
// normalized type (panics on unfulfilled obligations, per the helper's name).
1041 let mut fulfill_cx = traits::FulfillmentContext::new();
1043 for obligation in obligations {
1044 fulfill_cx.register_predicate_obligation(infcx, obligation);
1047 infcx.drain_fulfillment_cx_or_panic(DUMMY_SP, &mut fulfill_cx, &result)
1050 impl<'a, 'gcx, 'tcx> Layout {
1051 pub fn compute_uncached(ty: Ty<'gcx>,
1052 infcx: &InferCtxt<'a, 'gcx, 'tcx>)
1053 -> Result<&'gcx Layout, LayoutError<'gcx>> {
1054 let tcx = infcx.tcx.global_tcx();
1055 let success = |layout| Ok(tcx.intern_layout(layout));
1056 let dl = &tcx.data_layout;
1057 assert!(!ty.has_infer_types());
1059 let ptr_layout = |pointee: Ty<'gcx>| {
1060 let non_zero = !ty.is_unsafe_ptr();
1061 let pointee = normalize_associated_type(infcx, pointee);
1062 if pointee.is_sized(tcx, &infcx.parameter_environment, DUMMY_SP) {
1063 Ok(Scalar { value: Pointer, non_zero: non_zero })
1065 let unsized_part = tcx.struct_tail(pointee);
1066 let meta = match unsized_part.sty {
1067 ty::TySlice(_) | ty::TyStr => {
1068 Int(dl.ptr_sized_integer())
1070 ty::TyDynamic(..) => Pointer,
1071 _ => return Err(LayoutError::Unknown(unsized_part))
1073 Ok(FatPointer { metadata: meta, non_zero: non_zero })
1077 let layout = match ty.sty {
1079 ty::TyBool => Scalar { value: Int(I1), non_zero: false },
1080 ty::TyChar => Scalar { value: Int(I32), non_zero: false },
1083 value: Int(Integer::from_attr(dl, attr::SignedInt(ity))),
1087 ty::TyUint(ity) => {
1089 value: Int(Integer::from_attr(dl, attr::UnsignedInt(ity))),
1093 ty::TyFloat(FloatTy::F32) => Scalar { value: F32, non_zero: false },
1094 ty::TyFloat(FloatTy::F64) => Scalar { value: F64, non_zero: false },
1095 ty::TyFnPtr(_) => Scalar { value: Pointer, non_zero: true },
1098 ty::TyNever => Univariant {
1099 variant: Struct::new(dl, &vec![], &ReprOptions::default(),
// NOTE(review): excerpt with interior lines elided (embedded line numbers are
// non-contiguous); comments only describe what is visible here.
// Pointer-like types: references, raw pointers, and Box all defer to a shared
// `ptr_layout` helper (defined in an elided part of this function).
1100 StructKind::AlwaysSizedUnivariant, ty)?,
1104 // Potentially-fat pointers.
1105 ty::TyRef(_, ty::TypeAndMut { ty: pointee, .. }) |
1106 ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
1107 ptr_layout(pointee)?
1109 ty::TyAdt(def, _) if def.is_box() => {
1110 ptr_layout(ty.boxed_ty())?
1113 // Arrays and slices.
1114 ty::TyArray(element, count) => {
1115 let element = element.layout(infcx)?;
1116 let element_size = element.size(dl);
// Overflow of element_size * count is reported as SizeOverflow rather than
// silently wrapping.
1117 // FIXME(eddyb) Don't use host `usize` for array lengths.
1118 let usize_count: usize = count;
1119 let count = usize_count as u64;
1120 if element_size.checked_mul(count, dl).is_none() {
1121 return Err(LayoutError::SizeOverflow(ty));
1125 align: element.align(dl),
1126 element_size: element_size,
1130 ty::TySlice(element) => {
1131 let element = element.layout(infcx)?;
1134 align: element.align(dl),
1135 element_size: element.size(dl),
1143 element_size: Size::from_bytes(1),
// Zero-sized function items and trait objects are represented as empty
// univariant structs.
1149 ty::TyFnDef(..) => {
1151 variant: Struct::new(dl, &vec![],
1152 &ReprOptions::default(), StructKind::AlwaysSizedUnivariant, ty)?,
1156 ty::TyDynamic(..) => {
1157 let mut unit = Struct::new(dl, &vec![], &ReprOptions::default(),
1158 StructKind::AlwaysSizedUnivariant, ty)?;
1160 Univariant { variant: unit, non_zero: false }
1163 // Tuples and closures.
1164 ty::TyClosure(def_id, ref substs) => {
1165 let tys = substs.upvar_tys(def_id, tcx);
1166 let st = Struct::new(dl,
1167 &tys.map(|ty| ty.layout(infcx))
1168 .collect::<Result<Vec<_>, _>>()?,
1169 &ReprOptions::default(),
1170 StructKind::AlwaysSizedUnivariant, ty)?;
1171 Univariant { variant: st, non_zero: false }
1174 ty::TyTuple(tys, _) => {
1175 // FIXME(camlorn): if we ever allow unsized tuples, this needs to be checked.
1176 // See the univariant case below to learn how.
1177 let st = Struct::new(dl,
1178 &tys.iter().map(|ty| ty.layout(infcx))
1179 .collect::<Result<Vec<_>, _>>()?,
1180 &ReprOptions::default(), StructKind::AlwaysSizedUnivariant, ty)?;
1181 Univariant { variant: st, non_zero: false }
1184 // SIMD vector types.
// A #[repr(simd)] ADT must have a scalar element type; anything else is a
// fatal monomorphisation error.
1185 ty::TyAdt(def, ..) if def.repr.simd => {
1186 let element = ty.simd_type(tcx);
1187 match *element.layout(infcx)? {
1188 Scalar { value, .. } => {
1189 return success(Vector {
1191 count: ty.simd_size(tcx) as u64
1195 tcx.sess.fatal(&format!("monomorphising SIMD type `{}` with \
1196 a non-machine element type `{}`",
// NOTE(review): excerpt with interior lines elided; comments only describe
// what is visible here.
// ADTs (structs, enums, unions).
1203 ty::TyAdt(def, substs) => {
1204 if def.variants.is_empty() {
1205 // Uninhabitable; represent as unit
1206 // (Typechecking will reject discriminant-sizing attrs.)
1208 return success(Univariant {
1209 variant: Struct::new(dl, &vec![],
1210 &def.repr, StructKind::AlwaysSizedUnivariant, ty)?,
// C-like enum: every variant is fieldless, so the layout is just the
// smallest integer that covers the discriminant range (tracking whether 0
// is a possible value for the non-zero optimization).
1215 if def.is_enum() && def.variants.iter().all(|v| v.fields.is_empty()) {
1216 // All bodies empty -> intlike
1217 let (mut min, mut max, mut non_zero) = (i64::max_value(),
1220 for discr in def.discriminants(tcx) {
1221 let x = discr.to_u128_unchecked() as i64;
1222 if x == 0 { non_zero = false; }
1223 if x < min { min = x; }
1224 if x > max { max = x; }
1227 // FIXME: should handle i128? signed-value based impl is weird and hard to
1229 let (discr, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max);
1230 return success(CEnum {
1234 // FIXME: should be u128?
// Struct / union / single-variant enum: decide whether the last field can
// be unsized (only relevant for non-enum ADTs, checked via is_sized on the
// last field's type in a fresh parameter environment).
1240 if !def.is_enum() || (def.variants.len() == 1 &&
1241 !def.repr.inhibit_enum_layout_opt()) {
1242 // Struct, or union, or univariant enum equivalent to a struct.
1243 // (Typechecking will reject discriminant-sizing attrs.)
1245 let kind = if def.is_enum() || def.variants[0].fields.len() == 0{
1246 StructKind::AlwaysSizedUnivariant
1248 use middle::region::ROOT_CODE_EXTENT;
1249 let param_env = tcx.construct_parameter_environment(DUMMY_SP,
1250 def.did, ROOT_CODE_EXTENT);
1251 let fields = &def.variants[0].fields;
1252 let last_field = &fields[fields.len()-1];
1253 let always_sized = last_field.ty(tcx, param_env.free_substs)
1254 .is_sized(tcx, &param_env, DUMMY_SP);
// NOTE(review): excerpt with interior lines elided; comments only describe
// what is visible here.
1255 if !always_sized { StructKind::MaybeUnsizedUnivariant }
1256 else { StructKind::AlwaysSizedUnivariant }
// Compute the layout of each field, then build either an untagged union or
// a univariant struct (flagging NonZero lang-item newtypes).
1259 let fields = def.variants[0].fields.iter().map(|field| {
1260 field.ty(tcx, substs).layout(infcx)
1261 }).collect::<Result<Vec<_>, _>>()?;
1262 let layout = if def.is_union() {
1263 let mut un = Union::new(dl, def.repr.packed);
1264 un.extend(dl, fields.iter().map(|&f| Ok(f)), ty)?;
1265 UntaggedUnion { variants: un }
1267 let st = Struct::new(dl, &fields, &def.repr,
1269 let non_zero = Some(def.did) == tcx.lang_items.non_zero();
1270 Univariant { variant: st, non_zero: non_zero }
1272 return success(layout);
1275 // Since there's at least one
1276 // non-empty body, explicit discriminants should have
1277 // been rejected by a checker before this point.
1278 for (i, v) in def.variants.iter().enumerate() {
1279 if v.discr != ty::VariantDiscr::Relative(i) {
1280 bug!("non-C-like enum {} with specified discriminants",
1281 tcx.item_path_str(def.did));
1285 // Cache the substituted and normalized variant field types.
1286 let variants = def.variants.iter().map(|v| {
1287 v.fields.iter().map(|field| field.ty(tcx, substs)).collect::<Vec<_>>()
1288 }).collect::<Vec<_>>();
// Two-variant enums may use the nullable-pointer optimization: if one
// variant is zero-sized and the other contains a non-zero field, the
// discriminant can be encoded in that field being null/non-null.
1290 if variants.len() == 2 && !def.repr.inhibit_enum_layout_opt() {
1291 // Nullable pointer optimization
1293 let other_fields = variants[1 - discr].iter().map(|ty| {
1296 if !Struct::would_be_zero_sized(dl, other_fields)? {
1299 let paths = Struct::non_zero_field_paths(infcx,
1300 variants[discr].iter().cloned(),
1302 let (mut path, mut path_source) = if let Some(p) = paths { p }
1305 // FIXME(eddyb) should take advantage of a newtype.
// Single scalar non-zero field at the top level: represent the whole enum
// as a raw nullable pointer/scalar.
1306 if path == &[0] && variants[discr].len() == 1 {
1307 let value = match *variants[discr][0].layout(infcx)? {
1308 Scalar { value, .. } => value,
1309 CEnum { discr, .. } => Int(discr),
1310 _ => bug!("Layout::compute: `{}`'s non-zero \
1311 `{}` field not scalar?!",
1312 ty, variants[discr][0])
1314 return success(RawNullablePointer {
1315 nndiscr: discr as u64,
// Otherwise wrap the non-null variant as a struct and record the field
// path to the discriminating (non-zero) field, translated through the
// struct's memory_index since fields may be reordered.
1320 let st = Struct::new(dl,
1321 &variants[discr].iter().map(|ty| ty.layout(infcx))
1322 .collect::<Result<Vec<_>, _>>()?,
1323 &def.repr, StructKind::AlwaysSizedUnivariant, ty)?;
1325 // We have to fix the last element of path here.
1326 let mut i = *path.last().unwrap();
1327 i = st.memory_index[i as usize];
1328 *path.last_mut().unwrap() = i;
1329 path.push(0); // For GEP through a pointer.
1331 path_source.push(0);
1332 path_source.reverse();
1334 return success(StructWrappedNullablePointer {
1335 nndiscr: discr as u64,
1338 discrfield_source: path_source
// NOTE(review): excerpt with interior lines elided; comments only describe
// what is visible here.
// General tagged-enum case: prefix every variant with an integer
// discriminant, lay each variant out as a struct, then take the max size
// and alignment across variants.
1343 // The general case.
1344 let discr_max = (variants.len() - 1) as i64;
1345 assert!(discr_max >= 0);
1346 let (min_ity, _) = Integer::repr_discr(tcx, ty, &def.repr, 0, discr_max);
1347 let mut align = dl.aggregate_align;
1348 let mut size = Size::from_bytes(0);
1350 // We're interested in the smallest alignment, so start large.
1351 let mut start_align = Align::from_bytes(256, 256).unwrap();
1353 // Create the set of structs that represent each variant
1354 // Use the minimum integer type we figured out above
1355 let discr = Scalar { value: Int(min_ity), non_zero: false };
1356 let mut variants = variants.into_iter().map(|fields| {
1357 let mut fields = fields.into_iter().map(|field| {
1359 }).collect::<Result<Vec<_>, _>>()?;
1360 fields.insert(0, &discr);
1361 let st = Struct::new(dl,
1363 &def.repr, StructKind::EnumVariant, ty)?;
1364 // Find the first field we can't move later
1365 // to make room for a larger discriminant.
1366 // It is important to skip the first field.
1367 for i in st.field_index_by_increasing_offset().skip(1) {
1368 let field = fields[i];
1369 let field_align = field.align(dl);
1370 if field.size(dl).bytes() != 0 || field_align.abi() != 1 {
1371 start_align = start_align.min(field_align);
1375 size = cmp::max(size, st.min_size);
1376 align = align.max(st.align);
1378 }).collect::<Result<Vec<_>, _>>()?;
1380 // Align the maximum variant size to the largest alignment.
1381 size = size.abi_align(align);
1383 if size.bytes() >= dl.obj_size_bound() {
1384 return Err(LayoutError::SizeOverflow(ty));
// Sanity check: layout's discriminant must not be wider than what typeck
// decided from the #[repr] attribute.
1387 let typeck_ity = Integer::from_attr(dl, def.repr.discr_type());
1388 if typeck_ity < min_ity {
1389 // It is a bug if Layout decided on a greater discriminant size than typeck for
1390 // some reason at this point (based on values discriminant can take on). Mostly
1391 // because this discriminant will be loaded, and then stored into variable of
1392 // type calculated by typeck. Consider such case (a bug): typeck decided on
1393 // byte-sized discriminant, but layout thinks we need a 16-bit to store all
1394 // discriminant values. That would be a bug, because then, in trans, in order
1395 // to store this 16-bit discriminant into 8-bit sized temporary some of the
1396 // space necessary to represent would have to be discarded (or layout is wrong
1397 // on thinking it needs 16 bits)
1398 bug!("layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
1399 min_ity, typeck_ity);
1400 // However, it is fine to make discr type however large (as an optimisation)
1401 // after this point – we’ll just truncate the value we load in trans.
1404 // Check to see if we should use a different type for the
1405 // discriminant. We can safely use a type with the same size
1406 // as the alignment of the first field of each variant.
1407 // We increase the size of the discriminant to avoid LLVM copying
1408 // padding when it doesn't need to. This normally causes unaligned
1409 // load/stores and excessive memcpy/memset operations. By using a
1410 // bigger integer size, LLVM can be sure about it's contents and
1411 // won't be so conservative.
1413 // Use the initial field alignment
1414 let mut ity = Integer::for_abi_align(dl, start_align).unwrap_or(min_ity);
1416 // If the alignment is not larger than the chosen discriminant size,
1417 // don't use the alignment as the final size.
// If the discriminant was widened, shift the early field offsets and grow
// each variant struct accordingly.
1421 // Patch up the variants' first few fields.
1422 let old_ity_size = Int(min_ity).size(dl);
1423 let new_ity_size = Int(ity).size(dl);
1424 for variant in &mut variants {
1425 for i in variant.offsets.iter_mut() {
1426 // The first field is the discriminant, at offset 0.
1427 // These aren't in order, and we need to skip it.
1428 if *i <= old_ity_size && *i > Size::from_bytes(0) {
1432 // We might be making the struct larger.
1433 if variant.min_size <= old_ity_size {
1434 variant.min_size = new_ity_size;
// Associated types / impl Trait: retry after normalization, otherwise the
// layout is unknown. Inference variables and errors are compiler bugs here.
1447 // Types with no meaningful known layout.
1448 ty::TyProjection(_) | ty::TyAnon(..) => {
1449 let normalized = normalize_associated_type(infcx, ty);
1450 if ty == normalized {
1451 return Err(LayoutError::Unknown(ty));
1453 return normalized.layout(infcx);
1456 return Err(LayoutError::Unknown(ty));
1458 ty::TyInfer(_) | ty::TyError => {
1459 bug!("Layout::compute: unexpected type `{}`", ty)
// NOTE(review): excerpt with interior lines elided; comments only describe
// what is visible here.
1466 /// Returns true if the layout corresponds to an unsized type.
1467 pub fn is_unsized(&self) -> bool {
// Only arrays (slices) and univariant structs can carry an unsized tail;
// every other layout variant is always sized.
1469 Scalar {..} | Vector {..} | FatPointer {..} |
1470 CEnum {..} | UntaggedUnion {..} | General {..} |
1471 RawNullablePointer {..} |
1472 StructWrappedNullablePointer {..} => false,
1474 Array { sized, .. } |
1475 Univariant { variant: Struct { sized, .. }, .. } => !sized
// Total size of this layout for the given target data layout; overflow in
// vector/array element math is a bug!() since compute() already rejected it.
1479 pub fn size<C: HasDataLayout>(&self, cx: C) -> Size {
1480 let dl = cx.data_layout();
1483 Scalar { value, .. } | RawNullablePointer { value, .. } => {
1487 Vector { element, count } => {
1488 let element_size = element.size(dl);
1489 let vec_size = match element_size.checked_mul(count, dl) {
1491 None => bug!("Layout::size({:?}): {} * {} overflowed",
1492 self, element_size.bytes(), count)
1494 vec_size.abi_align(self.align(dl))
1497 Array { element_size, count, .. } => {
1498 match element_size.checked_mul(count, dl) {
1500 None => bug!("Layout::size({:?}): {} * {} overflowed",
1501 self, element_size.bytes(), count)
1505 FatPointer { metadata, .. } => {
1506 // Effectively a (ptr, meta) tuple.
1507 Pointer.size(dl).abi_align(metadata.align(dl))
1508 .checked_add(metadata.size(dl), dl).unwrap()
1509 .abi_align(self.align(dl))
1512 CEnum { discr, .. } => Int(discr).size(dl),
1513 General { size, .. } => size,
1514 UntaggedUnion { ref variants } => variants.stride(),
1516 Univariant { ref variant, .. } |
1517 StructWrappedNullablePointer { nonnull: ref variant, .. } => {
// NOTE(review): excerpt with interior lines elided; comments only describe
// what is visible here.
// ABI alignment of this layout for the given target data layout.
1523 pub fn align<C: HasDataLayout>(&self, cx: C) -> Align {
1524 let dl = cx.data_layout();
1527 Scalar { value, .. } | RawNullablePointer { value, .. } => {
// Vectors: prefer a target-specified vector alignment for this exact size,
// falling back to natural (power-of-two) alignment like LLVM.
1531 Vector { element, count } => {
1532 let elem_size = element.size(dl);
1533 let vec_size = match elem_size.checked_mul(count, dl) {
1535 None => bug!("Layout::align({:?}): {} * {} overflowed",
1536 self, elem_size.bytes(), count)
1538 for &(size, align) in &dl.vector_align {
1539 if size == vec_size {
1543 // Default to natural alignment, which is what LLVM does.
1544 // That is, use the size, rounded up to a power of 2.
1545 let align = vec_size.bytes().next_power_of_two();
1546 Align::from_bytes(align, align).unwrap()
1549 FatPointer { metadata, .. } => {
1550 // Effectively a (ptr, meta) tuple.
1551 Pointer.align(dl).max(metadata.align(dl))
1554 CEnum { discr, .. } => Int(discr).align(dl),
1555 Array { align, .. } | General { align, .. } => align,
1556 UntaggedUnion { ref variants } => variants.align,
1558 Univariant { ref variant, .. } |
1559 StructWrappedNullablePointer { nonnull: ref variant, .. } => {
// Byte offset of field `i`; `variant_index` selects the variant for
// multi-variant layouts (required for General, per the expect below).
1565 pub fn field_offset<C: HasDataLayout>(&self,
1568 variant_index: Option<usize>)
1570 let dl = cx.data_layout();
1575 UntaggedUnion { .. } |
1576 RawNullablePointer { .. } => {
1580 Vector { element, count } => {
1581 let element_size = element.size(dl);
1584 Size::from_bytes(element_size.bytes() * count)
1587 Array { element_size, count, .. } => {
1590 Size::from_bytes(element_size.bytes() * count)
1593 FatPointer { metadata, .. } => {
1594 // Effectively a (ptr, meta) tuple.
1599 Pointer.size(dl).abi_align(metadata.align(dl))
1603 Univariant { ref variant, .. } => variant.offsets[i],
// General: offsets are stored per-variant; index i+1 skips the implicit
// discriminant field inserted at position 0.
1605 General { ref variants, .. } => {
1606 let v = variant_index.expect("variant index required");
1607 variants[v].offsets[i + 1]
1610 StructWrappedNullablePointer { nndiscr, ref nonnull, .. } => {
1611 if Some(nndiscr as usize) == variant_index {
// NOTE(review): excerpt — some variants/fields of this enum are elided here.
1621 /// Type size "skeleton", i.e. the only information determining a type's size.
1622 /// While this is conservative, (aside from constant sizes, only pointers,
1623 /// newtypes thereof and null pointer optimized enums are allowed), it is
1624 /// enough to statically check common usecases of transmute.
1625 #[derive(Copy, Clone, Debug)]
1626 pub enum SizeSkeleton<'tcx> {
1627 /// Any statically computable Layout.
1630 /// A potentially-fat pointer.
1632 // If true, this pointer is never null.
1634 // The type which determines the unsized metadata, if any,
1635 // of this pointer. Either a type parameter or a projection
1636 // depending on one, with regions erased.
// NOTE(review): excerpt with interior lines elided; comments only describe
// what is visible here.
1641 impl<'a, 'gcx, 'tcx> SizeSkeleton<'gcx> {
// Computes a size skeleton for `ty`: a known size when a full layout is
// computable, otherwise a Pointer skeleton for pointer-like types whose
// tail is a type parameter or projection.
1642 pub fn compute(ty: Ty<'gcx>, infcx: &InferCtxt<'a, 'gcx, 'tcx>)
1643 -> Result<SizeSkeleton<'gcx>, LayoutError<'gcx>> {
1644 let tcx = infcx.tcx.global_tcx();
1645 assert!(!ty.has_infer_types());
1647 // First try computing a static layout.
1648 let err = match ty.layout(infcx) {
1650 return Ok(SizeSkeleton::Known(layout.size(tcx)));
// Pointer skeleton helper: safe (non-raw) pointers are non-null; the
// pointee's struct tail determines the metadata type.
1655 let ptr_skeleton = |pointee: Ty<'gcx>| {
1656 let non_zero = !ty.is_unsafe_ptr();
1657 let tail = tcx.struct_tail(pointee);
1659 ty::TyParam(_) | ty::TyProjection(_) => {
1660 assert!(tail.has_param_types() || tail.has_self_ty());
1661 Ok(SizeSkeleton::Pointer {
1663 tail: tcx.erase_regions(&tail)
1667 bug!("SizeSkeleton::compute({}): layout errored ({}), yet \
1668 tail `{}` is not a type parameter or a projection",
1675 ty::TyRef(_, ty::TypeAndMut { ty: pointee, .. }) |
1676 ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
1677 ptr_skeleton(pointee)
1679 ty::TyAdt(def, _) if def.is_box() => {
1680 ptr_skeleton(ty.boxed_ty())
1683 ty::TyAdt(def, substs) => {
1684 // Only newtypes and enums w/ nullable pointer optimization.
1685 if def.is_union() || def.variants.is_empty() || def.variants.len() > 2 {
1689 // Get a zero-sized variant or a pointer newtype.
1690 let zero_or_ptr_variant = |i: usize| {
1691 let fields = def.variants[i].fields.iter().map(|field| {
1692 SizeSkeleton::compute(field.ty(tcx, substs), infcx)
1695 for field in fields {
1698 SizeSkeleton::Known(size) => {
1699 if size.bytes() > 0 {
1703 SizeSkeleton::Pointer {..} => {
1714 let v0 = zero_or_ptr_variant(0)?;
1716 if def.variants.len() == 1 {
// Single-variant newtype around a pointer: also non-zero if the ADT is
// the NonZero lang item.
1717 if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
1718 return Ok(SizeSkeleton::Pointer {
1719 non_zero: non_zero ||
1720 Some(def.did) == tcx.lang_items.non_zero(),
1728 let v1 = zero_or_ptr_variant(1)?;
1729 // Nullable pointer enum optimization.
1731 (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None) |
1732 (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
1733 Ok(SizeSkeleton::Pointer {
1742 ty::TyProjection(_) | ty::TyAnon(..) => {
1743 let normalized = normalize_associated_type(infcx, ty);
1744 if ty == normalized {
1747 SizeSkeleton::compute(normalized, infcx)
// Two skeletons have the same size if both are Known and equal, or both
// are Pointers with equal tails.
1755 pub fn same_size(self, other: SizeSkeleton) -> bool {
1756 match (self, other) {
1757 (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
1758 (SizeSkeleton::Pointer { tail: a, .. },
1759 SizeSkeleton::Pointer { tail: b, .. }) => a == b,
// NOTE(review): excerpt with interior lines elided; comments only describe
// what is visible here.
1765 /// A pair of a type and its layout. Implements various
1766 /// type traversal APIs (e.g. recursing into fields).
1767 #[derive(Copy, Clone, Debug)]
1768 pub struct TyLayout<'tcx> {
1770 pub layout: &'tcx Layout,
1771 pub variant_index: Option<usize>,
// Deref to the underlying Layout so its methods can be called directly.
1774 impl<'tcx> Deref for TyLayout<'tcx> {
1775 type Target = Layout;
1776 fn deref(&self) -> &Layout {
// Context traits: access to a TyCtxt and to the target data layout,
// implemented for both TyCtxt and InferCtxt references.
1781 pub trait HasTyCtxt<'tcx>: HasDataLayout {
1782 fn tcx<'a>(&'a self) -> TyCtxt<'a, 'tcx, 'tcx>;
1785 impl<'a, 'gcx, 'tcx> HasDataLayout for TyCtxt<'a, 'gcx, 'tcx> {
1786 fn data_layout(&self) -> &TargetDataLayout {
1791 impl<'a, 'gcx, 'tcx> HasTyCtxt<'gcx> for TyCtxt<'a, 'gcx, 'tcx> {
1792 fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'gcx> {
1797 impl<'a, 'gcx, 'tcx> HasDataLayout for &'a InferCtxt<'a, 'gcx, 'tcx> {
1798 fn data_layout(&self) -> &TargetDataLayout {
1799 &self.tcx.data_layout
1803 impl<'a, 'gcx, 'tcx> HasTyCtxt<'gcx> for &'a InferCtxt<'a, 'gcx, 'tcx> {
1804 fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'gcx> {
1805 self.tcx.global_tcx()
// LayoutTyper: produce a TyLayout for a type; the InferCtxt impl
// normalizes associated types before computing the layout.
1809 pub trait LayoutTyper<'tcx>: HasTyCtxt<'tcx> {
1812 fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout;
1815 impl<'a, 'gcx, 'tcx> LayoutTyper<'gcx> for &'a InferCtxt<'a, 'gcx, 'tcx> {
1816 type TyLayout = Result<TyLayout<'gcx>, LayoutError<'gcx>>;
1818 fn layout_of(self, ty: Ty<'gcx>) -> Self::TyLayout {
1819 let ty = normalize_associated_type(self, ty);
1823 layout: ty.layout(self)?,
// NOTE(review): excerpt with interior lines elided; comments only describe
// what is visible here.
1829 impl<'a, 'tcx> TyLayout<'tcx> {
// Returns a copy of this TyLayout restricted to one variant.
1830 pub fn for_variant(&self, variant_index: usize) -> Self {
1832 variant_index: Some(variant_index),
// Delegates to Layout::field_offset using the stored variant index.
1837 pub fn field_offset<C: HasDataLayout>(&self, cx: C, i: usize) -> Size {
1838 self.layout.field_offset(cx, i, self.variant_index)
1841 pub fn field_count(&self) -> usize {
1842 // Handle enum/union through the type rather than Layout.
1843 if let ty::TyAdt(def, _) = self.ty.sty {
1844 let v = self.variant_index.unwrap_or(0);
1845 if def.variants.is_empty() {
1849 return def.variants[v].fields.len();
1853 match *self.layout {
1855 bug!("TyLayout::field_count({:?}): not applicable", self)
1858 // Handled above (the TyAdt case).
1861 UntaggedUnion { .. } |
1862 RawNullablePointer { .. } |
1863 StructWrappedNullablePointer { .. } => bug!(),
1865 FatPointer { .. } => 2,
1867 Vector { count, .. } |
1868 Array { count, .. } => {
1869 let usize_count = count as usize;
1870 assert_eq!(usize_count as u64, count);
1874 Univariant { ref variant, .. } => variant.offsets.len(),
// Type of field `i`; for fat pointers the metadata "field" type is derived
// from the pointee's struct tail (slice length, vtable pointer, etc.).
1878 pub fn field_type<C: HasTyCtxt<'tcx>>(&self, cx: C, i: usize) -> Ty<'tcx> {
1881 let ptr_field_type = |pointee: Ty<'tcx>| {
1882 let slice = |element: Ty<'tcx>| {
1885 tcx.mk_mut_ptr(element)
1890 match tcx.struct_tail(pointee).sty {
1891 ty::TySlice(element) => slice(element),
1892 ty::TyStr => slice(tcx.types.u8),
1893 ty::TyDynamic(..) => tcx.mk_mut_ptr(tcx.mk_nil()),
1894 _ => bug!("TyLayout::field_type({:?}): not applicable", self)
1907 ty::TyDynamic(..) => {
1908 bug!("TyLayout::field_type({:?}): not applicable", self)
1911 // Potentially-fat pointers.
1912 ty::TyRef(_, ty::TypeAndMut { ty: pointee, .. }) |
1913 ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
1914 ptr_field_type(pointee)
1916 ty::TyAdt(def, _) if def.is_box() => {
1917 ptr_field_type(self.ty.boxed_ty())
1920 // Arrays and slices.
1921 ty::TyArray(element, _) |
1922 ty::TySlice(element) => element,
1923 ty::TyStr => tcx.types.u8,
1925 // Tuples and closures.
1926 ty::TyClosure(def_id, ref substs) => {
1927 substs.upvar_tys(def_id, tcx).nth(i).unwrap()
1930 ty::TyTuple(tys, _) => tys[i],
1932 // SIMD vector types.
1933 ty::TyAdt(def, ..) if def.repr.simd => {
1934 self.ty.simd_type(tcx)
1938 ty::TyAdt(def, substs) => {
1939 def.variants[self.variant_index.unwrap_or(0)].fields[i].ty(tcx, substs)
1942 ty::TyProjection(_) | ty::TyAnon(..) | ty::TyParam(_) |
1943 ty::TyInfer(_) | ty::TyError => {
1944 bug!("TyLayout::field_type: unexpected type `{}`", self.ty)
// Convenience: layout of field `i`, via the context's layout_of.
1949 pub fn field<C: LayoutTyper<'tcx>>(&self, cx: C, i: usize) -> C::TyLayout {
1950 cx.layout_of(self.field_type(cx, i))