1 // Copyright 2016 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 pub use self::Integer::*;
12 pub use self::Layout::*;
13 pub use self::Primitive::*;
15 use session::{self, DataTypeKind, Session};
16 use ty::{self, Ty, TyCtxt, TypeFoldable, ReprOptions, ReprFlags};
18 use syntax::ast::{self, FloatTy, IntTy, UintTy};
20 use syntax_pos::DUMMY_SP;
29 use ich::StableHashingContext;
30 use rustc_data_structures::stable_hasher::{HashStable, StableHasher,
33 /// Parsed [Data layout](http://llvm.org/docs/LangRef.html#data-layout)
34 /// for a target, which contains everything needed to compute layouts.
// NOTE(review): this is a sampled listing; several struct fields between the
// numbered lines (endianness, the smaller i*/f* alignments, etc.) are elided
// here, although `Default` and `parse` below clearly reference them.
35 pub struct TargetDataLayout {
// ABI/preferred alignment for 128-bit integers.
42 pub i128_align: Align,
// Size and alignment of pointers on this target.
45 pub pointer_size: Size,
46 pub pointer_align: Align,
// Alignment applied to aggregates; set from the "a" data-layout component.
47 pub aggregate_align: Align,
49 /// Alignments for vector types.
// (vector size, alignment) pairs; extended by "v<size>:<align>" components
// in `parse` below.
50 pub vector_align: Vec<(Size, Align)>
53 impl Default for TargetDataLayout {
54 /// Creates an instance of `TargetDataLayout`.
// Baseline values; `parse` starts from this instance and overrides whatever
// fields the target's data-layout string mentions. Presumably these mirror
// LLVM's built-in defaults — TODO confirm against the LLVM LangRef.
55 fn default() -> TargetDataLayout {
58 i1_align: Align::from_bits(8, 8).unwrap(),
59 i8_align: Align::from_bits(8, 8).unwrap(),
60 i16_align: Align::from_bits(16, 16).unwrap(),
61 i32_align: Align::from_bits(32, 32).unwrap(),
// Note: i64/i128 default to ABI alignment 32 with *preferred* alignment 64.
62 i64_align: Align::from_bits(32, 64).unwrap(),
63 i128_align: Align::from_bits(32, 64).unwrap(),
64 f32_align: Align::from_bits(32, 32).unwrap(),
65 f64_align: Align::from_bits(64, 64).unwrap(),
66 pointer_size: Size::from_bits(64),
67 pointer_align: Align::from_bits(64, 64).unwrap(),
// An ABI alignment of 0 bits is treated like 1-byte alignment
// (see the comment inside `Align::from_bytes`).
68 aggregate_align: Align::from_bits(0, 64).unwrap(),
// Default vector alignments for 64- and 128-bit vectors.
70 (Size::from_bits(64), Align::from_bits(64, 64).unwrap()),
71 (Size::from_bits(128), Align::from_bits(128, 128).unwrap())
77 impl TargetDataLayout {
// Parses the target's LLVM-style data-layout string (components separated
// by '-', sub-fields by ':') into a `TargetDataLayout`. Malformed input is
// reported through `sess.err` and a fallback value is used, so parsing
// never aborts on its own.
78 pub fn parse(sess: &Session) -> TargetDataLayout {
79 // Parse a bit count from a string.
80 let parse_bits = |s: &str, kind: &str, cause: &str| {
81 s.parse::<u64>().unwrap_or_else(|err| {
82 sess.err(&format!("invalid {} `{}` for `{}` in \"data-layout\": {}",
83 kind, s, cause, err));
88 // Parse a size string.
89 let size = |s: &str, cause: &str| {
90 Size::from_bits(parse_bits(s, "size", cause))
93 // Parse an alignment string.
// Expects `[abi]` or `[abi, pref]`; the preferred alignment defaults to
// the ABI alignment when absent.
94 let align = |s: &[&str], cause: &str| {
96 sess.err(&format!("missing alignment for `{}` in \"data-layout\"", cause));
98 let abi = parse_bits(s[0], "alignment", cause);
99 let pref = s.get(1).map_or(abi, |pref| parse_bits(pref, "alignment", cause));
100 Align::from_bits(abi, pref).unwrap_or_else(|err| {
101 sess.err(&format!("invalid alignment for `{}` in \"data-layout\": {}",
// Fallback alignment after reporting an error: 1 byte.
103 Align::from_bits(8, 8).unwrap()
// Start from the defaults and override per data-layout component.
107 let mut dl = TargetDataLayout::default();
// Tracks the largest i{64..128} alignment spec seen, which determines the
// default i128 alignment (see the comment further down).
108 let mut i128_align_src = 64;
109 for spec in sess.target.target.data_layout.split("-") {
110 match &spec.split(":").collect::<Vec<_>>()[..] {
111 &["e"] => dl.endian = Endian::Little,
112 &["E"] => dl.endian = Endian::Big,
113 &["a", ref a..] => dl.aggregate_align = align(a, "a"),
114 &["f32", ref a..] => dl.f32_align = align(a, "f32"),
115 &["f64", ref a..] => dl.f64_align = align(a, "f64"),
// "p" and "p0" both denote the default address space's pointer spec.
116 &[p @ "p", s, ref a..] | &[p @ "p0", s, ref a..] => {
117 dl.pointer_size = size(s, p);
118 dl.pointer_align = align(a, p);
// Integer specs: "i<bits>[:align...]".
120 &[s, ref a..] if s.starts_with("i") => {
121 let bits = match s[1..].parse::<u64>() {
124 size(&s[1..], "i"); // For the user error.
130 1 => dl.i1_align = a,
131 8 => dl.i8_align = a,
132 16 => dl.i16_align = a,
133 32 => dl.i32_align = a,
134 64 => dl.i64_align = a,
137 if bits >= i128_align_src && bits <= 128 {
138 // Default alignment for i128 is decided by taking the alignment of
139 // largest-sized i{64...128}.
140 i128_align_src = bits;
// Vector specs: "v<bits>[:align...]"; update an existing entry or append.
144 &[s, ref a..] if s.starts_with("v") => {
145 let v_size = size(&s[1..], "v");
147 if let Some(v) = dl.vector_align.iter_mut().find(|v| v.0 == v_size) {
151 // No existing entry, add a new one.
152 dl.vector_align.push((v_size, a));
154 _ => {} // Ignore everything else.
158 // Perform consistency checks against the Target information.
159 let endian_str = match dl.endian {
160 Endian::Little => "little",
163 if endian_str != sess.target.target.target_endian {
164 sess.err(&format!("inconsistent target specification: \"data-layout\" claims \
165 architecture is {}-endian, while \"target-endian\" is `{}`",
166 endian_str, sess.target.target.target_endian));
// The pointer width declared by the data-layout string must agree with the
// target spec's "target-pointer-width" (a string, hence the to_string()).
169 if dl.pointer_size.bits().to_string() != sess.target.target.target_pointer_width {
170 sess.err(&format!("inconsistent target specification: \"data-layout\" claims \
171 pointers are {}-bit, while \"target-pointer-width\" is `{}`",
172 dl.pointer_size.bits(), sess.target.target.target_pointer_width));
178 /// Return exclusive upper bound on object size.
180 /// The theoretical maximum object size is defined as the maximum positive `isize` value.
181 /// This ensures that the `offset` semantics remain well-defined by allowing it to correctly
182 /// index every address within an object along with one byte past the end, along with allowing
183 /// `isize` to store the difference between any two pointers into an object.
185 /// The upper bound on 64-bit currently needs to be lower because LLVM uses a 64-bit integer
186 /// to represent object size in bits. It would need to be 1 << 61 to account for this, but is
187 /// currently conservatively bounded to 1 << 47 as that is enough to cover the current usable
188 /// address space on 64-bit ARMv8 and x86_64.
189 pub fn obj_size_bound(&self) -> u64 {
// NOTE(review): the match arms for the concrete pointer widths (16/32/64)
// are elided in this listing; only the catch-all is visible.
190 match self.pointer_size.bits() {
194 bits => bug!("obj_size_bound: unknown pointer bit size {}", bits)
// Returns the `Integer` variant matching this target's pointer width
// (used e.g. for slice-length metadata and `isize`/`usize` layouts).
// The concrete width arms are elided here as well.
198 pub fn ptr_sized_integer(&self) -> Integer {
199 match self.pointer_size.bits() {
203 bits => bug!("ptr_sized_integer: unknown pointer bit size {}", bits)
// Abstracts "anything that can hand out a `TargetDataLayout`", so the
// size/align helpers below can take either a bare `&TargetDataLayout`
// or a full `TyCtxt`. `Copy` keeps it cheap to pass by value.
208 pub trait HasDataLayout: Copy {
209 fn data_layout(&self) -> &TargetDataLayout;
// A raw reference to the layout trivially provides itself.
212 impl<'a> HasDataLayout for &'a TargetDataLayout {
213 fn data_layout(&self) -> &TargetDataLayout {
// The type context owns the parsed data layout for the session's target.
218 impl<'a, 'tcx> HasDataLayout for TyCtxt<'a, 'tcx, 'tcx> {
219 fn data_layout(&self) -> &TargetDataLayout {
224 /// Endianness of the target, which must match cfg(target-endian).
// NOTE(review): the `pub enum Endian { ... }` body is elided in this listing.
225 #[derive(Copy, Clone)]
231 /// Size of a type in bytes.
// NOTE(review): the `pub struct Size { ... }` body and `impl Size` header
// are elided between the numbered lines.
232 #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
// Constructs a Size from a bit count, rounding *up* to whole bytes.
238 pub fn from_bits(bits: u64) -> Size {
239 Size::from_bytes((bits + 7) / 8)
// Constructs a Size from a byte count; sizes of 1 << 61 bytes or more
// would overflow a u64 when expressed in bits, hence the hard error.
242 pub fn from_bytes(bytes: u64) -> Size {
243 if bytes >= (1 << 61) {
244 bug!("Size::from_bytes: {} bytes in bits doesn't fit in u64", bytes)
// Accessors: size in bytes / in bits (bodies elided in this listing).
251 pub fn bytes(self) -> u64 {
255 pub fn bits(self) -> u64 {
// Rounds this size up to the given ABI alignment using the standard
// (x + mask) & !mask trick; `align.abi()` must be a power of two for the
// mask to be valid, which `Align` guarantees by construction.
259 pub fn abi_align(self, align: Align) -> Size {
260 let mask = align.abi() - 1;
261 Size::from_bytes((self.bytes() + mask) & !mask)
// Adds two sizes, returning None if the sum reaches the target's object
// size bound (see `TargetDataLayout::obj_size_bound`).
264 pub fn checked_add<C: HasDataLayout>(self, offset: Size, cx: C) -> Option<Size> {
265 let dl = cx.data_layout();
267 // Each Size is less than dl.obj_size_bound(), so the sum is
268 // also less than 1 << 62 (and therefore can't overflow).
269 let bytes = self.bytes() + offset.bytes();
271 if bytes < dl.obj_size_bound() {
272 Some(Size::from_bytes(bytes))
// Multiplies a size by an element count, returning None on u64 overflow
// or when the product reaches the object size bound.
278 pub fn checked_mul<C: HasDataLayout>(self, count: u64, cx: C) -> Option<Size> {
279 let dl = cx.data_layout();
281 // Unlike the addition above, the product can overflow u64,
282 // hence the checked multiplication below.
283 match self.bytes().checked_mul(count) {
284 Some(bytes) if bytes < dl.obj_size_bound() => {
285 Some(Size::from_bytes(bytes))
292 /// Alignment of a type in bytes, both ABI-mandated and preferred.
293 /// Each field is a power of two, giving the alignment a maximum
294 /// value of 2^(2^8 - 1), which is limited by LLVM to a i32, with
295 /// a maximum capacity of 2^31 - 1 or 2147483647.
// NOTE(review): the struct body is elided here; from `from_bytes`/min/max
// below the fields appear to store the log2 of the alignment — confirm
// against the full source.
296 #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
// Builds an Align from bit counts by rounding each up to whole bytes.
303 pub fn from_bits(abi: u64, pref: u64) -> Result<Align, String> {
304 Align::from_bytes((abi + 7) / 8, (pref + 7) / 8)
// Builds an Align from byte counts, validating that each is a power of
// two (computed via the trailing-zero loop below) and within range.
307 pub fn from_bytes(abi: u64, pref: u64) -> Result<Align, String> {
308 let log2 = |align: u64| {
309 // Treat an alignment of 0 bytes like 1-byte alignment.
314 let mut bytes = align;
// Strip factors of two; a power of two reduces to exactly 1.
316 while (bytes & 1) == 0 {
321 Err(format!("`{}` is not a power of 2", align))
323 Err(format!("`{}` is too large", align))
// Accessors returning the ABI / preferred alignment in bytes
// (bodies elided in this listing).
335 pub fn abi(self) -> u64 {
339 pub fn pref(self) -> u64 {
// Componentwise min/max; comparing the stored fields directly is valid
// because the mapping to actual alignment values is monotonic.
343 pub fn min(self, other: Align) -> Align {
345 abi: cmp::min(self.abi, other.abi),
346 pref: cmp::min(self.pref, other.pref),
350 pub fn max(self, other: Align) -> Align {
352 abi: cmp::max(self.abi, other.abi),
353 pref: cmp::max(self.pref, other.pref),
358 /// Integers, also used for enum discriminants.
// NOTE(review): the enum body (I1, I8, I16, I32, I64, I128) and the
// `impl Integer` header are elided in this listing.
359 #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
// Size of each integer variant; note I1 is 1 *bit*, not 1 byte.
370 pub fn size(&self) -> Size {
372 I1 => Size::from_bits(1),
373 I8 => Size::from_bytes(1),
374 I16 => Size::from_bytes(2),
375 I32 => Size::from_bytes(4),
376 I64 => Size::from_bytes(8),
377 I128 => Size::from_bytes(16),
// Target-dependent alignment, looked up in the data layout (the arms
// for the smaller variants are elided here).
381 pub fn align<C: HasDataLayout>(&self, cx: C) -> Align {
382 let dl = cx.data_layout();
390 I128 => dl.i128_align,
// Maps an Integer plus signedness to the corresponding Rust type.
// I1 maps to u8/i8 since Rust has no 1-bit integer type.
394 pub fn to_ty<'a, 'tcx>(&self, tcx: &TyCtxt<'a, 'tcx, 'tcx>,
395 signed: bool) -> Ty<'tcx> {
396 match (*self, signed) {
397 (I1, false) => tcx.types.u8,
398 (I8, false) => tcx.types.u8,
399 (I16, false) => tcx.types.u16,
400 (I32, false) => tcx.types.u32,
401 (I64, false) => tcx.types.u64,
402 (I128, false) => tcx.types.u128,
403 (I1, true) => tcx.types.i8,
404 (I8, true) => tcx.types.i8,
405 (I16, true) => tcx.types.i16,
406 (I32, true) => tcx.types.i32,
407 (I64, true) => tcx.types.i64,
408 (I128, true) => tcx.types.i128,
412 /// Find the smallest Integer type which can represent the signed value.
// Uses the historical `...` inclusive-range pattern syntax (pre-`..=`).
// I1 covers only -1..=0, matching a 1-bit two's-complement value.
413 pub fn fit_signed(x: i64) -> Integer {
415 -0x0000_0000_0000_0001...0x0000_0000_0000_0000 => I1,
416 -0x0000_0000_0000_0080...0x0000_0000_0000_007f => I8,
417 -0x0000_0000_0000_8000...0x0000_0000_0000_7fff => I16,
418 -0x0000_0000_8000_0000...0x0000_0000_7fff_ffff => I32,
419 -0x8000_0000_0000_0000...0x7fff_ffff_ffff_ffff => I64,
424 /// Find the smallest Integer type which can represent the unsigned value.
// The overlapping ranges rely on match testing arms top-to-bottom, so the
// first (smallest) matching variant wins.
425 pub fn fit_unsigned(x: u64) -> Integer {
427 0...0x0000_0000_0000_0001 => I1,
428 0...0x0000_0000_0000_00ff => I8,
429 0...0x0000_0000_0000_ffff => I16,
430 0...0x0000_0000_ffff_ffff => I32,
431 0...0xffff_ffff_ffff_ffff => I64,
436 /// Find the smallest integer with the given alignment.
// Only considers I8..I64 (not I1 or I128), and requires the candidate's
// size to equal its alignment, so e.g. an over-aligned i32 won't match.
437 pub fn for_abi_align<C: HasDataLayout>(cx: C, align: Align) -> Option<Integer> {
438 let dl = cx.data_layout();
440 let wanted = align.abi();
441 for &candidate in &[I8, I16, I32, I64] {
442 let ty = Int(candidate);
443 if wanted == ty.align(dl).abi() && wanted == ty.size(dl).bytes() {
444 return Some(candidate);
450 /// Get the Integer type from an attr::IntType.
// Signed and unsigned attributes of the same width map to the same
// Integer; isize/usize resolve to the target's pointer-sized integer.
451 pub fn from_attr<C: HasDataLayout>(cx: C, ity: attr::IntType) -> Integer {
452 let dl = cx.data_layout();
455 attr::SignedInt(IntTy::I8) | attr::UnsignedInt(UintTy::U8) => I8,
456 attr::SignedInt(IntTy::I16) | attr::UnsignedInt(UintTy::U16) => I16,
457 attr::SignedInt(IntTy::I32) | attr::UnsignedInt(UintTy::U32) => I32,
458 attr::SignedInt(IntTy::I64) | attr::UnsignedInt(UintTy::U64) => I64,
459 attr::SignedInt(IntTy::I128) | attr::UnsignedInt(UintTy::U128) => I128,
460 attr::SignedInt(IntTy::Is) | attr::UnsignedInt(UintTy::Us) => {
461 dl.ptr_sized_integer()
466 /// Find the appropriate Integer type and signedness for the given
467 /// signed discriminant range and #[repr] attribute.
468 /// N.B.: u64 values above i64::MAX will be treated as signed, but
469 /// that shouldn't affect anything, other than maybe debuginfo.
470 fn repr_discr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
476 // Theoretically, negative values could be larger in unsigned representation
477 // than the unsigned representation of the signed minimum. However, if there
478 // are any negative values, the only valid unsigned representation is u64
479 // which can fit all i64 values, so the result remains unaffected.
480 let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u64, max as u64));
481 let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));
483 let mut min_from_extern = None;
484 let min_default = I8;
// An explicit #[repr(int)] attribute wins outright; it is a hard error
// (ICE) for it to be too small for the discriminant range.
486 if let Some(ity) = repr.int {
487 let discr = Integer::from_attr(tcx, ity);
488 let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
490 bug!("Integer::repr_discr: `#[repr]` hint too small for \
491 discriminant range of enum `{}", ty)
493 return (discr, ity.is_signed());
497 match &tcx.sess.target.target.arch[..] {
498 // WARNING: the ARM EABI has two variants; the one corresponding
499 // to `at_least == I32` appears to be used on Linux and NetBSD,
500 // but some systems may use the variant corresponding to no
501 // lower bound. However, we don't run on those yet...?
502 "arm" => min_from_extern = Some(I32),
503 _ => min_from_extern = Some(I32),
// NOTE(review): both arms above assign Some(I32), so this match currently
// always yields I32; this branch is likely guarded by a repr(C)-style
// condition elided from this listing — confirm against the full source.
507 let at_least = min_from_extern.unwrap_or(min_default);
509 // If there are no negative values, we can use the unsigned fit.
511 (cmp::max(unsigned_fit, at_least), false)
513 (cmp::max(signed_fit, at_least), true)
518 /// Fundamental unit of memory access and layout.
// NOTE(review): the `pub enum Primitive { Int(Integer), F32, F64, Pointer }`
// body and the `impl Primitive` header are elided in this listing; the
// variants are evident from the match arms below.
519 #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
// Size of a primitive as a unit of memory access: note that I1 and I8
// both occupy a full byte here, unlike `Integer::size` where I1 is 1 bit.
528 pub fn size<C: HasDataLayout>(self, cx: C) -> Size {
529 let dl = cx.data_layout();
532 Int(I1) | Int(I8) => Size::from_bits(8),
533 Int(I16) => Size::from_bits(16),
534 Int(I32) | F32 => Size::from_bits(32),
535 Int(I64) | F64 => Size::from_bits(64),
536 Int(I128) => Size::from_bits(128),
537 Pointer => dl.pointer_size
// Target-dependent alignment for each primitive, from the data layout.
541 pub fn align<C: HasDataLayout>(self, cx: C) -> Align {
542 let dl = cx.data_layout();
545 Int(I1) => dl.i1_align,
546 Int(I8) => dl.i8_align,
547 Int(I16) => dl.i16_align,
548 Int(I32) => dl.i32_align,
549 Int(I64) => dl.i64_align,
550 Int(I128) => dl.i128_align,
553 Pointer => dl.pointer_align
558 /// Path through fields of nested structures.
559 // FIXME(eddyb) use small vector optimization for the common case.
560 pub type FieldPath = Vec<u32>;
562 /// A structure, a product type in ADT terms.
// NOTE(review): some fields (e.g. `align`, `packed`, `sized`, `min_size`)
// are elided in this listing but referenced by `Struct::new` below.
563 #[derive(PartialEq, Eq, Hash, Debug)]
565 /// Maximum alignment of fields and repr alignment.
568 /// Primitive alignment of fields without repr alignment.
// Tracked separately from `align` so `over_align` can detect when
// repr(align) raised the alignment above the fields' natural one.
569 pub primitive_align: Align,
571 /// If true, no alignment padding is used.
574 /// If true, the size is exact, otherwise it's only a lower bound.
577 /// Offsets for the first byte of each field, ordered to match the source definition order.
578 /// This vector does not go in increasing order.
579 /// FIXME(eddyb) use small vector optimization for the common case.
580 pub offsets: Vec<Size>,
582 /// Maps source order field indices to memory order indices, depending how fields were permuted.
583 /// FIXME (camlorn) also consider small vector optimization here.
584 pub memory_index: Vec<u32>,
589 /// Info required to optimize struct layout.
// Drives field-reordering decisions in `Struct::new`: enum variants must
// keep the discriminant first, and a maybe-unsized tail must stay last.
590 #[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Debug)]
592 /// A tuple, closure, or univariant which cannot be coerced to unsized.
593 AlwaysSizedUnivariant,
594 /// A univariant, the last field of which may be coerced to unsized.
595 MaybeUnsizedUnivariant,
596 /// A univariant, but part of an enum.
600 impl<'a, 'tcx> Struct {
// Computes the layout of a struct-like aggregate from its fields' layouts,
// optionally reordering fields to reduce padding. Field offsets, total
// alignment and min_size are accumulated; errors propagate as
// LayoutError::SizeOverflow.
601 fn new(dl: &TargetDataLayout,
602 fields: &Vec<&'a Layout>,
606 -> Result<Struct, LayoutError<'tcx>> {
// repr(packed) and repr(align) are mutually exclusive; the combination is
// rejected earlier, so hitting it here is a compiler bug.
607 if repr.packed() && repr.align > 0 {
608 bug!("Struct cannot be packed and aligned");
611 let align = if repr.packed() {
617 let mut ret = Struct {
619 primitive_align: align,
620 packed: repr.packed(),
623 memory_index: vec![],
624 min_size: Size::from_bytes(0),
627 // Anything with repr(C) or repr(packed) doesn't optimize.
628 // Neither do 1-member and 2-member structs.
629 // In addition, code in trans assume that 2-element structs can become pairs.
630 // It's easier to just short-circuit here.
631 let can_optimize = (fields.len() > 2 || StructKind::EnumVariant == kind)
632 && (repr.flags & ReprFlags::IS_UNOPTIMISABLE).is_empty();
634 let (optimize, sort_ascending) = match kind {
635 StructKind::AlwaysSizedUnivariant => (can_optimize, false),
636 StructKind::MaybeUnsizedUnivariant => (can_optimize, false),
637 StructKind::EnumVariant => {
638 assert!(fields.len() >= 1, "Enum variants must have discriminants.");
// Enum variants only reorder when the discriminant is 1 byte, and sort
// ascending so the discriminant can stay at offset 0.
639 (can_optimize && fields[0].size(dl).bytes() == 1, true)
643 ret.offsets = vec![Size::from_bytes(0); fields.len()];
644 let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();
// The discriminant (for enum variants) and a maybe-unsized tail are pinned
// in place; only the slice between them is eligible for reordering.
647 let start = if let StructKind::EnumVariant = kind { 1 } else { 0 };
648 let end = if let StructKind::MaybeUnsizedUnivariant = kind {
654 let optimizing = &mut inverse_memory_index[start..end];
656 optimizing.sort_by_key(|&x| fields[x as usize].align(dl).abi());
658 optimizing.sort_by(| &a, &b | {
659 let a = fields[a as usize].align(dl).abi();
660 let b = fields[b as usize].align(dl).abi();
667 // inverse_memory_index holds field indices by increasing memory offset.
668 // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
669 // We now write field offsets to the corresponding offset slot;
670 // field 5 with offset 0 puts 0 in offsets[5].
671 // At the bottom of this function, we use inverse_memory_index to produce memory_index.
673 if let StructKind::EnumVariant = kind {
674 assert_eq!(inverse_memory_index[0], 0,
675 "Enum variant discriminants must have the lowest offset.");
678 let mut offset = Size::from_bytes(0);
680 for i in inverse_memory_index.iter() {
681 let field = fields[*i as usize];
// An unsized field anywhere but the very end is a compiler bug.
683 bug!("Struct::new: field #{} of `{}` comes after unsized field",
684 ret.offsets.len(), scapegoat);
687 if field.is_unsized() {
691 // Invariant: offset < dl.obj_size_bound() <= 1<<61
693 let align = field.align(dl);
694 let primitive_align = field.primitive_align(dl);
695 ret.align = ret.align.max(align);
696 ret.primitive_align = ret.primitive_align.max(primitive_align);
697 offset = offset.abi_align(align);
700 debug!("Struct::new offset: {:?} field: {:?} {:?}", offset, field, field.size(dl));
701 ret.offsets[*i as usize] = offset;
// checked_add guards against exceeding the target's object size bound.
703 offset = offset.checked_add(field.size(dl), dl)
704 .map_or(Err(LayoutError::SizeOverflow(scapegoat)), Ok)?;
// repr(align) can only raise the struct's alignment, never lower it.
708 let repr_align = repr.align as u64;
709 ret.align = ret.align.max(Align::from_bytes(repr_align, repr_align).unwrap());
710 debug!("Struct::new repr_align: {:?}", repr_align);
713 debug!("Struct::new min_size: {:?}", offset);
714 ret.min_size = offset;
716 // As stated above, inverse_memory_index holds field indices by increasing offset.
717 // This makes it an already-sorted view of the offsets vec.
718 // To invert it, consider:
719 // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
720 // Field 5 would be the first element, so memory_index is i:
721 // Note: if we didn't optimize, it's already right.
724 ret.memory_index = vec![0; inverse_memory_index.len()];
726 for i in 0..inverse_memory_index.len() {
727 ret.memory_index[inverse_memory_index[i] as usize] = i as u32;
// Identity permutation when no optimization happened.
730 ret.memory_index = inverse_memory_index;
736 /// Get the size with trailing alignment padding.
737 pub fn stride(&self) -> Size {
738 self.min_size.abi_align(self.align)
741 /// Determine whether a structure would be zero-sized, given its fields.
// Short-circuits: any unsized or nonzero-sized field makes the answer
// false (the early-return body is elided in this listing). Layout errors
// from the field iterator propagate via `?`.
742 fn would_be_zero_sized<I>(dl: &TargetDataLayout, fields: I)
743 -> Result<bool, LayoutError<'tcx>>
744 where I: Iterator<Item=Result<&'a Layout, LayoutError<'tcx>>> {
745 for field in fields {
747 if field.is_unsized() || field.size(dl).bytes() > 0 {
754 /// Get indices of the tys that made this struct by increasing offset.
// Inverts `memory_index`. The logic is duplicated for a stack-allocated
// u8 array (structs with <= 64 fields, the common case) and a heap Vec,
// to avoid allocating for small structs.
756 pub fn field_index_by_increasing_offset<'b>(&'b self) -> impl iter::Iterator<Item=usize>+'b {
757 let mut inverse_small = [0u8; 64];
758 let mut inverse_big = vec![];
759 let use_small = self.memory_index.len() <= inverse_small.len();
761 // We have to write this logic twice in order to keep the array small.
763 for i in 0..self.memory_index.len() {
764 inverse_small[self.memory_index[i] as usize] = i as u8;
767 inverse_big = vec![0; self.memory_index.len()];
768 for i in 0..self.memory_index.len() {
769 inverse_big[self.memory_index[i] as usize] = i as u32;
// The returned iterator borrows the inverse tables, hence `move` + 'b.
773 (0..self.memory_index.len()).map(move |i| {
774 if use_small { inverse_small[i] as usize }
775 else { inverse_big[i] as usize }
779 /// Find the path leading to a non-zero leaf field, starting from
780 /// the given type and recursing through aggregates.
781 /// The tuple is `(path, source_path)`,
782 /// where `path` is in memory order and `source_path` in source order.
783 // FIXME(eddyb) track value ranges and traverse already optimized enums.
// Used for the nullable-pointer enum optimization: Ok(Some(..)) means a
// field was found whose all-zero bit pattern is impossible, so a niche is
// available for the discriminant.
784 fn non_zero_field_in_type(tcx: TyCtxt<'a, 'tcx, 'tcx>,
785 param_env: ty::ParamEnv<'tcx>,
787 -> Result<Option<(FieldPath, FieldPath)>, LayoutError<'tcx>> {
788 match (ty.layout(tcx, param_env)?, &ty.sty) {
// Leaf cases: the value itself is known non-zero, so the path is empty
// (or points at the data pointer half of a fat pointer).
789 (&Scalar { non_zero: true, .. }, _) |
790 (&CEnum { non_zero: true, .. }, _) => Ok(Some((vec![], vec![]))),
791 (&FatPointer { non_zero: true, .. }, _) => {
792 Ok(Some((vec![FAT_PTR_ADDR as u32], vec![FAT_PTR_ADDR as u32])))
795 // Is this the NonZero lang item wrapping a pointer or integer type?
796 (&Univariant { non_zero: true, .. }, &ty::TyAdt(def, substs)) => {
797 let fields = &def.struct_variant().fields;
798 assert_eq!(fields.len(), 1);
799 match *fields[0].ty(tcx, substs).layout(tcx, param_env)? {
800 // FIXME(eddyb) also allow floating-point types here.
801 Scalar { value: Int(_), non_zero: false } |
802 Scalar { value: Pointer, non_zero: false } => {
803 Ok(Some((vec![0], vec![0])))
805 FatPointer { non_zero: false, .. } => {
806 let tmp = vec![FAT_PTR_ADDR as u32, 0];
807 Ok(Some((tmp.clone(), tmp)))
813 // Perhaps one of the fields of this struct is non-zero
814 // let's recurse and find out
815 (&Univariant { ref variant, .. }, &ty::TyAdt(def, substs)) if def.is_struct() => {
816 Struct::non_zero_field_paths(
819 def.struct_variant().fields.iter().map(|field| {
820 field.ty(tcx, substs)
// The variant's memory_index permutation translates the found source-order
// index into memory order for the returned `path`.
822 Some(&variant.memory_index[..]))
825 // Perhaps one of the upvars of this closure is non-zero
826 (&Univariant { ref variant, .. }, &ty::TyClosure(def, substs)) => {
827 let upvar_tys = substs.upvar_tys(def, tcx);
828 Struct::non_zero_field_paths(
832 Some(&variant.memory_index[..]))
834 // Can we use one of the fields in this tuple?
835 (&Univariant { ref variant, .. }, &ty::TyTuple(tys, _)) => {
836 Struct::non_zero_field_paths(
840 Some(&variant.memory_index[..]))
843 // Is this a fixed-size array of something non-zero
844 // with at least one element?
845 (_, &ty::TyArray(ety, mut count)) => {
// Normalize a projection in the array length; if it still has
// projections afterwards the layout is unknown.
846 if count.has_projections() {
847 count = tcx.normalize_associated_type_in_env(&count, param_env);
848 if count.has_projections() {
849 return Err(LayoutError::Unknown(ty));
852 if count.val.to_const_int().unwrap().to_u64().unwrap() != 0 {
853 Struct::non_zero_field_paths(
856 Some(ety).into_iter(),
// Normalize associated/anon types and retry; if normalization was a
// no-op, give up rather than recurse forever.
863 (_, &ty::TyProjection(_)) | (_, &ty::TyAnon(..)) => {
864 let normalized = tcx.normalize_associated_type_in_env(&ty, param_env);
865 if ty == normalized {
868 return Struct::non_zero_field_in_type(tcx, param_env, normalized);
871 // Anything else is not a non-zero type.
876 /// Find the path leading to a non-zero leaf field, starting from
877 /// the given set of fields and recursing through aggregates.
878 /// Returns Some((path, source_path)) on success.
879 /// `path` is translated to memory order. `source_path` is not.
880 fn non_zero_field_paths<I>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
881 param_env: ty::ParamEnv<'tcx>,
883 permutation: Option<&[u32]>)
884 -> Result<Option<(FieldPath, FieldPath)>, LayoutError<'tcx>>
885 where I: Iterator<Item=Ty<'tcx>> {
// Try each field in turn; the first one containing a non-zero leaf wins.
886 for (i, ty) in fields.enumerate() {
887 let r = Struct::non_zero_field_in_type(tcx, param_env, ty)?;
888 if let Some((mut path, mut source_path)) = r {
// Paths are built leaf-first, so the current field index is pushed on
// the end; `permutation` (when present) maps it into memory order.
889 source_path.push(i as u32);
890 let index = if let Some(p) = permutation {
895 path.push(index as u32);
896 return Ok(Some((path, source_path)));
// Reports the struct's alignment in bytes when repr(align) raised it above
// the fields' natural (primitive) alignment; None otherwise.
// (The return expressions are elided in this listing.)
902 pub fn over_align(&self) -> Option<u32> {
903 let align = self.align.abi();
904 let primitive_align = self.primitive_align.abi();
905 if align > primitive_align {
913 /// An untagged union.
// NOTE(review): some fields (`align`, `min_size`, presumably) are elided in
// this listing; they are referenced by `Union::new`/`extend` below.
914 #[derive(PartialEq, Eq, Hash, Debug)]
// Natural alignment of the fields, ignoring any repr(align) override.
917 pub primitive_align: Align,
921 /// If true, no alignment padding is used.
925 impl<'a, 'tcx> Union {
// Builds an empty Union layout from its repr options; fields are folded
// in afterwards via `extend`.
926 fn new(dl: &TargetDataLayout, repr: &ReprOptions) -> Union {
// Mirrors the Struct::new invariant: packed + aligned is rejected earlier.
927 if repr.packed() && repr.align > 0 {
928 bug!("Union cannot be packed and aligned");
931 let primitive_align = if repr.packed() {
// repr(align) can only raise the alignment above the natural one.
937 let align = if repr.align > 0 {
938 let repr_align = repr.align as u64;
939 debug!("Union::new repr_align: {:?}", repr_align);
940 primitive_align.max(Align::from_bytes(repr_align, repr_align).unwrap())
948 min_size: Size::from_bytes(0),
949 packed: repr.packed(),
953 /// Extend the Struct with more fields.
// A union's size is the max of its fields' sizes and its alignment the max
// of their alignments; unsized fields are a compiler bug here.
954 fn extend<I>(&mut self, dl: &TargetDataLayout,
957 -> Result<(), LayoutError<'tcx>>
958 where I: Iterator<Item=Result<&'a Layout, LayoutError<'tcx>>> {
959 for (index, field) in fields.enumerate() {
961 if field.is_unsized() {
962 bug!("Union::extend: field #{} of `{}` is unsized",
966 debug!("Union::extend field: {:?} {:?}", field, field.size(dl));
969 self.align = self.align.max(field.align(dl));
970 self.primitive_align = self.primitive_align.max(field.primitive_align(dl));
972 self.min_size = cmp::max(self.min_size, field.size(dl));
975 debug!("Union::extend min-size: {:?}", self.min_size);
980 /// Get the size with trailing alignment padding.
981 pub fn stride(&self) -> Size {
982 self.min_size.abi_align(self.align)
// Same contract as Struct::over_align: Some(align in bytes) only when
// repr(align) exceeds the natural field alignment. (Returns elided here.)
985 pub fn over_align(&self) -> Option<u32> {
986 let align = self.align.abi();
987 let primitive_align = self.primitive_align.abi();
988 if align > primitive_align {
// Field indices into a fat pointer's (data, metadata) pair; used by the
// non-zero-field search above to address the data-pointer half.
996 /// The first half of a fat pointer.
997 /// - For a trait object, this is the address of the box.
998 /// - For a slice, this is the base address.
999 pub const FAT_PTR_ADDR: usize = 0;
1001 /// The second half of a fat pointer.
1002 /// - For a trait object, this is the address of the vtable.
1003 /// - For a slice, this is the length.
1004 pub const FAT_PTR_EXTRA: usize = 1;
1006 /// Type layout, from which size and alignment can be cheaply computed.
1007 /// For ADTs, it also includes field placement and enum optimizations.
1008 /// NOTE: Because Layout is interned, redundant information should be
1009 /// kept to a minimum, e.g. it includes no sub-component Ty or Layout.
// NOTE(review): the enum's variant headers and many of their fields are
// elided in this sampled listing; only the doc comments and a few fields
// are visible below.
1010 #[derive(Debug, PartialEq, Eq, Hash)]
1012 /// TyBool, TyChar, TyInt, TyUint, TyFloat, TyRawPtr, TyRef or TyFnPtr.
1015 // If true, the value cannot represent a bit pattern of all zeroes.
1019 /// SIMD vectors, from structs marked with #[repr(simd)].
1025 /// TyArray, TySlice or TyStr.
1027 /// If true, the size is exact, otherwise it's only a lower bound.
1030 primitive_align: Align,
1035 /// TyRawPtr or TyRef with a !Sized pointee.
// The metadata primitive is the second (extra) half of the fat pointer:
// a length integer for slices/str, a vtable pointer for trait objects.
1037 metadata: Primitive,
1038 /// If true, the pointer cannot be null.
1042 // Remaining variants are all ADTs such as structs, enums or tuples.
1044 /// C-like enums; basically an integer.
1049 /// Inclusive discriminant range.
1050 /// If min > max, it represents min...u64::MAX followed by 0...max.
1051 // FIXME(eddyb) always use the shortest range, e.g. by finding
1052 // the largest space between two consecutive discriminants and
1053 // taking everything else as the (shortest) discriminant range.
1058 /// Single-case enums, and structs/tuples.
1061 /// If true, the structure is NonZero.
1062 // FIXME(eddyb) use a newtype Layout kind for this.
1066 /// Untagged unions.
1071 /// General-case enums: for each case there is a struct, and they
1072 /// all start with a field for the discriminant.
1075 variants: Vec<Struct>,
1078 primitive_align: Align,
1081 /// Two cases distinguished by a nullable pointer: the case with discriminant
1082 /// `nndiscr` must have single field which is known to be nonnull due to its type.
1083 /// The other case is known to be zero sized. Hence we represent the enum
1084 /// as simply a nullable pointer: if not null it indicates the `nndiscr` variant,
1085 /// otherwise it indicates the other case.
1087 /// For example, `std::option::Option` instantiated at a safe pointer type
1088 /// is represented such that `None` is a null pointer and `Some` is the
1089 /// identity function.
1090 RawNullablePointer {
1095 /// Two cases distinguished by a nullable pointer: the case with discriminant
1096 /// `nndiscr` is represented by the struct `nonnull`, where the `discrfield`th
1097 /// field is known to be nonnull due to its type; if that field is null, then
1098 /// it represents the other case, which is known to be zero sized.
1099 StructWrappedNullablePointer {
1102 /// N.B. There is a 0 at the start, for LLVM GEP through a pointer.
1103 discrfield: FieldPath,
1104 /// Like discrfield, but in source order. For debuginfo.
1105 discrfield_source: FieldPath
// Errors produced while computing a type's layout: either the layout cannot
// be determined at all (e.g. unresolved projections, see the Unknown variant
// elided from this listing) or the computed size exceeds the target's
// object size bound.
1109 #[derive(Copy, Clone, Debug)]
1110 pub enum LayoutError<'tcx> {
1112 SizeOverflow(Ty<'tcx>)
// User-facing rendering of the two error cases.
1115 impl<'tcx> fmt::Display for LayoutError<'tcx> {
1116 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
1118 LayoutError::Unknown(ty) => {
1119 write!(f, "the type `{:?}` has an unknown layout", ty)
1121 LayoutError::SizeOverflow(ty) => {
1122 write!(f, "the type `{:?}` is too big for the current architecture", ty)
1128 impl<'a, 'tcx> Layout {
1129 pub fn compute_uncached(tcx: TyCtxt<'a, 'tcx, 'tcx>,
1130 param_env: ty::ParamEnv<'tcx>,
1132 -> Result<&'tcx Layout, LayoutError<'tcx>> {
1133 let success = |layout| Ok(tcx.intern_layout(layout));
1134 let dl = &tcx.data_layout;
1135 assert!(!ty.has_infer_types());
1137 let ptr_layout = |pointee: Ty<'tcx>| {
1138 let non_zero = !ty.is_unsafe_ptr();
1139 let pointee = tcx.normalize_associated_type_in_env(&pointee, param_env);
1140 if pointee.is_sized(tcx, param_env, DUMMY_SP) {
1141 Ok(Scalar { value: Pointer, non_zero: non_zero })
1143 let unsized_part = tcx.struct_tail(pointee);
1144 let meta = match unsized_part.sty {
1145 ty::TySlice(_) | ty::TyStr => {
1146 Int(dl.ptr_sized_integer())
1148 ty::TyDynamic(..) => Pointer,
1149 _ => return Err(LayoutError::Unknown(unsized_part))
1151 Ok(FatPointer { metadata: meta, non_zero: non_zero })
1155 let layout = match ty.sty {
1157 ty::TyBool => Scalar { value: Int(I1), non_zero: false },
1158 ty::TyChar => Scalar { value: Int(I32), non_zero: false },
1161 value: Int(Integer::from_attr(dl, attr::SignedInt(ity))),
1165 ty::TyUint(ity) => {
1167 value: Int(Integer::from_attr(dl, attr::UnsignedInt(ity))),
1171 ty::TyFloat(FloatTy::F32) => Scalar { value: F32, non_zero: false },
1172 ty::TyFloat(FloatTy::F64) => Scalar { value: F64, non_zero: false },
1173 ty::TyFnPtr(_) => Scalar { value: Pointer, non_zero: true },
1176 ty::TyNever => Univariant {
1177 variant: Struct::new(dl, &vec![], &ReprOptions::default(),
1178 StructKind::AlwaysSizedUnivariant, ty)?,
1182 // Potentially-fat pointers.
1183 ty::TyRef(_, ty::TypeAndMut { ty: pointee, .. }) |
1184 ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
1185 ptr_layout(pointee)?
1187 ty::TyAdt(def, _) if def.is_box() => {
1188 ptr_layout(ty.boxed_ty())?
1191 // Arrays and slices.
1192 ty::TyArray(element, mut count) => {
1193 if count.has_projections() {
1194 count = tcx.normalize_associated_type_in_env(&count, param_env);
1195 if count.has_projections() {
1196 return Err(LayoutError::Unknown(ty));
1200 let element = element.layout(tcx, param_env)?;
1201 let element_size = element.size(dl);
1202 let count = count.val.to_const_int().unwrap().to_u64().unwrap();
1203 if element_size.checked_mul(count, dl).is_none() {
1204 return Err(LayoutError::SizeOverflow(ty));
1208 align: element.align(dl),
1209 primitive_align: element.primitive_align(dl),
1214 ty::TySlice(element) => {
1215 let element = element.layout(tcx, param_env)?;
1218 align: element.align(dl),
1219 primitive_align: element.primitive_align(dl),
1220 element_size: element.size(dl),
1228 primitive_align: dl.i8_align,
1229 element_size: Size::from_bytes(1),
1235 ty::TyFnDef(..) => {
1237 variant: Struct::new(dl, &vec![],
1238 &ReprOptions::default(), StructKind::AlwaysSizedUnivariant, ty)?,
1242 ty::TyDynamic(..) => {
1243 let mut unit = Struct::new(dl, &vec![], &ReprOptions::default(),
1244 StructKind::AlwaysSizedUnivariant, ty)?;
1246 Univariant { variant: unit, non_zero: false }
1249 // Tuples, generators and closures.
1250 ty::TyGenerator(def_id, ref substs, _) => {
1251 let tys = substs.field_tys(def_id, tcx);
1252 let st = Struct::new(dl,
1253 &tys.map(|ty| ty.layout(tcx, param_env))
1254 .collect::<Result<Vec<_>, _>>()?,
1255 &ReprOptions::default(),
1256 StructKind::AlwaysSizedUnivariant, ty)?;
1257 Univariant { variant: st, non_zero: false }
1260 ty::TyClosure(def_id, ref substs) => {
1261 let tys = substs.upvar_tys(def_id, tcx);
1262 let st = Struct::new(dl,
1263 &tys.map(|ty| ty.layout(tcx, param_env))
1264 .collect::<Result<Vec<_>, _>>()?,
1265 &ReprOptions::default(),
1266 StructKind::AlwaysSizedUnivariant, ty)?;
1267 Univariant { variant: st, non_zero: false }
1270 ty::TyTuple(tys, _) => {
1271 let kind = if tys.len() == 0 {
1272 StructKind::AlwaysSizedUnivariant
1274 StructKind::MaybeUnsizedUnivariant
1277 let st = Struct::new(dl,
1278 &tys.iter().map(|ty| ty.layout(tcx, param_env))
1279 .collect::<Result<Vec<_>, _>>()?,
1280 &ReprOptions::default(), kind, ty)?;
1281 Univariant { variant: st, non_zero: false }
1284 // SIMD vector types.
1285 ty::TyAdt(def, ..) if def.repr.simd() => {
1286 let element = ty.simd_type(tcx);
1287 match *element.layout(tcx, param_env)? {
1288 Scalar { value, .. } => {
1289 return success(Vector {
1291 count: ty.simd_size(tcx) as u64
1295 tcx.sess.fatal(&format!("monomorphising SIMD type `{}` with \
1296 a non-machine element type `{}`",
1303 ty::TyAdt(def, substs) => {
1304 if def.variants.is_empty() {
1305 // Uninhabitable; represent as unit
1306 // (Typechecking will reject discriminant-sizing attrs.)
1308 return success(Univariant {
1309 variant: Struct::new(dl, &vec![],
1310 &def.repr, StructKind::AlwaysSizedUnivariant, ty)?,
1315 if def.is_enum() && def.variants.iter().all(|v| v.fields.is_empty()) {
1316 // All bodies empty -> intlike
1317 let (mut min, mut max, mut non_zero) = (i64::max_value(),
1320 for discr in def.discriminants(tcx) {
1321 let x = discr.to_u128_unchecked() as i64;
1322 if x == 0 { non_zero = false; }
1323 if x < min { min = x; }
1324 if x > max { max = x; }
1327 // FIXME: should handle i128? signed-value based impl is weird and hard to
1329 let (discr, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max);
1330 return success(CEnum {
1334 // FIXME: should be u128?
1340 if !def.is_enum() || (def.variants.len() == 1 &&
1341 !def.repr.inhibit_enum_layout_opt()) {
1342 // Struct, or union, or univariant enum equivalent to a struct.
1343 // (Typechecking will reject discriminant-sizing attrs.)
1345 let kind = if def.is_enum() || def.variants[0].fields.len() == 0{
1346 StructKind::AlwaysSizedUnivariant
1348 let param_env = tcx.param_env(def.did);
1349 let fields = &def.variants[0].fields;
1350 let last_field = &fields[fields.len()-1];
1351 let always_sized = tcx.type_of(last_field.did)
1352 .is_sized(tcx, param_env, DUMMY_SP);
1353 if !always_sized { StructKind::MaybeUnsizedUnivariant }
1354 else { StructKind::AlwaysSizedUnivariant }
1357 let fields = def.variants[0].fields.iter().map(|field| {
1358 field.ty(tcx, substs).layout(tcx, param_env)
1359 }).collect::<Result<Vec<_>, _>>()?;
1360 let layout = if def.is_union() {
1361 let mut un = Union::new(dl, &def.repr);
1362 un.extend(dl, fields.iter().map(|&f| Ok(f)), ty)?;
1363 UntaggedUnion { variants: un }
1365 let st = Struct::new(dl, &fields, &def.repr,
1367 let non_zero = Some(def.did) == tcx.lang_items().non_zero();
1368 Univariant { variant: st, non_zero: non_zero }
1370 return success(layout);
1373 // Since there's at least one
1374 // non-empty body, explicit discriminants should have
1375 // been rejected by a checker before this point.
1376 for (i, v) in def.variants.iter().enumerate() {
1377 if v.discr != ty::VariantDiscr::Relative(i) {
1378 bug!("non-C-like enum {} with specified discriminants",
1379 tcx.item_path_str(def.did));
1383 // Cache the substituted and normalized variant field types.
1384 let variants = def.variants.iter().map(|v| {
1385 v.fields.iter().map(|field| field.ty(tcx, substs)).collect::<Vec<_>>()
1386 }).collect::<Vec<_>>();
1388 if variants.len() == 2 && !def.repr.inhibit_enum_layout_opt() {
1389 // Nullable pointer optimization
1391 let other_fields = variants[1 - discr].iter().map(|ty| {
1392 ty.layout(tcx, param_env)
1394 if !Struct::would_be_zero_sized(dl, other_fields)? {
1397 let paths = Struct::non_zero_field_paths(tcx,
1399 variants[discr].iter().cloned(),
1401 let (mut path, mut path_source) = if let Some(p) = paths { p }
1404 // FIXME(eddyb) should take advantage of a newtype.
1405 if path == &[0] && variants[discr].len() == 1 {
1406 let value = match *variants[discr][0].layout(tcx, param_env)? {
1407 Scalar { value, .. } => value,
1408 CEnum { discr, .. } => Int(discr),
1409 _ => bug!("Layout::compute: `{}`'s non-zero \
1410 `{}` field not scalar?!",
1411 ty, variants[discr][0])
1413 return success(RawNullablePointer {
1414 nndiscr: discr as u64,
1419 let st = Struct::new(dl,
1420 &variants[discr].iter().map(|ty| ty.layout(tcx, param_env))
1421 .collect::<Result<Vec<_>, _>>()?,
1422 &def.repr, StructKind::AlwaysSizedUnivariant, ty)?;
1424 // We have to fix the last element of path here.
1425 let mut i = *path.last().unwrap();
1426 i = st.memory_index[i as usize];
1427 *path.last_mut().unwrap() = i;
1428 path.push(0); // For GEP through a pointer.
1430 path_source.push(0);
1431 path_source.reverse();
1433 return success(StructWrappedNullablePointer {
1434 nndiscr: discr as u64,
1437 discrfield_source: path_source
1442 // The general case.
1443 let discr_max = (variants.len() - 1) as i64;
1444 assert!(discr_max >= 0);
1445 let (min_ity, _) = Integer::repr_discr(tcx, ty, &def.repr, 0, discr_max);
1446 let mut align = dl.aggregate_align;
1447 let mut primitive_align = dl.aggregate_align;
1448 let mut size = Size::from_bytes(0);
1450 // We're interested in the smallest alignment, so start large.
1451 let mut start_align = Align::from_bytes(256, 256).unwrap();
1453 // Create the set of structs that represent each variant
1454 // Use the minimum integer type we figured out above
1455 let discr = Scalar { value: Int(min_ity), non_zero: false };
1456 let mut variants = variants.into_iter().map(|fields| {
1457 let mut fields = fields.into_iter().map(|field| {
1458 field.layout(tcx, param_env)
1459 }).collect::<Result<Vec<_>, _>>()?;
1460 fields.insert(0, &discr);
1461 let st = Struct::new(dl,
1463 &def.repr, StructKind::EnumVariant, ty)?;
1464 // Find the first field we can't move later
1465 // to make room for a larger discriminant.
1466 // It is important to skip the first field.
1467 for i in st.field_index_by_increasing_offset().skip(1) {
1468 let field = fields[i];
1469 let field_align = field.align(dl);
1470 if field.size(dl).bytes() != 0 || field_align.abi() != 1 {
1471 start_align = start_align.min(field_align);
1475 size = cmp::max(size, st.min_size);
1476 align = align.max(st.align);
1477 primitive_align = primitive_align.max(st.primitive_align);
1479 }).collect::<Result<Vec<_>, _>>()?;
1481 // Align the maximum variant size to the largest alignment.
1482 size = size.abi_align(align);
1484 if size.bytes() >= dl.obj_size_bound() {
1485 return Err(LayoutError::SizeOverflow(ty));
1488 let typeck_ity = Integer::from_attr(dl, def.repr.discr_type());
1489 if typeck_ity < min_ity {
1490 // It is a bug if Layout decided on a greater discriminant size than typeck for
1491 // some reason at this point (based on values discriminant can take on). Mostly
1492 // because this discriminant will be loaded, and then stored into variable of
1493 // type calculated by typeck. Consider such case (a bug): typeck decided on
1494 // byte-sized discriminant, but layout thinks we need a 16-bit to store all
1495 // discriminant values. That would be a bug, because then, in trans, in order
1496 // to store this 16-bit discriminant into 8-bit sized temporary some of the
1497 // space necessary to represent would have to be discarded (or layout is wrong
1498 // on thinking it needs 16 bits)
1499 bug!("layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
1500 min_ity, typeck_ity);
1501 // However, it is fine to make discr type however large (as an optimisation)
1502 // after this point – we’ll just truncate the value we load in trans.
1505 // Check to see if we should use a different type for the
1506 // discriminant. We can safely use a type with the same size
1507 // as the alignment of the first field of each variant.
1508 // We increase the size of the discriminant to avoid LLVM copying
1509 // padding when it doesn't need to. This normally causes unaligned
1510 // load/stores and excessive memcpy/memset operations. By using a
1511 // bigger integer size, LLVM can be sure about it's contents and
1512 // won't be so conservative.
1514 // Use the initial field alignment
1515 let mut ity = Integer::for_abi_align(dl, start_align).unwrap_or(min_ity);
1517 // If the alignment is not larger than the chosen discriminant size,
1518 // don't use the alignment as the final size.
1522 // Patch up the variants' first few fields.
1523 let old_ity_size = Int(min_ity).size(dl);
1524 let new_ity_size = Int(ity).size(dl);
1525 for variant in &mut variants {
1526 for i in variant.offsets.iter_mut() {
1527 // The first field is the discrimminant, at offset 0.
1528 // These aren't in order, and we need to skip it.
1529 if *i <= old_ity_size && *i > Size::from_bytes(0) {
1533 // We might be making the struct larger.
1534 if variant.min_size <= old_ity_size {
1535 variant.min_size = new_ity_size;
1549 // Types with no meaningful known layout.
1550 ty::TyProjection(_) | ty::TyAnon(..) => {
1551 let normalized = tcx.normalize_associated_type_in_env(&ty, param_env);
1552 if ty == normalized {
1553 return Err(LayoutError::Unknown(ty));
1555 return normalized.layout(tcx, param_env);
1558 return Err(LayoutError::Unknown(ty));
1560 ty::TyInfer(_) | ty::TyError => {
1561 bug!("Layout::compute: unexpected type `{}`", ty)
1568 /// Returns true if the layout corresponds to an unsized type.
1569 pub fn is_unsized(&self) -> bool {
1571 Scalar {..} | Vector {..} | FatPointer {..} |
1572 CEnum {..} | UntaggedUnion {..} | General {..} |
1573 RawNullablePointer {..} |
1574 StructWrappedNullablePointer {..} => false,
1576 Array { sized, .. } |
1577 Univariant { variant: Struct { sized, .. }, .. } => !sized
1581 pub fn size<C: HasDataLayout>(&self, cx: C) -> Size {
1582 let dl = cx.data_layout();
1585 Scalar { value, .. } | RawNullablePointer { value, .. } => {
1589 Vector { element, count } => {
1590 let element_size = element.size(dl);
1591 let vec_size = match element_size.checked_mul(count, dl) {
1593 None => bug!("Layout::size({:?}): {} * {} overflowed",
1594 self, element_size.bytes(), count)
1596 vec_size.abi_align(self.align(dl))
1599 Array { element_size, count, .. } => {
1600 match element_size.checked_mul(count, dl) {
1602 None => bug!("Layout::size({:?}): {} * {} overflowed",
1603 self, element_size.bytes(), count)
1607 FatPointer { metadata, .. } => {
1608 // Effectively a (ptr, meta) tuple.
1609 Pointer.size(dl).abi_align(metadata.align(dl))
1610 .checked_add(metadata.size(dl), dl).unwrap()
1611 .abi_align(self.align(dl))
1614 CEnum { discr, .. } => Int(discr).size(dl),
1615 General { size, .. } => size,
1616 UntaggedUnion { ref variants } => variants.stride(),
1618 Univariant { ref variant, .. } |
1619 StructWrappedNullablePointer { nonnull: ref variant, .. } => {
1625 pub fn align<C: HasDataLayout>(&self, cx: C) -> Align {
1626 let dl = cx.data_layout();
1629 Scalar { value, .. } | RawNullablePointer { value, .. } => {
1633 Vector { element, count } => {
1634 let elem_size = element.size(dl);
1635 let vec_size = match elem_size.checked_mul(count, dl) {
1637 None => bug!("Layout::align({:?}): {} * {} overflowed",
1638 self, elem_size.bytes(), count)
1640 for &(size, align) in &dl.vector_align {
1641 if size == vec_size {
1645 // Default to natural alignment, which is what LLVM does.
1646 // That is, use the size, rounded up to a power of 2.
1647 let align = vec_size.bytes().next_power_of_two();
1648 Align::from_bytes(align, align).unwrap()
1651 FatPointer { metadata, .. } => {
1652 // Effectively a (ptr, meta) tuple.
1653 Pointer.align(dl).max(metadata.align(dl))
1656 CEnum { discr, .. } => Int(discr).align(dl),
1657 Array { align, .. } | General { align, .. } => align,
1658 UntaggedUnion { ref variants } => variants.align,
1660 Univariant { ref variant, .. } |
1661 StructWrappedNullablePointer { nonnull: ref variant, .. } => {
1667 /// Returns alignment before repr alignment is applied
1668 pub fn primitive_align(&self, dl: &TargetDataLayout) -> Align {
1670 Array { primitive_align, .. } | General { primitive_align, .. } => primitive_align,
1671 Univariant { ref variant, .. } |
1672 StructWrappedNullablePointer { nonnull: ref variant, .. } => {
1673 variant.primitive_align
1680 /// Returns repr alignment if it is greater than the primitive alignment.
1681 pub fn over_align(&self, dl: &TargetDataLayout) -> Option<u32> {
1682 let align = self.align(dl);
1683 let primitive_align = self.primitive_align(dl);
1684 if align.abi() > primitive_align.abi() {
1685 Some(align.abi() as u32)
1691 pub fn field_offset<C: HasDataLayout>(&self,
1694 variant_index: Option<usize>)
1696 let dl = cx.data_layout();
1701 UntaggedUnion { .. } |
1702 RawNullablePointer { .. } => {
1706 Vector { element, count } => {
1707 let element_size = element.size(dl);
1710 Size::from_bytes(element_size.bytes() * count)
1713 Array { element_size, count, .. } => {
1716 Size::from_bytes(element_size.bytes() * count)
1719 FatPointer { metadata, .. } => {
1720 // Effectively a (ptr, meta) tuple.
1725 Pointer.size(dl).abi_align(metadata.align(dl))
1729 Univariant { ref variant, .. } => variant.offsets[i],
1731 General { ref variants, .. } => {
1732 let v = variant_index.expect("variant index required");
1733 variants[v].offsets[i + 1]
1736 StructWrappedNullablePointer { nndiscr, ref nonnull, .. } => {
1737 if Some(nndiscr as usize) == variant_index {
1746 /// This is invoked by the `layout_raw` query to record the final
1747 /// layout of each type.
1749 pub fn record_layout_for_printing(tcx: TyCtxt<'a, 'tcx, 'tcx>,
1751 param_env: ty::ParamEnv<'tcx>,
1753 // If we are running with `-Zprint-type-sizes`, record layouts for
1754 // dumping later. Ignore layouts that are done with non-empty
1755 // environments or non-monomorphic layouts, as the user only wants
1756 // to see the stuff resulting from the final trans session.
1758 !tcx.sess.opts.debugging_opts.print_type_sizes ||
1759 ty.has_param_types() ||
1761 !param_env.caller_bounds.is_empty()
1766 Self::record_layout_for_printing_outlined(tcx, ty, param_env, layout)
1769 fn record_layout_for_printing_outlined(tcx: TyCtxt<'a, 'tcx, 'tcx>,
1771 param_env: ty::ParamEnv<'tcx>,
1773 // (delay format until we actually need it)
1774 let record = |kind, opt_discr_size, variants| {
1775 let type_desc = format!("{:?}", ty);
1776 let overall_size = layout.size(tcx);
1777 let align = layout.align(tcx);
1778 tcx.sess.code_stats.borrow_mut().record_type_size(kind,
1786 let (adt_def, substs) = match ty.sty {
1787 ty::TyAdt(ref adt_def, substs) => {
1788 debug!("print-type-size t: `{:?}` process adt", ty);
1792 ty::TyClosure(..) => {
1793 debug!("print-type-size t: `{:?}` record closure", ty);
1794 record(DataTypeKind::Closure, None, vec![]);
1799 debug!("print-type-size t: `{:?}` skip non-nominal", ty);
1804 let adt_kind = adt_def.adt_kind();
1806 let build_field_info = |(field_name, field_ty): (ast::Name, Ty<'tcx>), offset: &Size| {
1807 let layout = field_ty.layout(tcx, param_env);
1809 Err(_) => bug!("no layout found for field {} type: `{:?}`", field_name, field_ty),
1810 Ok(field_layout) => {
1811 session::FieldInfo {
1812 name: field_name.to_string(),
1813 offset: offset.bytes(),
1814 size: field_layout.size(tcx).bytes(),
1815 align: field_layout.align(tcx).abi(),
1821 let build_primitive_info = |name: ast::Name, value: &Primitive| {
1822 session::VariantInfo {
1823 name: Some(name.to_string()),
1824 kind: session::SizeKind::Exact,
1825 align: value.align(tcx).abi(),
1826 size: value.size(tcx).bytes(),
1832 WithDiscrim(&'a Struct),
1833 NoDiscrim(&'a Struct),
1836 let build_variant_info = |n: Option<ast::Name>,
1837 flds: &[(ast::Name, Ty<'tcx>)],
1839 let (s, field_offsets) = match layout {
1840 Fields::WithDiscrim(s) => (s, &s.offsets[1..]),
1841 Fields::NoDiscrim(s) => (s, &s.offsets[0..]),
1843 let field_info: Vec<_> =
1845 .zip(field_offsets.iter())
1846 .map(|(&field_name_ty, offset)| build_field_info(field_name_ty, offset))
1849 session::VariantInfo {
1850 name: n.map(|n|n.to_string()),
1852 session::SizeKind::Exact
1854 session::SizeKind::Min
1856 align: s.align.abi(),
1857 size: s.min_size.bytes(),
1863 Layout::StructWrappedNullablePointer { nonnull: ref variant_layout,
1866 discrfield_source: _ } => {
1867 debug!("print-type-size t: `{:?}` adt struct-wrapped nullable nndiscr {} is {:?}",
1868 ty, nndiscr, variant_layout);
1869 let variant_def = &adt_def.variants[nndiscr as usize];
1870 let fields: Vec<_> =
1871 variant_def.fields.iter()
1872 .map(|field_def| (field_def.name, field_def.ty(tcx, substs)))
1874 record(adt_kind.into(),
1876 vec![build_variant_info(Some(variant_def.name),
1878 Fields::NoDiscrim(variant_layout))]);
1880 Layout::RawNullablePointer { nndiscr, value } => {
1881 debug!("print-type-size t: `{:?}` adt raw nullable nndiscr {} is {:?}",
1882 ty, nndiscr, value);
1883 let variant_def = &adt_def.variants[nndiscr as usize];
1884 record(adt_kind.into(), None,
1885 vec![build_primitive_info(variant_def.name, &value)]);
1887 Layout::Univariant { variant: ref variant_layout, non_zero: _ } => {
1888 let variant_names = || {
1889 adt_def.variants.iter().map(|v|format!("{}", v.name)).collect::<Vec<_>>()
1891 debug!("print-type-size t: `{:?}` adt univariant {:?} variants: {:?}",
1892 ty, variant_layout, variant_names());
1893 assert!(adt_def.variants.len() <= 1,
1894 "univariant with variants {:?}", variant_names());
1895 if adt_def.variants.len() == 1 {
1896 let variant_def = &adt_def.variants[0];
1897 let fields: Vec<_> =
1898 variant_def.fields.iter()
1899 .map(|f| (f.name, f.ty(tcx, substs)))
1901 record(adt_kind.into(),
1903 vec![build_variant_info(Some(variant_def.name),
1905 Fields::NoDiscrim(variant_layout))]);
1907 // (This case arises for *empty* enums; so give it
1909 record(adt_kind.into(), None, vec![]);
1913 Layout::General { ref variants, discr, .. } => {
1914 debug!("print-type-size t: `{:?}` adt general variants def {} layouts {} {:?}",
1915 ty, adt_def.variants.len(), variants.len(), variants);
1916 let variant_infos: Vec<_> =
1917 adt_def.variants.iter()
1918 .zip(variants.iter())
1919 .map(|(variant_def, variant_layout)| {
1920 let fields: Vec<_> =
1923 .map(|f| (f.name, f.ty(tcx, substs)))
1925 build_variant_info(Some(variant_def.name),
1927 Fields::WithDiscrim(variant_layout))
1930 record(adt_kind.into(), Some(discr.size()), variant_infos);
1933 Layout::UntaggedUnion { ref variants } => {
1934 debug!("print-type-size t: `{:?}` adt union variants {:?}",
1936 // layout does not currently store info about each
1938 record(adt_kind.into(), None, Vec::new());
1941 Layout::CEnum { discr, .. } => {
1942 debug!("print-type-size t: `{:?}` adt c-like enum", ty);
1943 let variant_infos: Vec<_> =
1944 adt_def.variants.iter()
1945 .map(|variant_def| {
1946 build_primitive_info(variant_def.name,
1947 &Primitive::Int(discr))
1950 record(adt_kind.into(), Some(discr.size()), variant_infos);
1953 // other cases provide little interesting (i.e. adjustable
1954 // via representation tweaks) size info beyond total size.
1955 Layout::Scalar { .. } |
1956 Layout::Vector { .. } |
1957 Layout::Array { .. } |
1958 Layout::FatPointer { .. } => {
1959 debug!("print-type-size t: `{:?}` adt other", ty);
1960 record(adt_kind.into(), None, Vec::new())
1966 /// Type size "skeleton", i.e. the only information determining a type's size.
1967 /// While this is conservative, (aside from constant sizes, only pointers,
1968 /// newtypes thereof and null pointer optimized enums are allowed), it is
1969 /// enough to statically check common usecases of transmute.
1970 #[derive(Copy, Clone, Debug)]
1971 pub enum SizeSkeleton<'tcx> {
1972 /// Any statically computable Layout.
1975 /// A potentially-fat pointer.
1977 /// If true, this pointer is never null.
1979 /// The type which determines the unsized metadata, if any,
1980 /// of this pointer. Either a type parameter or a projection
1981 /// depending on one, with regions erased.
1986 impl<'a, 'tcx> SizeSkeleton<'tcx> {
1987 pub fn compute(ty: Ty<'tcx>,
1988 tcx: TyCtxt<'a, 'tcx, 'tcx>,
1989 param_env: ty::ParamEnv<'tcx>)
1990 -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
1991 assert!(!ty.has_infer_types());
1993 // First try computing a static layout.
1994 let err = match ty.layout(tcx, param_env) {
1996 return Ok(SizeSkeleton::Known(layout.size(tcx)));
2001 let ptr_skeleton = |pointee: Ty<'tcx>| {
2002 let non_zero = !ty.is_unsafe_ptr();
2003 let tail = tcx.struct_tail(pointee);
2005 ty::TyParam(_) | ty::TyProjection(_) => {
2006 assert!(tail.has_param_types() || tail.has_self_ty());
2007 Ok(SizeSkeleton::Pointer {
2009 tail: tcx.erase_regions(&tail)
2013 bug!("SizeSkeleton::compute({}): layout errored ({}), yet \
2014 tail `{}` is not a type parameter or a projection",
2021 ty::TyRef(_, ty::TypeAndMut { ty: pointee, .. }) |
2022 ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
2023 ptr_skeleton(pointee)
2025 ty::TyAdt(def, _) if def.is_box() => {
2026 ptr_skeleton(ty.boxed_ty())
2029 ty::TyAdt(def, substs) => {
2030 // Only newtypes and enums w/ nullable pointer optimization.
2031 if def.is_union() || def.variants.is_empty() || def.variants.len() > 2 {
2035 // Get a zero-sized variant or a pointer newtype.
2036 let zero_or_ptr_variant = |i: usize| {
2037 let fields = def.variants[i].fields.iter().map(|field| {
2038 SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env)
2041 for field in fields {
2044 SizeSkeleton::Known(size) => {
2045 if size.bytes() > 0 {
2049 SizeSkeleton::Pointer {..} => {
2060 let v0 = zero_or_ptr_variant(0)?;
2062 if def.variants.len() == 1 {
2063 if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
2064 return Ok(SizeSkeleton::Pointer {
2065 non_zero: non_zero ||
2066 Some(def.did) == tcx.lang_items().non_zero(),
2074 let v1 = zero_or_ptr_variant(1)?;
2075 // Nullable pointer enum optimization.
2077 (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None) |
2078 (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
2079 Ok(SizeSkeleton::Pointer {
2088 ty::TyProjection(_) | ty::TyAnon(..) => {
2089 let normalized = tcx.normalize_associated_type_in_env(&ty, param_env);
2090 if ty == normalized {
2093 SizeSkeleton::compute(normalized, tcx, param_env)
2101 pub fn same_size(self, other: SizeSkeleton) -> bool {
2102 match (self, other) {
2103 (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
2104 (SizeSkeleton::Pointer { tail: a, .. },
2105 SizeSkeleton::Pointer { tail: b, .. }) => a == b,
2111 /// A pair of a type and its layout. Implements various
2112 /// type traversal APIs (e.g. recursing into fields).
2113 #[derive(Copy, Clone, Debug)]
2114 pub struct TyLayout<'tcx> {
2116 pub layout: &'tcx Layout,
2117 pub variant_index: Option<usize>,
2120 impl<'tcx> Deref for TyLayout<'tcx> {
2121 type Target = Layout;
2122 fn deref(&self) -> &Layout {
2127 pub trait LayoutTyper<'tcx>: HasDataLayout {
2130 fn tcx<'a>(&'a self) -> TyCtxt<'a, 'tcx, 'tcx>;
2131 fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout;
2132 fn normalize_projections(self, ty: Ty<'tcx>) -> Ty<'tcx>;
2135 /// Combines a tcx with the parameter environment so that you can
2136 /// compute layout operations.
2137 #[derive(Copy, Clone)]
2138 pub struct LayoutCx<'a, 'tcx: 'a> {
2139 tcx: TyCtxt<'a, 'tcx, 'tcx>,
2140 param_env: ty::ParamEnv<'tcx>,
2143 impl<'a, 'tcx> LayoutCx<'a, 'tcx> {
2144 pub fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>, param_env: ty::ParamEnv<'tcx>) -> Self {
2145 LayoutCx { tcx, param_env }
2149 impl<'a, 'tcx> HasDataLayout for LayoutCx<'a, 'tcx> {
2150 fn data_layout(&self) -> &TargetDataLayout {
2151 &self.tcx.data_layout
2155 impl<'a, 'tcx> LayoutTyper<'tcx> for LayoutCx<'a, 'tcx> {
2156 type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;
2158 fn tcx<'b>(&'b self) -> TyCtxt<'b, 'tcx, 'tcx> {
2162 fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
2163 let ty = self.normalize_projections(ty);
2167 layout: ty.layout(self.tcx, self.param_env)?,
2172 fn normalize_projections(self, ty: Ty<'tcx>) -> Ty<'tcx> {
2173 self.tcx.normalize_associated_type_in_env(&ty, self.param_env)
2177 impl<'a, 'tcx> TyLayout<'tcx> {
2178 pub fn for_variant(&self, variant_index: usize) -> Self {
2180 variant_index: Some(variant_index),
2185 pub fn field_offset<C: HasDataLayout>(&self, cx: C, i: usize) -> Size {
2186 self.layout.field_offset(cx, i, self.variant_index)
2189 pub fn field_count(&self) -> usize {
2190 // Handle enum/union through the type rather than Layout.
2191 if let ty::TyAdt(def, _) = self.ty.sty {
2192 let v = self.variant_index.unwrap_or(0);
2193 if def.variants.is_empty() {
2197 return def.variants[v].fields.len();
2201 match *self.layout {
2203 bug!("TyLayout::field_count({:?}): not applicable", self)
2206 // Handled above (the TyAdt case).
2209 UntaggedUnion { .. } |
2210 RawNullablePointer { .. } |
2211 StructWrappedNullablePointer { .. } => bug!(),
2213 FatPointer { .. } => 2,
2215 Vector { count, .. } |
2216 Array { count, .. } => {
2217 let usize_count = count as usize;
2218 assert_eq!(usize_count as u64, count);
2222 Univariant { ref variant, .. } => variant.offsets.len(),
2226 pub fn field_type<C: LayoutTyper<'tcx>>(&self, cx: C, i: usize) -> Ty<'tcx> {
2229 let ptr_field_type = |pointee: Ty<'tcx>| {
2231 let slice = |element: Ty<'tcx>| {
2233 tcx.mk_mut_ptr(element)
2238 match tcx.struct_tail(pointee).sty {
2239 ty::TySlice(element) => slice(element),
2240 ty::TyStr => slice(tcx.types.u8),
2241 ty::TyDynamic(..) => tcx.mk_mut_ptr(tcx.mk_nil()),
2242 _ => bug!("TyLayout::field_type({:?}): not applicable", self)
2255 ty::TyDynamic(..) => {
2256 bug!("TyLayout::field_type({:?}): not applicable", self)
2259 // Potentially-fat pointers.
2260 ty::TyRef(_, ty::TypeAndMut { ty: pointee, .. }) |
2261 ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
2262 ptr_field_type(pointee)
2264 ty::TyAdt(def, _) if def.is_box() => {
2265 ptr_field_type(self.ty.boxed_ty())
2268 // Arrays and slices.
2269 ty::TyArray(element, _) |
2270 ty::TySlice(element) => element,
2271 ty::TyStr => tcx.types.u8,
2273 // Tuples, generators and closures.
2274 ty::TyClosure(def_id, ref substs) => {
2275 substs.upvar_tys(def_id, tcx).nth(i).unwrap()
2278 ty::TyGenerator(def_id, ref substs, _) => {
2279 substs.field_tys(def_id, tcx).nth(i).unwrap()
2282 ty::TyTuple(tys, _) => tys[i],
2284 // SIMD vector types.
2285 ty::TyAdt(def, ..) if def.repr.simd() => {
2286 self.ty.simd_type(tcx)
2290 ty::TyAdt(def, substs) => {
2291 def.variants[self.variant_index.unwrap_or(0)].fields[i].ty(tcx, substs)
2294 ty::TyProjection(_) | ty::TyAnon(..) | ty::TyParam(_) |
2295 ty::TyInfer(_) | ty::TyError => {
2296 bug!("TyLayout::field_type: unexpected type `{}`", self.ty)
2301 pub fn field<C: LayoutTyper<'tcx>>(&self,
2305 cx.layout_of(cx.normalize_projections(self.field_type(cx, i)))
2309 impl<'gcx> HashStable<StableHashingContext<'gcx>> for Layout
2311 fn hash_stable<W: StableHasherResult>(&self,
2312 hcx: &mut StableHashingContext<'gcx>,
2313 hasher: &mut StableHasher<W>) {
2314 use ty::layout::Layout::*;
2315 mem::discriminant(self).hash_stable(hcx, hasher);
2318 Scalar { value, non_zero } => {
2319 value.hash_stable(hcx, hasher);
2320 non_zero.hash_stable(hcx, hasher);
2322 Vector { element, count } => {
2323 element.hash_stable(hcx, hasher);
2324 count.hash_stable(hcx, hasher);
2326 Array { sized, align, primitive_align, element_size, count } => {
2327 sized.hash_stable(hcx, hasher);
2328 align.hash_stable(hcx, hasher);
2329 primitive_align.hash_stable(hcx, hasher);
2330 element_size.hash_stable(hcx, hasher);
2331 count.hash_stable(hcx, hasher);
2333 FatPointer { ref metadata, non_zero } => {
2334 metadata.hash_stable(hcx, hasher);
2335 non_zero.hash_stable(hcx, hasher);
2337 CEnum { discr, signed, non_zero, min, max } => {
2338 discr.hash_stable(hcx, hasher);
2339 signed.hash_stable(hcx, hasher);
2340 non_zero.hash_stable(hcx, hasher);
2341 min.hash_stable(hcx, hasher);
2342 max.hash_stable(hcx, hasher);
2344 Univariant { ref variant, non_zero } => {
2345 variant.hash_stable(hcx, hasher);
2346 non_zero.hash_stable(hcx, hasher);
2348 UntaggedUnion { ref variants } => {
2349 variants.hash_stable(hcx, hasher);
2351 General { discr, ref variants, size, align, primitive_align } => {
2352 discr.hash_stable(hcx, hasher);
2353 variants.hash_stable(hcx, hasher);
2354 size.hash_stable(hcx, hasher);
2355 align.hash_stable(hcx, hasher);
2356 primitive_align.hash_stable(hcx, hasher);
2358 RawNullablePointer { nndiscr, ref value } => {
2359 nndiscr.hash_stable(hcx, hasher);
2360 value.hash_stable(hcx, hasher);
2362 StructWrappedNullablePointer {
2366 ref discrfield_source
2368 nndiscr.hash_stable(hcx, hasher);
2369 nonnull.hash_stable(hcx, hasher);
2370 discrfield.hash_stable(hcx, hasher);
2371 discrfield_source.hash_stable(hcx, hasher);
2377 impl_stable_hash_for!(enum ::ty::layout::Integer {
2386 impl_stable_hash_for!(enum ::ty::layout::Primitive {
2393 impl_stable_hash_for!(struct ::ty::layout::Align {
2398 impl_stable_hash_for!(struct ::ty::layout::Size {
2402 impl<'gcx> HashStable<StableHashingContext<'gcx>> for LayoutError<'gcx>
2404 fn hash_stable<W: StableHasherResult>(&self,
2405 hcx: &mut StableHashingContext<'gcx>,
2406 hasher: &mut StableHasher<W>) {
2407 use ty::layout::LayoutError::*;
2408 mem::discriminant(self).hash_stable(hcx, hasher);
2412 SizeOverflow(t) => t.hash_stable(hcx, hasher)
2417 impl_stable_hash_for!(struct ::ty::layout::Struct {
2427 impl_stable_hash_for!(struct ::ty::layout::Union {