1 // Copyright 2016 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 pub use self::Integer::*;
12 pub use self::Primitive::*;
14 use session::{self, DataTypeKind, Session};
15 use ty::{self, Ty, TyCtxt, TypeFoldable, ReprOptions, ReprFlags};
17 use syntax::ast::{self, FloatTy, IntTy, UintTy};
19 use syntax_pos::DUMMY_SP;
26 use std::ops::{Add, Sub, Mul, AddAssign, Deref, RangeInclusive};
28 use ich::StableHashingContext;
29 use rustc_data_structures::stable_hasher::{HashStable, StableHasher,
32 /// Parsed [Data layout](http://llvm.org/docs/LangRef.html#data-layout)
33 /// for a target, which contains everything needed to compute layouts.
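///
/// For example, an `x86_64`-style target describes itself with a string along
/// the lines of the following (an illustrative sample, not taken from any
/// particular target spec in this tree):
///
/// ```text
/// e-m:e-i64:64-f80:128-n8:16:32:64-S128
/// ```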
34 pub struct TargetDataLayout {
41 pub i128_align: Align,
44 pub pointer_size: Size,
45 pub pointer_align: Align,
46 pub aggregate_align: Align,
48 /// Alignments for vector types.
49 pub vector_align: Vec<(Size, Align)>
52 impl Default for TargetDataLayout {
53 /// Creates an instance of `TargetDataLayout`.
54 fn default() -> TargetDataLayout {
57 i1_align: Align::from_bits(8, 8).unwrap(),
58 i8_align: Align::from_bits(8, 8).unwrap(),
59 i16_align: Align::from_bits(16, 16).unwrap(),
60 i32_align: Align::from_bits(32, 32).unwrap(),
61 i64_align: Align::from_bits(32, 64).unwrap(),
62 i128_align: Align::from_bits(32, 64).unwrap(),
63 f32_align: Align::from_bits(32, 32).unwrap(),
64 f64_align: Align::from_bits(64, 64).unwrap(),
65 pointer_size: Size::from_bits(64),
66 pointer_align: Align::from_bits(64, 64).unwrap(),
67 aggregate_align: Align::from_bits(0, 64).unwrap(),
69 (Size::from_bits(64), Align::from_bits(64, 64).unwrap()),
70 (Size::from_bits(128), Align::from_bits(128, 128).unwrap())
76 impl TargetDataLayout {
77 pub fn parse(sess: &Session) -> TargetDataLayout {
78 // Parse a bit count from a string.
79 let parse_bits = |s: &str, kind: &str, cause: &str| {
80 s.parse::<u64>().unwrap_or_else(|err| {
81 sess.err(&format!("invalid {} `{}` for `{}` in \"data-layout\": {}",
82 kind, s, cause, err));
87 // Parse a size string.
88 let size = |s: &str, cause: &str| {
89 Size::from_bits(parse_bits(s, "size", cause))
92 // Parse an alignment string.
93 let align = |s: &[&str], cause: &str| {
95 sess.err(&format!("missing alignment for `{}` in \"data-layout\"", cause));
97 let abi = parse_bits(s[0], "alignment", cause);
98 let pref = s.get(1).map_or(abi, |pref| parse_bits(pref, "alignment", cause));
99 Align::from_bits(abi, pref).unwrap_or_else(|err| {
100 sess.err(&format!("invalid alignment for `{}` in \"data-layout\": {}",
102 Align::from_bits(8, 8).unwrap()
106 let mut dl = TargetDataLayout::default();
107 let mut i128_align_src = 64;
108 for spec in sess.target.target.data_layout.split("-") {
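// e.g. a spec of `i64:64` selects the `i...` arm below and sets `dl.i64_align`,
// while `p:64:64` sets the pointer size and alignment (an illustrative reading
// of the arms that follow).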
109 match &spec.split(":").collect::<Vec<_>>()[..] {
110 &["e"] => dl.endian = Endian::Little,
111 &["E"] => dl.endian = Endian::Big,
112 &["a", ref a..] => dl.aggregate_align = align(a, "a"),
113 &["f32", ref a..] => dl.f32_align = align(a, "f32"),
114 &["f64", ref a..] => dl.f64_align = align(a, "f64"),
115 &[p @ "p", s, ref a..] | &[p @ "p0", s, ref a..] => {
116 dl.pointer_size = size(s, p);
117 dl.pointer_align = align(a, p);
119 &[s, ref a..] if s.starts_with("i") => {
120 let bits = match s[1..].parse::<u64>() {
123 size(&s[1..], "i"); // For the user error.
129 1 => dl.i1_align = a,
130 8 => dl.i8_align = a,
131 16 => dl.i16_align = a,
132 32 => dl.i32_align = a,
133 64 => dl.i64_align = a,
136 if bits >= i128_align_src && bits <= 128 {
137 // Default alignment for i128 is decided by taking the alignment of
138 // largest-sized i{64...128}.
139 i128_align_src = bits;
143 &[s, ref a..] if s.starts_with("v") => {
144 let v_size = size(&s[1..], "v");
146 if let Some(v) = dl.vector_align.iter_mut().find(|v| v.0 == v_size) {
150 // No existing entry, add a new one.
151 dl.vector_align.push((v_size, a));
153 _ => {} // Ignore everything else.
157 // Perform consistency checks against the Target information.
158 let endian_str = match dl.endian {
159 Endian::Little => "little",
162 if endian_str != sess.target.target.target_endian {
163 sess.err(&format!("inconsistent target specification: \"data-layout\" claims \
164 architecture is {}-endian, while \"target-endian\" is `{}`",
165 endian_str, sess.target.target.target_endian));
168 if dl.pointer_size.bits().to_string() != sess.target.target.target_pointer_width {
169 sess.err(&format!("inconsistent target specification: \"data-layout\" claims \
170 pointers are {}-bit, while \"target-pointer-width\" is `{}`",
171 dl.pointer_size.bits(), sess.target.target.target_pointer_width));
177 /// Returns the exclusive upper bound on object size.
179 /// The theoretical maximum object size is defined as the maximum positive `isize` value.
180 /// This ensures that the `offset` semantics remain well-defined by allowing it to correctly
181 /// index every address within an object, along with one byte past the end, and by allowing
182 /// `isize` to store the difference between any two pointers into an object.
184 /// The upper bound on 64-bit currently needs to be lower because LLVM uses a 64-bit integer
185 /// to represent object size in bits. It would need to be 1 << 61 to account for this, but is
186 /// currently conservatively bounded to 1 << 47 as that is enough to cover the current usable
187 /// address space on 64-bit ARMv8 and x86_64.
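///
/// A hedged summary of the resulting bounds (the 64-bit value is stated above;
/// the 16- and 32-bit values follow from the `isize` rule):
///
/// ```text
/// 16-bit pointers => 1 << 15
/// 32-bit pointers => 1 << 31
/// 64-bit pointers => 1 << 47
/// ```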
188 pub fn obj_size_bound(&self) -> u64 {
189 match self.pointer_size.bits() {
193 bits => bug!("obj_size_bound: unknown pointer bit size {}", bits)
197 pub fn ptr_sized_integer(&self) -> Integer {
198 match self.pointer_size.bits() {
202 bits => bug!("ptr_sized_integer: unknown pointer bit size {}", bits)
206 pub fn vector_align(&self, vec_size: Size) -> Align {
207 for &(size, align) in &self.vector_align {
208 if size == vec_size {
212 // Default to natural alignment, which is what LLVM does.
213 // That is, use the size, rounded up to a power of 2.
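// For example (an illustrative value): a 12-byte vector would get 16-byte
// alignment here, since 12u64.next_power_of_two() == 16.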
214 let align = vec_size.bytes().next_power_of_two();
215 Align::from_bytes(align, align).unwrap()
219 pub trait HasDataLayout: Copy {
220 fn data_layout(&self) -> &TargetDataLayout;
223 impl<'a> HasDataLayout for &'a TargetDataLayout {
224 fn data_layout(&self) -> &TargetDataLayout {
229 /// Endianness of the target, which must match cfg(target_endian).
230 #[derive(Copy, Clone)]
236 /// Size of a type in bytes.
237 #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
243 pub fn from_bits(bits: u64) -> Size {
244 // Avoid potential overflow from `bits + 7`.
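// For example: from_bits(1) and from_bits(8) both give 1 byte, and
// from_bits(9) gives 2 bytes (a worked illustration of the rounding below).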
245 Size::from_bytes(bits / 8 + ((bits % 8) + 7) / 8)
248 pub fn from_bytes(bytes: u64) -> Size {
249 if bytes >= (1 << 61) {
250 bug!("Size::from_bytes: {} bytes in bits doesn't fit in u64", bytes)
257 pub fn bytes(self) -> u64 {
261 pub fn bits(self) -> u64 {
265 pub fn abi_align(self, align: Align) -> Size {
266 let mask = align.abi() - 1;
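// e.g. for a 4-byte alignment the mask is 3, so a 5-byte size rounds up to
// (5 + 3) & !3 == 8 (a worked illustration).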
267 Size::from_bytes((self.bytes() + mask) & !mask)
270 pub fn is_abi_aligned(self, align: Align) -> bool {
271 let mask = align.abi() - 1;
272 self.bytes() & mask == 0
275 pub fn checked_add<C: HasDataLayout>(self, offset: Size, cx: C) -> Option<Size> {
276 let dl = cx.data_layout();
278 // Each Size is less than dl.obj_size_bound(), so the sum is
279 // also less than 1 << 62 (and therefore can't overflow).
280 let bytes = self.bytes() + offset.bytes();
282 if bytes < dl.obj_size_bound() {
283 Some(Size::from_bytes(bytes))
289 pub fn checked_mul<C: HasDataLayout>(self, count: u64, cx: C) -> Option<Size> {
290 let dl = cx.data_layout();
292 match self.bytes().checked_mul(count) {
293 Some(bytes) if bytes < dl.obj_size_bound() => {
294 Some(Size::from_bytes(bytes))
301 // Panicking addition, subtraction and multiplication for convenience.
302 // Avoid these during layout computation; return `LayoutError` instead.
306 fn add(self, other: Size) -> Size {
307 // Each Size is less than 1 << 61, so the sum is
308 // less than 1 << 62 (and therefore can't overflow).
309 Size::from_bytes(self.bytes() + other.bytes())
315 fn sub(self, other: Size) -> Size {
316 // Each Size is less than 1 << 61, so an underflow
317 // would result in a value larger than 1 << 61,
318 // which Size::from_bytes will catch for us.
319 Size::from_bytes(self.bytes() - other.bytes())
323 impl Mul<u64> for Size {
325 fn mul(self, count: u64) -> Size {
326 match self.bytes().checked_mul(count) {
327 Some(bytes) => Size::from_bytes(bytes),
329 bug!("Size::mul: {} * {} doesn't fit in u64", self.bytes(), count)
335 impl AddAssign for Size {
336 fn add_assign(&mut self, other: Size) {
337 *self = *self + other;
341 /// Alignment of a type in bytes, both ABI-mandated and preferred.
342 /// Each field is a power of two, giving the alignment a maximum
343 /// value of 2^(2^8 - 1), which is limited by LLVM to an i32, with
344 /// a maximum capacity of 2^31 - 1 or 2147483647.
345 #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
352 pub fn from_bits(abi: u64, pref: u64) -> Result<Align, String> {
353 Align::from_bytes(Size::from_bits(abi).bytes(),
354 Size::from_bits(pref).bytes())
357 pub fn from_bytes(abi: u64, pref: u64) -> Result<Align, String> {
358 let log2 = |align: u64| {
359 // Treat an alignment of 0 bytes like 1-byte alignment.
364 let mut bytes = align;
366 while (bytes & 1) == 0 {
371 Err(format!("`{}` is not a power of 2", align))
373 Err(format!("`{}` is too large", align))
385 pub fn abi(self) -> u64 {
389 pub fn pref(self) -> u64 {
393 pub fn abi_bits(self) -> u64 {
397 pub fn pref_bits(self) -> u64 {
401 pub fn min(self, other: Align) -> Align {
403 abi: cmp::min(self.abi, other.abi),
404 pref: cmp::min(self.pref, other.pref),
408 pub fn max(self, other: Align) -> Align {
410 abi: cmp::max(self.abi, other.abi),
411 pref: cmp::max(self.pref, other.pref),
416 /// Integers, also used for enum discriminants.
417 #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
426 impl<'a, 'tcx> Integer {
427 pub fn size(&self) -> Size {
429 I8 => Size::from_bytes(1),
430 I16 => Size::from_bytes(2),
431 I32 => Size::from_bytes(4),
432 I64 => Size::from_bytes(8),
433 I128 => Size::from_bytes(16),
437 pub fn align<C: HasDataLayout>(&self, cx: C) -> Align {
438 let dl = cx.data_layout();
445 I128 => dl.i128_align,
449 pub fn to_ty(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, signed: bool) -> Ty<'tcx> {
450 match (*self, signed) {
451 (I8, false) => tcx.types.u8,
452 (I16, false) => tcx.types.u16,
453 (I32, false) => tcx.types.u32,
454 (I64, false) => tcx.types.u64,
455 (I128, false) => tcx.types.u128,
456 (I8, true) => tcx.types.i8,
457 (I16, true) => tcx.types.i16,
458 (I32, true) => tcx.types.i32,
459 (I64, true) => tcx.types.i64,
460 (I128, true) => tcx.types.i128,
464 /// Find the smallest Integer type which can represent the signed value.
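/// For example, `fit_signed(-128)` is `I8`, while `fit_signed(128)` needs `I16`.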
465 pub fn fit_signed(x: i128) -> Integer {
467 -0x0000_0000_0000_0080...0x0000_0000_0000_007f => I8,
468 -0x0000_0000_0000_8000...0x0000_0000_0000_7fff => I16,
469 -0x0000_0000_8000_0000...0x0000_0000_7fff_ffff => I32,
470 -0x8000_0000_0000_0000...0x7fff_ffff_ffff_ffff => I64,
475 /// Find the smallest Integer type which can represent the unsigned value.
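/// For example, `fit_unsigned(255)` is `I8`, while `fit_unsigned(256)` needs `I16`.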
476 pub fn fit_unsigned(x: u128) -> Integer {
478 0...0x0000_0000_0000_00ff => I8,
479 0...0x0000_0000_0000_ffff => I16,
480 0...0x0000_0000_ffff_ffff => I32,
481 0...0xffff_ffff_ffff_ffff => I64,
486 /// Find the smallest integer with the given alignment.
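/// For example (a hedged illustration, assuming an `x86_64`-style data layout
/// where `i64` has 8-byte ABI alignment): asking for 8-byte alignment yields
/// `Some(I64)`, while an alignment no integer has (e.g. 32 bytes) yields `None`.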
487 pub fn for_abi_align<C: HasDataLayout>(cx: C, align: Align) -> Option<Integer> {
488 let dl = cx.data_layout();
490 let wanted = align.abi();
491 for &candidate in &[I8, I16, I32, I64, I128] {
492 let ty = Int(candidate, false);
493 if wanted == ty.align(dl).abi() && wanted == ty.size(dl).bytes() {
494 return Some(candidate);
500 /// Get the Integer type from an attr::IntType.
501 pub fn from_attr<C: HasDataLayout>(cx: C, ity: attr::IntType) -> Integer {
502 let dl = cx.data_layout();
505 attr::SignedInt(IntTy::I8) | attr::UnsignedInt(UintTy::U8) => I8,
506 attr::SignedInt(IntTy::I16) | attr::UnsignedInt(UintTy::U16) => I16,
507 attr::SignedInt(IntTy::I32) | attr::UnsignedInt(UintTy::U32) => I32,
508 attr::SignedInt(IntTy::I64) | attr::UnsignedInt(UintTy::U64) => I64,
509 attr::SignedInt(IntTy::I128) | attr::UnsignedInt(UintTy::U128) => I128,
510 attr::SignedInt(IntTy::Is) | attr::UnsignedInt(UintTy::Us) => {
511 dl.ptr_sized_integer()
516 /// Find the appropriate Integer type and signedness for the given
517 /// signed discriminant range and #[repr] attribute.
518 /// N.B.: u128 values above i128::MAX will be treated as signed, but
519 /// that shouldn't affect anything, other than maybe debuginfo.
520 fn repr_discr(tcx: TyCtxt<'a, 'tcx, 'tcx>,
526 // Theoretically, negative values could be larger in unsigned representation
527 // than the unsigned representation of the signed minimum. However, if there
528 // are any negative values, the only valid unsigned representation is u128
529 // which can fit all i128 values, so the result remains unaffected.
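// For example (an illustrative range): with min = -1 and max = 100, the
// unsigned_fit below is I128 (since -1 as u128 is u128::MAX), while the
// signed_fit is I8.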
530 let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
531 let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));
533 let mut min_from_extern = None;
534 let min_default = I8;
536 if let Some(ity) = repr.int {
537 let discr = Integer::from_attr(tcx, ity);
538 let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
540 bug!("Integer::repr_discr: `#[repr]` hint too small for \
541 discriminant range of enum `{}`", ty)
543 return (discr, ity.is_signed());
547 match &tcx.sess.target.target.arch[..] {
548 // WARNING: the ARM EABI has two variants; the one corresponding
549 // to `at_least == I32` appears to be used on Linux and NetBSD,
550 // but some systems may use the variant corresponding to no
551 // lower bound. However, we don't run on those yet...?
552 "arm" => min_from_extern = Some(I32),
553 _ => min_from_extern = Some(I32),
557 let at_least = min_from_extern.unwrap_or(min_default);
559 // If there are no negative values, we can use the unsigned fit.
561 (cmp::max(unsigned_fit, at_least), false)
563 (cmp::max(signed_fit, at_least), true)
568 /// Fundamental unit of memory access and layout.
569 #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
571 /// The `bool` is the signedness of the `Integer` type.
573 /// One would think we would not care about such details this low down,
574 /// but some ABIs are described in terms of C types and ISAs where the
575 /// integer arithmetic is done on {sign,zero}-extended registers, e.g.
576 /// a negative integer passed by zero-extension will appear positive in
577 /// the callee, and most operations on it will produce the wrong values.
584 impl<'a, 'tcx> Primitive {
585 pub fn size<C: HasDataLayout>(self, cx: C) -> Size {
586 let dl = cx.data_layout();
589 Int(i, _) => i.size(),
590 F32 => Size::from_bits(32),
591 F64 => Size::from_bits(64),
592 Pointer => dl.pointer_size
596 pub fn align<C: HasDataLayout>(self, cx: C) -> Align {
597 let dl = cx.data_layout();
600 Int(i, _) => i.align(dl),
603 Pointer => dl.pointer_align
607 pub fn to_ty(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Ty<'tcx> {
609 Int(i, signed) => i.to_ty(tcx, signed),
610 F32 => tcx.types.f32,
611 F64 => tcx.types.f64,
612 Pointer => tcx.mk_mut_ptr(tcx.mk_nil()),
617 /// Information about one scalar component of a Rust type.
618 #[derive(Clone, PartialEq, Eq, Hash, Debug)]
620 pub value: Primitive,
622 /// Inclusive wrap-around range of valid values, that is, if
623 /// min > max, it represents min..=u128::MAX followed by 0..=max.
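/// For example (values that appear elsewhere in this module): `bool` uses
/// `0..=1`, `char` uses `0..=0x10FFFF`, and a non-null pointer is expressed
/// by setting the range's `start` to 1.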
624 // FIXME(eddyb) always use the shortest range, e.g. by finding
625 // the largest space between two consecutive valid values and
626 // taking everything else as the (shortest) valid range.
627 pub valid_range: RangeInclusive<u128>,
631 pub fn is_bool(&self) -> bool {
632 if let Int(I8, _) = self.value {
633 self.valid_range == (0..=1)
640 /// The first half of a fat pointer.
641 /// - For a trait object, this is the address of the box.
642 /// - For a slice, this is the base address.
643 pub const FAT_PTR_ADDR: usize = 0;
645 /// The second half of a fat pointer.
646 /// - For a trait object, this is the address of the vtable.
647 /// - For a slice, this is the length.
648 pub const FAT_PTR_EXTRA: usize = 1;
650 /// Describes how the fields of a type are located in memory.
651 #[derive(PartialEq, Eq, Hash, Debug)]
652 pub enum FieldPlacement {
653 /// All fields start at no offset. The `usize` is the field count.
656 /// Array/vector-like placement, with all fields of identical types.
662 /// Struct-like placement, with precomputed offsets.
664 /// Fields are guaranteed to not overlap, but note that gaps
665 /// before, between and after all the fields are NOT always
666 /// padding, and as such their contents may not be discarded.
667 /// For example, enum variants leave a gap at the start,
668 /// where the discriminant field in the enum layout goes.
670 /// Offsets for the first byte of each field,
671 /// ordered to match the source definition order.
672 /// These offsets are not necessarily in increasing order.
673 // FIXME(eddyb) use small vector optimization for the common case.
676 /// Maps source order field indices to memory order indices,
677 /// depending on how the fields were permuted.
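/// For example (an illustrative permutation): if the source fields `A, B, C`
/// are laid out in memory as `B, C, A`, then `memory_index` is `[2, 0, 1]`.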
678 // FIXME(camlorn) also consider small vector optimization here.
679 memory_index: Vec<u32>
683 impl FieldPlacement {
684 pub fn count(&self) -> usize {
686 FieldPlacement::Union(count) => count,
687 FieldPlacement::Array { count, .. } => {
688 let usize_count = count as usize;
689 assert_eq!(usize_count as u64, count);
692 FieldPlacement::Arbitrary { ref offsets, .. } => offsets.len()
696 pub fn offset(&self, i: usize) -> Size {
698 FieldPlacement::Union(_) => Size::from_bytes(0),
699 FieldPlacement::Array { stride, count } => {
704 FieldPlacement::Arbitrary { ref offsets, .. } => offsets[i]
708 pub fn memory_index(&self, i: usize) -> usize {
710 FieldPlacement::Union(_) |
711 FieldPlacement::Array { .. } => i,
712 FieldPlacement::Arbitrary { ref memory_index, .. } => {
713 let r = memory_index[i];
714 assert_eq!(r as usize as u32, r);
720 /// Gets source indices of the fields, in order of increasing offset.
722 pub fn index_by_increasing_offset<'a>(&'a self) -> impl iter::Iterator<Item=usize>+'a {
723 let mut inverse_small = [0u8; 64];
724 let mut inverse_big = vec![];
725 let use_small = self.count() <= inverse_small.len();
727 // We have to write this logic twice in order to keep the array small.
728 if let FieldPlacement::Arbitrary { ref memory_index, .. } = *self {
730 for i in 0..self.count() {
731 inverse_small[memory_index[i] as usize] = i as u8;
734 inverse_big = vec![0; self.count()];
735 for i in 0..self.count() {
736 inverse_big[memory_index[i] as usize] = i as u32;
741 (0..self.count()).map(move |i| {
743 FieldPlacement::Union(_) |
744 FieldPlacement::Array { .. } => i,
745 FieldPlacement::Arbitrary { .. } => {
746 if use_small { inverse_small[i] as usize }
747 else { inverse_big[i] as usize }
754 /// Describes how values of the type are passed by target ABIs,
755 /// in terms of categories of C types there are ABI rules for.
756 #[derive(Clone, PartialEq, Eq, Hash, Debug)]
760 ScalarPair(Scalar, Scalar),
763 /// If true, the size is exact, otherwise it's only a lower bound.
770 /// Returns true if the layout corresponds to an unsized type.
771 pub fn is_unsized(&self) -> bool {
775 Abi::ScalarPair(..) |
776 Abi::Vector => false,
777 Abi::Aggregate { sized, .. } => !sized
781 /// Returns true if the fields of the layout are packed.
782 pub fn is_packed(&self) -> bool {
786 Abi::ScalarPair(..) |
787 Abi::Vector => false,
788 Abi::Aggregate { packed, .. } => packed
793 #[derive(PartialEq, Eq, Hash, Debug)]
795 /// Single enum variants, structs/tuples, unions, and all non-ADTs.
800 /// General-case enums: for each case there is a struct, and they all have
801 /// all space reserved for the discriminant, and their first field starts
802 /// at a non-0 offset, after where the discriminant would go.
805 variants: Vec<LayoutDetails>,
808 /// Multiple cases distinguished by a niche (values invalid for a type):
809 /// the variant `dataful_variant` contains a niche at an arbitrary
810 /// offset (field 0 of the enum), which for a variant with discriminant
811 /// `d` is set to `(d - niche_variants.start).wrapping_add(niche_start)`.
813 /// For example, `Option<(usize, &T)>` is represented such that
814 /// `None` has a null pointer for the second tuple field, and
815 /// `Some` is the identity function (with a non-null reference).
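///
/// As a concrete reading of the formula above (an illustrative case): for
/// `Option<&T>`, `dataful_variant` is the `Some` variant, `niche_variants`
/// covers only `None`, `niche_start` is 0, and so `None` is encoded as the
/// value 0, i.e. the null pointer.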
817 dataful_variant: usize,
818 niche_variants: RangeInclusive<usize>,
821 variants: Vec<LayoutDetails>,
825 #[derive(Copy, Clone, Debug)]
826 pub enum LayoutError<'tcx> {
828 SizeOverflow(Ty<'tcx>)
831 impl<'tcx> fmt::Display for LayoutError<'tcx> {
832 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
834 LayoutError::Unknown(ty) => {
835 write!(f, "the type `{:?}` has an unknown layout", ty)
837 LayoutError::SizeOverflow(ty) => {
838 write!(f, "the type `{:?}` is too big for the current architecture", ty)
844 #[derive(PartialEq, Eq, Hash, Debug)]
845 pub struct LayoutDetails {
846 pub variants: Variants,
847 pub fields: FieldPlacement,
854 fn scalar<C: HasDataLayout>(cx: C, scalar: Scalar) -> Self {
855 let size = scalar.value.size(cx);
856 let align = scalar.value.align(cx);
858 variants: Variants::Single { index: 0 },
859 fields: FieldPlacement::Union(0),
860 abi: Abi::Scalar(scalar),
866 fn uninhabited(field_count: usize) -> Self {
867 let align = Align::from_bytes(1, 1).unwrap();
869 variants: Variants::Single { index: 0 },
870 fields: FieldPlacement::Union(field_count),
871 abi: Abi::Uninhabited,
873 size: Size::from_bytes(0)
878 fn layout_raw<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
879 query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
880 -> Result<&'tcx LayoutDetails, LayoutError<'tcx>>
882 let (param_env, ty) = query.into_parts();
884 let rec_limit = tcx.sess.recursion_limit.get();
885 let depth = tcx.layout_depth.get();
886 if depth > rec_limit {
888 &format!("overflow representing the type `{}`", ty));
891 tcx.layout_depth.set(depth+1);
892 let layout = LayoutDetails::compute_uncached(tcx, param_env, ty);
893 tcx.layout_depth.set(depth);
898 pub fn provide(providers: &mut ty::maps::Providers) {
899 *providers = ty::maps::Providers {
905 impl<'a, 'tcx> LayoutDetails {
906 fn compute_uncached(tcx: TyCtxt<'a, 'tcx, 'tcx>,
907 param_env: ty::ParamEnv<'tcx>,
909 -> Result<&'tcx Self, LayoutError<'tcx>> {
910 let cx = (tcx, param_env);
911 let dl = cx.data_layout();
912 let scalar_unit = |value: Primitive| {
913 let bits = value.size(dl).bits();
914 assert!(bits <= 128);
917 valid_range: 0..=(!0 >> (128 - bits))
920 let scalar = |value: Primitive| {
921 tcx.intern_layout(LayoutDetails::scalar(cx, scalar_unit(value)))
923 let scalar_pair = |a: Scalar, b: Scalar| {
924 let align = a.value.align(dl).max(b.value.align(dl)).max(dl.aggregate_align);
925 let b_offset = a.value.size(dl).abi_align(b.value.align(dl));
926 let size = (b_offset + b.value.size(dl)).abi_align(align);
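// For example (an illustrative 64-bit layout): pairing an `i8` with a
// pointer gives b_offset = 8 and size = 16.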
928 variants: Variants::Single { index: 0 },
929 fields: FieldPlacement::Arbitrary {
930 offsets: vec![Size::from_bytes(0), b_offset],
931 memory_index: vec![0, 1]
933 abi: Abi::ScalarPair(a, b),
939 #[derive(Copy, Clone, Debug)]
941 /// A tuple, closure, or univariant which cannot be coerced to unsized.
943 /// A univariant, the last field of which may be coerced to unsized.
945 /// A univariant, but part of an enum.
946 EnumVariant(Integer),
948 let univariant_uninterned = |fields: &[TyLayout], repr: &ReprOptions, kind| {
949 let packed = repr.packed();
950 if packed && repr.align > 0 {
951 bug!("struct cannot be packed and aligned");
954 let mut align = if packed {
960 let mut sized = true;
961 let mut offsets = vec![Size::from_bytes(0); fields.len()];
962 let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();
964 // Anything with repr(C) or repr(packed) doesn't optimize.
965 let optimize = match kind {
966 StructKind::AlwaysSized |
967 StructKind::MaybeUnsized |
968 StructKind::EnumVariant(I8) => {
969 (repr.flags & ReprFlags::IS_UNOPTIMISABLE).is_empty()
971 StructKind::EnumVariant(_) => false
974 let end = if let StructKind::MaybeUnsized = kind {
979 let optimizing = &mut inverse_memory_index[..end];
981 StructKind::AlwaysSized |
982 StructKind::MaybeUnsized => {
983 optimizing.sort_by_key(|&x| {
984 // Place ZSTs first to avoid "interesting offsets",
985 // especially with only one or two non-ZST fields.
986 let f = &fields[x as usize];
987 (!f.is_zst(), cmp::Reverse(f.align.abi()))
990 StructKind::EnumVariant(_) => {
991 optimizing.sort_by_key(|&x| fields[x as usize].align.abi());
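// For example (an illustrative field list): the sized-struct rule above
// reorders fields `(u8, u32, ())` to `((), u32, u8)`, while enum variants
// are sorted by increasing alignment only.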
996 // inverse_memory_index holds field indices by increasing memory offset.
997 // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
998 // We now write field offsets to the corresponding offset slot;
999 // field 5 with offset 0 puts 0 in offsets[5].
1000 // At the bottom of this function, we use inverse_memory_index to produce memory_index.
1002 let mut offset = Size::from_bytes(0);
1004 if let StructKind::EnumVariant(discr) = kind {
1005 offset = discr.size();
1007 let discr_align = discr.align(dl);
1008 align = align.max(discr_align);
1012 for &i in &inverse_memory_index {
1013 let field = fields[i as usize];
1015 bug!("univariant: field #{} of `{}` comes after unsized field",
1019 if field.abi == Abi::Uninhabited {
1020 return Ok(LayoutDetails::uninhabited(fields.len()));
1023 if field.is_unsized() {
1027 // Invariant: offset < dl.obj_size_bound() <= 1<<61
1029 offset = offset.abi_align(field.align);
1030 align = align.max(field.align);
1033 debug!("univariant offset: {:?} field: {:#?}", offset, field);
1034 offsets[i as usize] = offset;
1036 offset = offset.checked_add(field.size, dl)
1037 .ok_or(LayoutError::SizeOverflow(ty))?;
1041 let repr_align = repr.align as u64;
1042 align = align.max(Align::from_bytes(repr_align, repr_align).unwrap());
1043 debug!("univariant repr_align: {:?}", repr_align);
1046 debug!("univariant min_size: {:?}", offset);
1047 let min_size = offset;
1049 // As stated above, inverse_memory_index holds field indices by increasing offset.
1050 // This makes it an already-sorted view of the offsets vec.
1051 // To invert it, consider:
1052 // If field 5 has offset 0, inverse_memory_index[0] is 5, and memory_index[5] should be 0.
1053 // Field 5 would be the first element, so memory_index is i:
1054 // Note: if we didn't optimize, it's already right.
1056 let mut memory_index;
1058 memory_index = vec![0; inverse_memory_index.len()];
1060 for i in 0..inverse_memory_index.len() {
1061 memory_index[inverse_memory_index[i] as usize] = i as u32;
1064 memory_index = inverse_memory_index;
1067 let size = min_size.abi_align(align);
1068 let mut abi = Abi::Aggregate {
1073 // Unpack newtype ABIs and find scalar pairs.
1074 if sized && size.bytes() > 0 {
1075 // All other fields must be ZSTs, and we need them to all start at 0.
1076 let mut zst_offsets =
1077 offsets.iter().enumerate().filter(|&(i, _)| fields[i].is_zst());
1078 if zst_offsets.all(|(_, o)| o.bytes() == 0) {
1079 let mut non_zst_fields =
1080 fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());
1082 match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
1083 // We have exactly one non-ZST field.
1084 (Some((i, field)), None, None) => {
1085 // Field fills the struct and it has a scalar or scalar pair ABI.
1086 if offsets[i].bytes() == 0 && size == field.size {
1088 // For plain scalars we can't unpack newtypes
1089 // for `#[repr(C)]`, as that affects C ABIs.
1090 Abi::Scalar(_) if optimize => {
1091 abi = field.abi.clone();
1093 // But scalar pairs are Rust-specific and get
1094 // treated as aggregates by C ABIs anyway.
1095 Abi::ScalarPair(..) => {
1096 abi = field.abi.clone();
1103 // Two non-ZST fields, and they're both scalars.
1104 (Some((i, &TyLayout {
1105 details: &LayoutDetails { abi: Abi::Scalar(ref a), .. }, ..
1106 })), Some((j, &TyLayout {
1107 details: &LayoutDetails { abi: Abi::Scalar(ref b), .. }, ..
1109 // Order by the memory placement, not source order.
1110 let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
1115 let pair = scalar_pair(a.clone(), b.clone());
1116 let pair_offsets = match pair.fields {
1117 FieldPlacement::Arbitrary {
1121 assert_eq!(memory_index, &[0, 1]);
1126 if offsets[i] == pair_offsets[0] &&
1127 offsets[j] == pair_offsets[1] &&
1128 align == pair.align &&
1130 // We can use `ScalarPair` only when it matches our
1131 // already computed layout (including `#[repr(C)]`).
1142 variants: Variants::Single { index: 0 },
1143 fields: FieldPlacement::Arbitrary {
1152 let univariant = |fields: &[TyLayout], repr: &ReprOptions, kind| {
1153 Ok(tcx.intern_layout(univariant_uninterned(fields, repr, kind)?))
1155 assert!(!ty.has_infer_types());
1160 tcx.intern_layout(LayoutDetails::scalar(cx, Scalar {
1161 value: Int(I8, false),
1166 tcx.intern_layout(LayoutDetails::scalar(cx, Scalar {
1167 value: Int(I32, false),
1168 valid_range: 0..=0x10FFFF
1172 scalar(Int(Integer::from_attr(dl, attr::SignedInt(ity)), true))
1174 ty::TyUint(ity) => {
1175 scalar(Int(Integer::from_attr(dl, attr::UnsignedInt(ity)), false))
1177 ty::TyFloat(FloatTy::F32) => scalar(F32),
1178 ty::TyFloat(FloatTy::F64) => scalar(F64),
1180 let mut ptr = scalar_unit(Pointer);
1181 ptr.valid_range.start = 1;
1182 tcx.intern_layout(LayoutDetails::scalar(cx, ptr))
1187 tcx.intern_layout(LayoutDetails::uninhabited(0))
1190 // Potentially-fat pointers.
1191 ty::TyRef(_, ty::TypeAndMut { ty: pointee, .. }) |
1192 ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
1193 let mut data_ptr = scalar_unit(Pointer);
1194 if !ty.is_unsafe_ptr() {
1195 data_ptr.valid_range.start = 1;
1198 let pointee = tcx.normalize_associated_type_in_env(&pointee, param_env);
1199 if pointee.is_sized(tcx, param_env, DUMMY_SP) {
1200 return Ok(tcx.intern_layout(LayoutDetails::scalar(cx, data_ptr)));
1203 let unsized_part = tcx.struct_tail(pointee);
1204 let metadata = match unsized_part.sty {
1205 ty::TyForeign(..) => {
1206 return Ok(tcx.intern_layout(LayoutDetails::scalar(cx, data_ptr)));
1208 ty::TySlice(_) | ty::TyStr => {
1209 scalar_unit(Int(dl.ptr_sized_integer(), false))
1211 ty::TyDynamic(..) => {
1212 let mut vtable = scalar_unit(Pointer);
1213 vtable.valid_range.start = 1;
1216 _ => return Err(LayoutError::Unknown(unsized_part))
1219 // Effectively a (ptr, meta) tuple.
1220 tcx.intern_layout(scalar_pair(data_ptr, metadata))
1223 // Arrays and slices.
1224 ty::TyArray(element, mut count) => {
1225 if count.has_projections() {
1226 count = tcx.normalize_associated_type_in_env(&count, param_env);
1227 if count.has_projections() {
1228 return Err(LayoutError::Unknown(ty));
1232 let element = cx.layout_of(element)?;
1233 let count = count.val.to_const_int().unwrap().to_u64().unwrap();
1234 let size = element.size.checked_mul(count, dl)
1235 .ok_or(LayoutError::SizeOverflow(ty))?;
1237 tcx.intern_layout(LayoutDetails {
1238 variants: Variants::Single { index: 0 },
1239 fields: FieldPlacement::Array {
1240 stride: element.size,
1243 abi: Abi::Aggregate {
1247 align: element.align,
1251 ty::TySlice(element) => {
1252 let element = cx.layout_of(element)?;
1253 tcx.intern_layout(LayoutDetails {
1254 variants: Variants::Single { index: 0 },
1255 fields: FieldPlacement::Array {
1256 stride: element.size,
1259 abi: Abi::Aggregate {
1263 align: element.align,
1264 size: Size::from_bytes(0)
1268 tcx.intern_layout(LayoutDetails {
1269 variants: Variants::Single { index: 0 },
1270 fields: FieldPlacement::Array {
1271 stride: Size::from_bytes(1),
1274 abi: Abi::Aggregate {
1279 size: Size::from_bytes(0)
1284 ty::TyFnDef(..) => {
1285 univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?
1287 ty::TyDynamic(..) | ty::TyForeign(..) => {
1288 let mut unit = univariant_uninterned(&[], &ReprOptions::default(),
1289 StructKind::AlwaysSized)?;
1291 Abi::Aggregate { ref mut sized, .. } => *sized = false,
1294 tcx.intern_layout(unit)
1297 // Tuples, generators and closures.
1298 ty::TyGenerator(def_id, ref substs, _) => {
1299 let tys = substs.field_tys(def_id, tcx);
1300 univariant(&tys.map(|ty| cx.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
1301 &ReprOptions::default(),
1302 StructKind::AlwaysSized)?
1305 ty::TyClosure(def_id, ref substs) => {
1306 let tys = substs.upvar_tys(def_id, tcx);
1307 univariant(&tys.map(|ty| cx.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
1308 &ReprOptions::default(),
1309 StructKind::AlwaysSized)?
1312 ty::TyTuple(tys, _) => {
1313 let kind = if tys.len() == 0 {
1314 StructKind::AlwaysSized
1316 StructKind::MaybeUnsized
1319 univariant(&tys.iter().map(|ty| cx.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
1320 &ReprOptions::default(), kind)?
1323 // SIMD vector types.
1324 ty::TyAdt(def, ..) if def.repr.simd() => {
1325 let count = ty.simd_size(tcx) as u64;
1326 let element = cx.layout_of(ty.simd_type(tcx))?;
1328 Abi::Scalar(_) => {}
1330 tcx.sess.fatal(&format!("monomorphising SIMD type `{}` with \
1331 a non-machine element type `{}`",
1335 let size = element.size.checked_mul(count, dl)
1336 .ok_or(LayoutError::SizeOverflow(ty))?;
1337 let align = dl.vector_align(size);
1338 let size = size.abi_align(align);
1340 tcx.intern_layout(LayoutDetails {
1341 variants: Variants::Single { index: 0 },
1342 fields: FieldPlacement::Array {
1343 stride: element.size,
1353 ty::TyAdt(def, substs) => {
1354 // Cache the field layouts.
1355 let variants = def.variants.iter().map(|v| {
1356 v.fields.iter().map(|field| {
1357 cx.layout_of(field.ty(tcx, substs))
1358 }).collect::<Result<Vec<_>, _>>()
1359 }).collect::<Result<Vec<_>, _>>()?;
1361 let (inh_first, inh_second) = {
1362 let mut inh_variants = (0..variants.len()).filter(|&v| {
1363 variants[v].iter().all(|f| f.abi != Abi::Uninhabited)
1365 (inh_variants.next(), inh_variants.next())
1367 if inh_first.is_none() {
1368 // Uninhabited because it has no variants, or only uninhabited ones.
1369 return Ok(tcx.intern_layout(LayoutDetails::uninhabited(0)));
1373 let packed = def.repr.packed();
1374 if packed && def.repr.align > 0 {
1375 bug!("Union cannot be packed and aligned");
1378 let mut align = if def.repr.packed() {
1384 if def.repr.align > 0 {
1385 let repr_align = def.repr.align as u64;
1387 Align::from_bytes(repr_align, repr_align).unwrap());
1390 let mut size = Size::from_bytes(0);
1391 for field in &variants[0] {
1392 assert!(!field.is_unsized());
1395 align = align.max(field.align);
1397 size = cmp::max(size, field.size);
1400 return Ok(tcx.intern_layout(LayoutDetails {
1401 variants: Variants::Single { index: 0 },
1402 fields: FieldPlacement::Union(variants[0].len()),
1403 abi: Abi::Aggregate {
1408 size: size.abi_align(align)
1412 let is_struct = !def.is_enum() ||
1413 // Only one variant is inhabited.
1414 (inh_second.is_none() &&
1415 // Representation optimizations are allowed.
1416 !def.repr.inhibit_enum_layout_opt() &&
1417 // Inhabited variant either has data ...
1418 (!variants[inh_first.unwrap()].is_empty() ||
1419 // ... or there are other, uninhabited, variants.
1420 variants.len() > 1));
1422 // Struct, or univariant enum equivalent to a struct.
1423 // (Typechecking will reject discriminant-sizing attrs.)
1425 let v = inh_first.unwrap();
1426 let kind = if def.is_enum() || variants[v].len() == 0 {
1427 StructKind::AlwaysSized
1429 let param_env = tcx.param_env(def.did);
1430 let last_field = def.variants[v].fields.last().unwrap();
1431 let always_sized = tcx.type_of(last_field.did)
1432 .is_sized(tcx, param_env, DUMMY_SP);
1433 if !always_sized { StructKind::MaybeUnsized }
1434 else { StructKind::AlwaysSized }
1437 let mut st = univariant_uninterned(&variants[v], &def.repr, kind)?;
1438 st.variants = Variants::Single { index: v };
1439 // Exclude 0 from the range of a newtype ABI NonZero<T>.
1440 if Some(def.did) == cx.tcx().lang_items().non_zero() {
1442 Abi::Scalar(ref mut scalar) |
1443 Abi::ScalarPair(ref mut scalar, _) => {
1444 if scalar.valid_range.start == 0 {
1445 scalar.valid_range.start = 1;
1451 return Ok(tcx.intern_layout(st));
1454 let no_explicit_discriminants = def.variants.iter().enumerate()
1455 .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i));
1457 // Niche-filling enum optimization.
1458 if !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants {
1459 let mut dataful_variant = None;
1460 let mut niche_variants = usize::max_value()..=0;
1462 // Find one non-ZST variant.
1463 'variants: for (v, fields) in variants.iter().enumerate() {
1465 if f.abi == Abi::Uninhabited {
1469 if dataful_variant.is_none() {
1470 dataful_variant = Some(v);
1473 dataful_variant = None;
1478 if niche_variants.start > v {
1479 niche_variants.start = v;
1481 niche_variants.end = v;
1484 if niche_variants.start > niche_variants.end {
1485 dataful_variant = None;
1488 if let Some(i) = dataful_variant {
1489 let count = (niche_variants.end - niche_variants.start + 1) as u128;
1490 for (field_index, field) in variants[i].iter().enumerate() {
1491 let (offset, niche, niche_start) =
1492 match field.find_niche(cx, count)? {
1493 Some(niche) => niche,
1496 let st = variants.iter().enumerate().map(|(j, v)| {
1497 let mut st = univariant_uninterned(v,
1498 &def.repr, StructKind::AlwaysSized)?;
1499 st.variants = Variants::Single { index: j };
1501 }).collect::<Result<Vec<_>, _>>()?;
1503 let offset = st[i].fields.offset(field_index) + offset;
1504 let LayoutDetails { size, mut align, .. } = st[i];
1506 let mut niche_align = niche.value.align(dl);
1507 let abi = if offset.bytes() == 0 && niche.value.size(dl) == size {
1508 Abi::Scalar(niche.clone())
1510 let mut packed = st[i].abi.is_packed();
1511 if offset.abi_align(niche_align) != offset {
1513 niche_align = dl.i8_align;
1520 align = align.max(niche_align);
1522 return Ok(tcx.intern_layout(LayoutDetails {
1523 variants: Variants::NicheFilling {
1530 fields: FieldPlacement::Arbitrary {
1531 offsets: vec![offset],
1532 memory_index: vec![0]
1542 let (mut min, mut max) = (i128::max_value(), i128::min_value());
1543 for (i, discr) in def.discriminants(tcx).enumerate() {
1544 if variants[i].iter().any(|f| f.abi == Abi::Uninhabited) {
1547 let x = discr.to_u128_unchecked() as i128;
1548 if x < min { min = x; }
1549 if x > max { max = x; }
1551 assert!(min <= max, "discriminant range is {}...{}", min, max);
1552 let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max);
1554 let mut align = dl.aggregate_align;
1555 let mut size = Size::from_bytes(0);
1557 // We're interested in the smallest alignment, so start large.
1558 let mut start_align = Align::from_bytes(256, 256).unwrap();
1559 assert_eq!(Integer::for_abi_align(dl, start_align), None);
1561 // Create the set of structs that represent each variant.
1562 let mut variants = variants.into_iter().enumerate().map(|(i, field_layouts)| {
1563 let mut st = univariant_uninterned(&field_layouts,
1564 &def.repr, StructKind::EnumVariant(min_ity))?;
1565 st.variants = Variants::Single { index: i };
1566 // Find the first field we can't move later
1567 // to make room for a larger discriminant.
1568 for field in st.fields.index_by_increasing_offset().map(|j| field_layouts[j]) {
1569 if !field.is_zst() || field.align.abi() != 1 {
1570 start_align = start_align.min(field.align);
1574 size = cmp::max(size, st.size);
1575 align = align.max(st.align);
1577 }).collect::<Result<Vec<_>, _>>()?;
1579 // Align the maximum variant size to the largest alignment.
1580 size = size.abi_align(align);
1582 if size.bytes() >= dl.obj_size_bound() {
1583 return Err(LayoutError::SizeOverflow(ty));
1586 let typeck_ity = Integer::from_attr(dl, def.repr.discr_type());
1587 if typeck_ity < min_ity {
1588 // It is a bug if layout decides on a larger discriminant size than typeck did
1589 // at this point (based on the values the discriminant can take on), mostly
1590 // because this discriminant will be loaded and then stored into a variable of
1591 // the type typeck calculated. Consider such a (buggy) case: typeck decided on
1592 // a byte-sized discriminant, but layout thinks we need 16 bits to store all
1593 // the discriminant values. Then, in trans, in order to store this 16-bit
1594 // discriminant into an 8-bit sized temporary, some of the bits needed to
1595 // represent it would have to be discarded (or layout is wrong in thinking
1596 // it needs 16 bits).
1597 bug!("layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
1598 min_ity, typeck_ity);
1599 // However, it is fine to make the discr type larger (as an optimisation)
1600 // after this point - we'll just truncate the value we load in trans.
1603 // Check to see if we should use a different type for the
1604 // discriminant. We can safely use a type with the same size
1605 // as the alignment of the first field of each variant.
1606 // We increase the size of the discriminant to avoid LLVM copying
1607 // padding when it doesn't need to. This normally causes unaligned
1608 // load/stores and excessive memcpy/memset operations. By using a
1609 // bigger integer size, LLVM can be sure about its contents and
1610 // won't be so conservative.
1612 // Use the initial field alignment
1613 let mut ity = Integer::for_abi_align(dl, start_align).unwrap_or(min_ity);
1615 // If the alignment is not larger than the chosen discriminant size,
1616 // don't use the alignment as the final size.
1620 // Patch up the variants' first few fields.
1621 let old_ity_size = min_ity.size();
1622 let new_ity_size = ity.size();
1623 for variant in &mut variants {
1624 if variant.abi == Abi::Uninhabited {
1627 match variant.fields {
1628 FieldPlacement::Arbitrary { ref mut offsets, .. } => {
1630 if *i <= old_ity_size {
1631 assert_eq!(*i, old_ity_size);
1635 // We might be making the struct larger.
1636 if variant.size <= old_ity_size {
1637 variant.size = new_ity_size;
1645 let discr = Scalar {
1646 value: Int(ity, signed),
1647 valid_range: (min as u128)..=(max as u128)
1649 let abi = if discr.value.size(dl) == size {
1650 Abi::Scalar(discr.clone())
1657 tcx.intern_layout(LayoutDetails {
1658 variants: Variants::Tagged {
1662 // FIXME(eddyb): using `FieldPlacement::Arbitrary` here results
1663 // in lost optimizations, specifically around allocations, see
1664 // `test/codegen/{alloc-optimisation,vec-optimizes-away}.rs`.
1665 fields: FieldPlacement::Union(1),
1672 // Types with no meaningful known layout.
1673 ty::TyProjection(_) | ty::TyAnon(..) => {
1674 let normalized = tcx.normalize_associated_type_in_env(&ty, param_env);
1675 if ty == normalized {
1676 return Err(LayoutError::Unknown(ty));
1678 tcx.layout_raw(param_env.and(normalized))?
1681 return Err(LayoutError::Unknown(ty));
1683 ty::TyInfer(_) | ty::TyError => {
1684 bug!("LayoutDetails::compute: unexpected type `{}`", ty)
1689 /// This is invoked by the `layout_raw` query to record the final
1690 /// layout of each type.
1692 fn record_layout_for_printing(tcx: TyCtxt<'a, 'tcx, 'tcx>,
1694 param_env: ty::ParamEnv<'tcx>,
1695 layout: TyLayout<'tcx>) {
1696 // If we are running with `-Zprint-type-sizes`, record layouts for
1697 // dumping later. Ignore layouts that are done with non-empty
1698 // environments or non-monomorphic layouts, as the user only wants
1699 // to see the stuff resulting from the final trans session.
1701 !tcx.sess.opts.debugging_opts.print_type_sizes ||
1702 ty.has_param_types() ||
1704 !param_env.caller_bounds.is_empty()
1709 Self::record_layout_for_printing_outlined(tcx, ty, param_env, layout)
1712 fn record_layout_for_printing_outlined(tcx: TyCtxt<'a, 'tcx, 'tcx>,
1714 param_env: ty::ParamEnv<'tcx>,
1715 layout: TyLayout<'tcx>) {
1716 let cx = (tcx, param_env);
1717 // (delay format until we actually need it)
1718 let record = |kind, opt_discr_size, variants| {
1719 let type_desc = format!("{:?}", ty);
1720 tcx.sess.code_stats.borrow_mut().record_type_size(kind,
1728 let adt_def = match ty.sty {
1729 ty::TyAdt(ref adt_def, _) => {
1730 debug!("print-type-size t: `{:?}` process adt", ty);
1734 ty::TyClosure(..) => {
1735 debug!("print-type-size t: `{:?}` record closure", ty);
1736 record(DataTypeKind::Closure, None, vec![]);
1741 debug!("print-type-size t: `{:?}` skip non-nominal", ty);
1746 let adt_kind = adt_def.adt_kind();
1748 let build_variant_info = |n: Option<ast::Name>,
1750 layout: TyLayout<'tcx>| {
1751 let mut min_size = Size::from_bytes(0);
1752 let field_info: Vec<_> = flds.iter().enumerate().map(|(i, &name)| {
1753 match layout.field(cx, i) {
1755 bug!("no layout found for field {}: `{:?}`", name, err);
1757 Ok(field_layout) => {
1758 let offset = layout.fields.offset(i);
1759 let field_end = offset + field_layout.size;
1760 if min_size < field_end {
1761 min_size = field_end;
1763 session::FieldInfo {
1764 name: name.to_string(),
1765 offset: offset.bytes(),
1766 size: field_layout.size.bytes(),
1767 align: field_layout.align.abi(),
1773 session::VariantInfo {
1774 name: n.map(|n|n.to_string()),
1775 kind: if layout.is_unsized() {
1776 session::SizeKind::Min
1778 session::SizeKind::Exact
1780 align: layout.align.abi(),
1781 size: if min_size.bytes() == 0 {
1790 match layout.variants {
1791 Variants::Single { index } => {
1792 debug!("print-type-size `{:#?}` variant {}",
1793 layout, adt_def.variants[index].name);
1794 if !adt_def.variants.is_empty() {
1795 let variant_def = &adt_def.variants[index];
1796 let fields: Vec<_> =
1797 variant_def.fields.iter().map(|f| f.name).collect();
1798 record(adt_kind.into(),
1800 vec![build_variant_info(Some(variant_def.name),
1804 // (This case arises for *empty* enums; so give it
1806 record(adt_kind.into(), None, vec![]);
1810 Variants::NicheFilling { .. } |
1811 Variants::Tagged { .. } => {
1812 debug!("print-type-size `{:#?}` adt general variants def {}",
1813 ty, adt_def.variants.len());
1814 let variant_infos: Vec<_> =
1815 adt_def.variants.iter().enumerate().map(|(i, variant_def)| {
1816 let fields: Vec<_> =
1817 variant_def.fields.iter().map(|f| f.name).collect();
1818 build_variant_info(Some(variant_def.name),
1820 layout.for_variant(cx, i))
1823 record(adt_kind.into(), match layout.variants {
1824 Variants::Tagged { ref discr, .. } => Some(discr.value.size(tcx)),
1832 /// Type size "skeleton", i.e. the only information determining a type's size.
1833 /// While this is conservative (aside from constant sizes, only pointers,
1834 /// newtypes thereof and null-pointer-optimized enums are allowed), it is
1835 /// enough to statically check common use cases of transmute.
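///
/// For example (a hedged illustration): `&T` and `Option<&T>` both reduce to
/// `SizeSkeleton::Pointer` with the same `tail`, so a `transmute` between them
/// can be size-checked even when `T` is a type parameter.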
1836 #[derive(Copy, Clone, Debug)]
1837 pub enum SizeSkeleton<'tcx> {
1838 /// Any statically computable Layout.
1841 /// A potentially-fat pointer.
1843 /// If true, this pointer is never null.
1845 /// The type which determines the unsized metadata, if any,
1846 /// of this pointer. Either a type parameter or a projection
1847 /// depending on one, with regions erased.
1852 impl<'a, 'tcx> SizeSkeleton<'tcx> {
1853 pub fn compute(ty: Ty<'tcx>,
1854 tcx: TyCtxt<'a, 'tcx, 'tcx>,
1855 param_env: ty::ParamEnv<'tcx>)
1856 -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
1857 assert!(!ty.has_infer_types());
1859 // First try computing a static layout.
1860 let err = match (tcx, param_env).layout_of(ty) {
1862 return Ok(SizeSkeleton::Known(layout.size));
1868 ty::TyRef(_, ty::TypeAndMut { ty: pointee, .. }) |
1869 ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
1870 let non_zero = !ty.is_unsafe_ptr();
1871 let tail = tcx.struct_tail(pointee);
1873 ty::TyParam(_) | ty::TyProjection(_) => {
1874 assert!(tail.has_param_types() || tail.has_self_ty());
1875 Ok(SizeSkeleton::Pointer {
1877 tail: tcx.erase_regions(&tail)
1881 bug!("SizeSkeleton::compute({}): layout errored ({}), yet \
1882 tail `{}` is not a type parameter or a projection",
1888 ty::TyAdt(def, substs) => {
1889 // Only newtypes and enums w/ nullable pointer optimization.
1890 if def.is_union() || def.variants.is_empty() || def.variants.len() > 2 {
1894 // Get a zero-sized variant or a pointer newtype.
1895 let zero_or_ptr_variant = |i: usize| {
1896 let fields = def.variants[i].fields.iter().map(|field| {
1897 SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env)
1900 for field in fields {
1903 SizeSkeleton::Known(size) => {
1904 if size.bytes() > 0 {
1908 SizeSkeleton::Pointer {..} => {
1919 let v0 = zero_or_ptr_variant(0)?;
1921 if def.variants.len() == 1 {
1922 if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
1923 return Ok(SizeSkeleton::Pointer {
1924 non_zero: non_zero ||
1925 Some(def.did) == tcx.lang_items().non_zero(),
1933 let v1 = zero_or_ptr_variant(1)?;
1934 // Nullable pointer enum optimization.
1936 (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None) |
1937 (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
1938 Ok(SizeSkeleton::Pointer {
1947 ty::TyProjection(_) | ty::TyAnon(..) => {
1948 let normalized = tcx.normalize_associated_type_in_env(&ty, param_env);
1949 if ty == normalized {
1952 SizeSkeleton::compute(normalized, tcx, param_env)
1960 pub fn same_size(self, other: SizeSkeleton) -> bool {
1961 match (self, other) {
1962 (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
1963 (SizeSkeleton::Pointer { tail: a, .. },
1964 SizeSkeleton::Pointer { tail: b, .. }) => a == b,
1970 /// The details of the layout of a type, alongside the type itself.
1971 /// Provides various type traversal APIs (e.g. recursing into fields).
1973 /// Note that the details are NOT guaranteed to always be identical
1974 /// to those obtained from `layout_of(ty)`, as we need to produce
1975 /// layouts for which Rust types do not exist, such as enum variants
1976 /// or synthetic fields of enums (i.e. discriminants) and fat pointers.
1977 #[derive(Copy, Clone, Debug)]
1978 pub struct TyLayout<'tcx> {
1980 details: &'tcx LayoutDetails
1983 impl<'tcx> Deref for TyLayout<'tcx> {
1984 type Target = &'tcx LayoutDetails;
1985 fn deref(&self) -> &&'tcx LayoutDetails {
1990 pub trait HasTyCtxt<'tcx>: HasDataLayout {
1991 fn tcx<'a>(&'a self) -> TyCtxt<'a, 'tcx, 'tcx>;
1994 impl<'a, 'gcx, 'tcx> HasDataLayout for TyCtxt<'a, 'gcx, 'tcx> {
1995 fn data_layout(&self) -> &TargetDataLayout {
2000 impl<'a, 'gcx, 'tcx> HasTyCtxt<'gcx> for TyCtxt<'a, 'gcx, 'tcx> {
2001 fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'gcx> {
2006 impl<'a, 'gcx, 'tcx, T: Copy> HasDataLayout for (TyCtxt<'a, 'gcx, 'tcx>, T) {
2007 fn data_layout(&self) -> &TargetDataLayout {
2008 self.0.data_layout()
2012 impl<'a, 'gcx, 'tcx, T: Copy> HasTyCtxt<'gcx> for (TyCtxt<'a, 'gcx, 'tcx>, T) {
2013 fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'gcx> {
2018 pub trait MaybeResult<T> {
2019 fn from_ok(x: T) -> Self;
2020 fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self;
2023 impl<T> MaybeResult<T> for T {
2024 fn from_ok(x: T) -> Self {
2027 fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self {
2032 impl<T, E> MaybeResult<T> for Result<T, E> {
2033 fn from_ok(x: T) -> Self {
2036 fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self {
2041 pub trait LayoutOf<T> {
2044 fn layout_of(self, ty: T) -> Self::TyLayout;
2047 impl<'a, 'tcx> LayoutOf<Ty<'tcx>> for (TyCtxt<'a, 'tcx, 'tcx>, ty::ParamEnv<'tcx>) {
2048 type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;
2050 /// Computes the layout of a type. Note that this implicitly
2051 /// executes in "reveal all" mode.
2053 fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
2054 let (tcx, param_env) = self;
2056 let ty = tcx.normalize_associated_type_in_env(&ty, param_env.reveal_all());
2057 let details = tcx.layout_raw(param_env.reveal_all().and(ty))?;
2058 let layout = TyLayout {
2063 // NB: This recording is normally disabled; when enabled, it
2064 // can however trigger recursive invocations of `layout_of`.
2065 // Therefore, we execute it *after* the main query has
2066 // completed, to avoid problems around recursive structures
2067 // and the like. (Admittedly, I wasn't able to reproduce a problem
2068 // here, but it seems like the right thing to do. -nmatsakis)
2069 LayoutDetails::record_layout_for_printing(tcx, ty, param_env, layout);
2075 impl<'a, 'tcx> LayoutOf<Ty<'tcx>> for (ty::maps::TyCtxtAt<'a, 'tcx, 'tcx>,
2076 ty::ParamEnv<'tcx>) {
2077 type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;
2079 /// Computes the layout of a type. Note that this implicitly
2080 /// executes in "reveal all" mode.
2082 fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
2083 let (tcx_at, param_env) = self;
2085 let ty = tcx_at.tcx.normalize_associated_type_in_env(&ty, param_env.reveal_all());
2086 let details = tcx_at.layout_raw(param_env.reveal_all().and(ty))?;
2087 let layout = TyLayout {
2092 // NB: This recording is normally disabled; when enabled, it
2093 // can however trigger recursive invocations of `layout_of`.
2094 // Therefore, we execute it *after* the main query has
2095 // completed, to avoid problems around recursive structures
2096 // and the like. (Admittedly, I wasn't able to reproduce a problem
2097 // here, but it seems like the right thing to do. -nmatsakis)
2098 LayoutDetails::record_layout_for_printing(tcx_at.tcx, ty, param_env, layout);
2104 impl<'a, 'tcx> TyLayout<'tcx> {
2105 pub fn for_variant<C>(&self, cx: C, variant_index: usize) -> Self
2106 where C: LayoutOf<Ty<'tcx>> + HasTyCtxt<'tcx>,
2107 C::TyLayout: MaybeResult<TyLayout<'tcx>>
2109 let details = match self.variants {
2110 Variants::Single { index } if index == variant_index => self.details,
2112 Variants::Single { index } => {
2113 // Deny calling for_variant more than once for non-Single enums.
2114 cx.layout_of(self.ty).map_same(|layout| {
2115 assert_eq!(layout.variants, Variants::Single { index });
2119 let fields = match self.ty.sty {
2120 ty::TyAdt(def, _) => def.variants[variant_index].fields.len(),
2123 let mut details = LayoutDetails::uninhabited(fields);
2124 details.variants = Variants::Single { index: variant_index };
2125 cx.tcx().intern_layout(details)
2128 Variants::NicheFilling { ref variants, .. } |
2129 Variants::Tagged { ref variants, .. } => {
2130 &variants[variant_index]
2134 assert_eq!(details.variants, Variants::Single { index: variant_index });
2142 pub fn field<C>(&self, cx: C, i: usize) -> C::TyLayout
2143 where C: LayoutOf<Ty<'tcx>> + HasTyCtxt<'tcx>,
2144 C::TyLayout: MaybeResult<TyLayout<'tcx>>
2147 cx.layout_of(match self.ty.sty {
2157 ty::TyForeign(..) => {
2158 bug!("TyLayout::field_type({:?}): not applicable", self)
2161 // Potentially-fat pointers.
2162 ty::TyRef(_, ty::TypeAndMut { ty: pointee, .. }) |
2163 ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
2166 // Reuse the fat *T type as its own thin pointer data field.
2167 // This provides information about e.g. DST struct pointees
2168 // (which may have no non-DST form), and will work as long
2169 // as the `Abi` or `FieldPlacement` is checked by users.
2171 let nil = tcx.mk_nil();
2172 let ptr_ty = if self.ty.is_unsafe_ptr() {
2175 tcx.mk_mut_ref(tcx.types.re_static, nil)
2177 return cx.layout_of(ptr_ty).map_same(|mut ptr_layout| {
2178 ptr_layout.ty = self.ty;
2183 match tcx.struct_tail(pointee).sty {
2185 ty::TyStr => tcx.types.usize,
2186 ty::TyDynamic(..) => {
2187 // FIXME(eddyb) use a usize/fn() array with
2188 // the correct number of vtable slots.
2189 tcx.mk_imm_ref(tcx.types.re_static, tcx.mk_nil())
2191 _ => bug!("TyLayout::field_type({:?}): not applicable", self)
2195 // Arrays and slices.
2196 ty::TyArray(element, _) |
2197 ty::TySlice(element) => element,
2198 ty::TyStr => tcx.types.u8,
2200 // Tuples, generators and closures.
2201 ty::TyClosure(def_id, ref substs) => {
2202 substs.upvar_tys(def_id, tcx).nth(i).unwrap()
2205 ty::TyGenerator(def_id, ref substs, _) => {
2206 substs.field_tys(def_id, tcx).nth(i).unwrap()
2209 ty::TyTuple(tys, _) => tys[i],
2211 // SIMD vector types.
2212 ty::TyAdt(def, ..) if def.repr.simd() => {
2213 self.ty.simd_type(tcx)
2217 ty::TyAdt(def, substs) => {
2218 match self.variants {
2219 Variants::Single { index } => {
2220 def.variants[index].fields[i].ty(tcx, substs)
2223 // Discriminant field for enums (where applicable).
2224 Variants::Tagged { ref discr, .. } |
2225 Variants::NicheFilling { niche: ref discr, .. } => {
2227 let layout = LayoutDetails::scalar(tcx, discr.clone());
2228 return MaybeResult::from_ok(TyLayout {
2229 details: tcx.intern_layout(layout),
2230 ty: discr.value.to_ty(tcx)
2236 ty::TyProjection(_) | ty::TyAnon(..) | ty::TyParam(_) |
2237 ty::TyInfer(_) | ty::TyError => {
2238 bug!("TyLayout::field_type: unexpected type `{}`", self.ty)
2243 /// Returns true if the layout corresponds to an unsized type.
2244 pub fn is_unsized(&self) -> bool {
2245 self.abi.is_unsized()
2248 /// Returns true if the fields of the layout are packed.
2249 pub fn is_packed(&self) -> bool {
2250 self.abi.is_packed()
2253 /// Returns true if the type is a ZST and not unsized.
2254 pub fn is_zst(&self) -> bool {
2256 Abi::Uninhabited => true,
2257 Abi::Scalar(_) | Abi::ScalarPair(..) => false,
2258 Abi::Vector => self.size.bytes() == 0,
2259 Abi::Aggregate { sized, .. } => sized && self.size.bytes() == 0
2263 pub fn size_and_align(&self) -> (Size, Align) {
2264 (self.size, self.align)
2267 /// Find the offset of a niche leaf field that has at least `count`
2268 /// consecutive invalid values, starting from the given type and
2269 /// recursing through aggregates.
2270 /// The returned tuple is `(offset, scalar, niche_start)`.
2271 // FIXME(eddyb) traverse already optimized enums.
2272 fn find_niche<C>(&self, cx: C, count: u128)
2273 -> Result<Option<(Size, Scalar, u128)>, LayoutError<'tcx>>
2274 where C: LayoutOf<Ty<'tcx>, TyLayout = Result<Self, LayoutError<'tcx>>> +
2277 let scalar_component = |scalar: &Scalar, offset| {
2278 let Scalar { value, valid_range: ref v } = *scalar;
2280 let bits = value.size(cx).bits();
2281 assert!(bits <= 128);
2282 let max_value = !0u128 >> (128 - bits);
2284 // Find out how many values are outside the valid range.
2285 let niches = if v.start <= v.end {
2286 v.start + (max_value - v.end)
2291 // Give up if we can't fit `count` consecutive niches.
2296 let niche_start = v.end.wrapping_add(1) & max_value;
2297 let niche_end = v.end.wrapping_add(count) & max_value;
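// For example (an illustrative case with count == 1): a `bool` scalar with
// valid_range 0..=1 in 8 bits has 254 niches; niche_start is 2 and the
// scalar returned below carries valid_range 0..=2.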
2298 Some((offset, Scalar {
2300 valid_range: v.start..=niche_end
2305 Abi::Scalar(ref scalar) => {
2306 return Ok(scalar_component(scalar, Size::from_bytes(0)));
2308 Abi::ScalarPair(ref a, ref b) => {
2309 return Ok(scalar_component(a, Size::from_bytes(0)).or_else(|| {
2310 scalar_component(b, a.value.size(cx).abi_align(b.value.align(cx)))
2316 // Perhaps one of the fields is non-zero, let's recurse and find out.
2317 if let FieldPlacement::Union(_) = self.fields {
2318 // Only Rust enums have safe-to-inspect fields
2319 // (a discriminant); other unions are unsafe.
2320 if let Variants::Single { .. } = self.variants {
2324 if let FieldPlacement::Array { .. } = self.fields {
2325 if self.fields.count() > 0 {
2326 return self.field(cx, 0)?.find_niche(cx, count);
2329 for i in 0..self.fields.count() {
2330 let r = self.field(cx, i)?.find_niche(cx, count)?;
2331 if let Some((offset, scalar, niche_value)) = r {
2332 let offset = self.fields.offset(i) + offset;
2333 return Ok(Some((offset, scalar, niche_value)));
2340 impl<'gcx> HashStable<StableHashingContext<'gcx>> for Variants {
2341 fn hash_stable<W: StableHasherResult>(&self,
2342 hcx: &mut StableHashingContext<'gcx>,
2343 hasher: &mut StableHasher<W>) {
2344 use ty::layout::Variants::*;
2345 mem::discriminant(self).hash_stable(hcx, hasher);
2348 Single { index } => {
2349 index.hash_stable(hcx, hasher);
2355 discr.hash_stable(hcx, hasher);
2356 variants.hash_stable(hcx, hasher);
2360 niche_variants: RangeInclusive { start, end },
2365 dataful_variant.hash_stable(hcx, hasher);
2366 start.hash_stable(hcx, hasher);
2367 end.hash_stable(hcx, hasher);
2368 niche.hash_stable(hcx, hasher);
2369 niche_start.hash_stable(hcx, hasher);
2370 variants.hash_stable(hcx, hasher);
2376 impl<'gcx> HashStable<StableHashingContext<'gcx>> for FieldPlacement {
2377 fn hash_stable<W: StableHasherResult>(&self,
2378 hcx: &mut StableHashingContext<'gcx>,
2379 hasher: &mut StableHasher<W>) {
2380 use ty::layout::FieldPlacement::*;
2381 mem::discriminant(self).hash_stable(hcx, hasher);
2385 count.hash_stable(hcx, hasher);
2387 Array { count, stride } => {
2388 count.hash_stable(hcx, hasher);
2389 stride.hash_stable(hcx, hasher);
2391 Arbitrary { ref offsets, ref memory_index } => {
2392 offsets.hash_stable(hcx, hasher);
2393 memory_index.hash_stable(hcx, hasher);
2399 impl<'gcx> HashStable<StableHashingContext<'gcx>> for Abi {
2400 fn hash_stable<W: StableHasherResult>(&self,
2401 hcx: &mut StableHashingContext<'gcx>,
2402 hasher: &mut StableHasher<W>) {
2403 use ty::layout::Abi::*;
2404 mem::discriminant(self).hash_stable(hcx, hasher);
2408 Scalar(ref value) => {
2409 value.hash_stable(hcx, hasher);
2411 ScalarPair(ref a, ref b) => {
2412 a.hash_stable(hcx, hasher);
2413 b.hash_stable(hcx, hasher);
2416 Aggregate { packed, sized } => {
2417 packed.hash_stable(hcx, hasher);
2418 sized.hash_stable(hcx, hasher);
2424 impl<'gcx> HashStable<StableHashingContext<'gcx>> for Scalar {
2425 fn hash_stable<W: StableHasherResult>(&self,
2426 hcx: &mut StableHashingContext<'gcx>,
2427 hasher: &mut StableHasher<W>) {
2428 let Scalar { value, valid_range: RangeInclusive { start, end } } = *self;
2429 value.hash_stable(hcx, hasher);
2430 start.hash_stable(hcx, hasher);
2431 end.hash_stable(hcx, hasher);
2435 impl_stable_hash_for!(struct ::ty::layout::LayoutDetails {
2443 impl_stable_hash_for!(enum ::ty::layout::Integer {
2451 impl_stable_hash_for!(enum ::ty::layout::Primitive {
2452 Int(integer, signed),
2458 impl_stable_hash_for!(struct ::ty::layout::Align {
2463 impl_stable_hash_for!(struct ::ty::layout::Size {
2467 impl<'gcx> HashStable<StableHashingContext<'gcx>> for LayoutError<'gcx>
2469 fn hash_stable<W: StableHasherResult>(&self,
2470 hcx: &mut StableHashingContext<'gcx>,
2471 hasher: &mut StableHasher<W>) {
2472 use ty::layout::LayoutError::*;
2473 mem::discriminant(self).hash_stable(hcx, hasher);
2477 SizeOverflow(t) => t.hash_stable(hcx, hasher)