pub use Integer::*;
pub use Primitive::*;

use crate::spec::Target;

use std::convert::{TryFrom, TryInto};
use std::fmt;
use std::num::NonZeroUsize;
use std::ops::{Add, AddAssign, Deref, Mul, Range, RangeInclusive, Sub};
use std::str::FromStr;

use rustc_index::vec::{Idx, IndexVec};
use rustc_macros::HashStable_Generic;
use rustc_serialize::json::{Json, ToJson};
use rustc_span::Span;

pub mod call;

/// Parsed [Data layout](https://llvm.org/docs/LangRef.html#data-layout)
/// for a target, which contains everything needed to compute layouts.
pub struct TargetDataLayout {
    pub endian: Endian,
    pub i1_align: AbiAndPrefAlign,
    pub i8_align: AbiAndPrefAlign,
    pub i16_align: AbiAndPrefAlign,
    pub i32_align: AbiAndPrefAlign,
    pub i64_align: AbiAndPrefAlign,
    pub i128_align: AbiAndPrefAlign,
    pub f32_align: AbiAndPrefAlign,
    pub f64_align: AbiAndPrefAlign,
    pub pointer_size: Size,
    pub pointer_align: AbiAndPrefAlign,
    pub aggregate_align: AbiAndPrefAlign,

    /// Alignments for vector types.
    pub vector_align: Vec<(Size, AbiAndPrefAlign)>,

    pub instruction_address_space: AddressSpace,
}

impl Default for TargetDataLayout {
    /// Creates an instance of `TargetDataLayout`.
    fn default() -> TargetDataLayout {
        let align = |bits| Align::from_bits(bits).unwrap();
        TargetDataLayout {
            endian: Endian::Big,
            i1_align: AbiAndPrefAlign::new(align(8)),
            i8_align: AbiAndPrefAlign::new(align(8)),
            i16_align: AbiAndPrefAlign::new(align(16)),
            i32_align: AbiAndPrefAlign::new(align(32)),
            i64_align: AbiAndPrefAlign { abi: align(32), pref: align(64) },
            i128_align: AbiAndPrefAlign { abi: align(32), pref: align(64) },
            f32_align: AbiAndPrefAlign::new(align(32)),
            f64_align: AbiAndPrefAlign::new(align(64)),
            pointer_size: Size::from_bits(64),
            pointer_align: AbiAndPrefAlign::new(align(64)),
            aggregate_align: AbiAndPrefAlign { abi: align(0), pref: align(64) },
            vector_align: vec![
                (Size::from_bits(64), AbiAndPrefAlign::new(align(64))),
                (Size::from_bits(128), AbiAndPrefAlign::new(align(128))),
            ],
            instruction_address_space: AddressSpace::DATA,
        }
    }
}

impl TargetDataLayout {
    pub fn parse(target: &Target) -> Result<TargetDataLayout, String> {
        // Parse an address space index from a string.
        let parse_address_space = |s: &str, cause: &str| {
            s.parse::<u32>().map(AddressSpace).map_err(|err| {
                format!("invalid address space `{}` for `{}` in \"data-layout\": {}", s, cause, err)
            })
        };

        // Parse a bit count from a string.
        let parse_bits = |s: &str, kind: &str, cause: &str| {
            s.parse::<u64>().map_err(|err| {
                format!("invalid {} `{}` for `{}` in \"data-layout\": {}", kind, s, cause, err)
            })
        };

        // Parse a size string.
        let size = |s: &str, cause: &str| parse_bits(s, "size", cause).map(Size::from_bits);

        // Parse an alignment string.
        let align = |s: &[&str], cause: &str| {
            if s.is_empty() {
                return Err(format!("missing alignment for `{}` in \"data-layout\"", cause));
            }
            let align_from_bits = |bits| {
                Align::from_bits(bits).map_err(|err| {
                    format!("invalid alignment for `{}` in \"data-layout\": {}", cause, err)
                })
            };
            let abi = parse_bits(s[0], "alignment", cause)?;
            let pref = s.get(1).map_or(Ok(abi), |pref| parse_bits(pref, "alignment", cause))?;
            Ok(AbiAndPrefAlign { abi: align_from_bits(abi)?, pref: align_from_bits(pref)? })
        };

        let mut dl = TargetDataLayout::default();
        let mut i128_align_src = 64;
        for spec in target.data_layout.split('-') {
            let spec_parts = spec.split(':').collect::<Vec<_>>();

            match &*spec_parts {
                ["e"] => dl.endian = Endian::Little,
                ["E"] => dl.endian = Endian::Big,
                [p] if p.starts_with('P') => {
                    dl.instruction_address_space = parse_address_space(&p[1..], "P")?
                }
                ["a", ref a @ ..] => dl.aggregate_align = align(a, "a")?,
                ["f32", ref a @ ..] => dl.f32_align = align(a, "f32")?,
                ["f64", ref a @ ..] => dl.f64_align = align(a, "f64")?,
                [p @ "p", s, ref a @ ..] | [p @ "p0", s, ref a @ ..] => {
                    dl.pointer_size = size(s, p)?;
                    dl.pointer_align = align(a, p)?;
                }
                [s, ref a @ ..] if s.starts_with('i') => {
                    let bits = match s[1..].parse::<u64>() {
                        Ok(bits) => bits,
                        Err(_) => {
                            size(&s[1..], "i")?; // For the user error.
                            continue;
                        }
                    };
                    let a = align(a, s)?;
                    match bits {
                        1 => dl.i1_align = a,
                        8 => dl.i8_align = a,
                        16 => dl.i16_align = a,
                        32 => dl.i32_align = a,
                        64 => dl.i64_align = a,
                        _ => {}
                    }
                    if bits >= i128_align_src && bits <= 128 {
                        // Default alignment for i128 is decided by taking the alignment of
                        // largest-sized i{64..=128}.
                        i128_align_src = bits;
                        dl.i128_align = a;
                    }
                }
                [s, ref a @ ..] if s.starts_with('v') => {
                    let v_size = size(&s[1..], "v")?;
                    let a = align(a, s)?;
                    if let Some(v) = dl.vector_align.iter_mut().find(|v| v.0 == v_size) {
                        v.1 = a;
                        continue;
                    }
                    // No existing entry, add a new one.
                    dl.vector_align.push((v_size, a));
                }
                _ => {} // Ignore everything else.
            }
        }

        // Perform consistency checks against the Target information.
        if dl.endian != target.endian {
            return Err(format!(
                "inconsistent target specification: \"data-layout\" claims \
                 architecture is {}-endian, while \"target-endian\" is `{}`",
                dl.endian.as_str(),
                target.endian.as_str(),
            ));
        }

        if dl.pointer_size.bits() != target.pointer_width.into() {
            return Err(format!(
                "inconsistent target specification: \"data-layout\" claims \
                 pointers are {}-bit, while \"target-pointer-width\" is `{}`",
                dl.pointer_size.bits(),
                target.pointer_width
            ));
        }

        Ok(dl)
    }
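
    // A minimal usage sketch (illustrative, not part of the original file);
    // the string below is a typical x86_64 data layout, and `target` is
    // assumed to be a `Target` carrying it:
    //
    //     let dl = TargetDataLayout::parse(&target)?;
    //     // "e" in "e-m:e-...-i64:64-...-S128" selects little-endian and
    //     // "i64:64" sets the i64 ABI alignment, while unknown specs such
    //     // as "m:e" and "S128" fall through to the catch-all arm.
    //     assert_eq!(dl.endian, Endian::Little);
    //     assert_eq!(dl.i64_align.abi, Align::from_bits(64).unwrap());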

    /// Returns exclusive upper bound on object size.
    ///
    /// The theoretical maximum object size is defined as the maximum positive `isize` value.
    /// This ensures that the `offset` semantics remain well-defined by allowing it to correctly
    /// index every address within an object along with one byte past the end, along with allowing
    /// `isize` to store the difference between any two pointers into an object.
    ///
    /// The upper bound on 64-bit currently needs to be lower because LLVM uses a 64-bit integer
    /// to represent object size in bits. It would need to be 1 << 61 to account for this, but is
    /// currently conservatively bounded to 1 << 47 as that is enough to cover the current usable
    /// address space on 64-bit ARMv8 and x86_64.
    pub fn obj_size_bound(&self) -> u64 {
        match self.pointer_size.bits() {
            16 => 1 << 15,
            32 => 1 << 31,
            64 => 1 << 47,
            bits => panic!("obj_size_bound: unknown pointer bit size {}", bits),
        }
    }
    pub fn ptr_sized_integer(&self) -> Integer {
        match self.pointer_size.bits() {
            16 => I16,
            32 => I32,
            64 => I64,
            bits => panic!("ptr_sized_integer: unknown pointer bit size {}", bits),
        }
    }
    pub fn vector_align(&self, vec_size: Size) -> AbiAndPrefAlign {
        for &(size, align) in &self.vector_align {
            if size == vec_size {
                return align;
            }
        }
        // Default to natural alignment, which is what LLVM does.
        // That is, use the size, rounded up to a power of 2.
        AbiAndPrefAlign::new(Align::from_bytes(vec_size.bytes().next_power_of_two()).unwrap())
    }
}

pub trait HasDataLayout {
    fn data_layout(&self) -> &TargetDataLayout;
}

impl HasDataLayout for TargetDataLayout {
    #[inline]
    fn data_layout(&self) -> &TargetDataLayout {
        self
    }
}

/// Endianness of the target, which must match cfg(target-endian).
#[derive(Copy, Clone, PartialEq)]
pub enum Endian {
    Little,
    Big,
}

impl Endian {
    pub fn as_str(&self) -> &'static str {
        match self {
            Self::Little => "little",
            Self::Big => "big",
        }
    }
}

impl fmt::Debug for Endian {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(self.as_str())
    }
}

impl FromStr for Endian {
    type Err = String;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "little" => Ok(Self::Little),
            "big" => Ok(Self::Big),
            _ => Err(format!(r#"unknown endian: "{}""#, s)),
        }
    }
}

impl ToJson for Endian {
    fn to_json(&self) -> Json {
        self.as_str().to_json()
    }
}

/// Size of a type in bytes.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Encodable, Decodable)]
#[derive(HashStable_Generic)]
pub struct Size {
    // The top 3 bits are ALWAYS zero.
    raw: u64,
}

impl Size {
    pub const ZERO: Size = Size { raw: 0 };

    /// Rounds `bits` up to the next-higher byte boundary, if `bits` is
    /// not a multiple of 8.
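    ///
    /// An illustrative sketch (example values added, not from the original
    /// documentation):
    ///
    /// ```ignore (illustrative)
    /// assert_eq!(Size::from_bits(1).bytes(), 1); // rounds up
    /// assert_eq!(Size::from_bits(8).bytes(), 1); // exact
    /// assert_eq!(Size::from_bits(9).bytes(), 2); // rounds up
    /// ```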
    pub fn from_bits(bits: impl TryInto<u64>) -> Size {
        let bits = bits.try_into().ok().unwrap();

        #[cold]
        fn overflow(bits: u64) -> ! {
            panic!("Size::from_bits({}) has overflowed", bits);
        }

        // This is the largest value of `bits` that does not cause overflow
        // during rounding, and guarantees that the resulting number of bytes
        // cannot cause overflow when multiplied by 8.
        if bits > 0xffff_ffff_ffff_fff8 {
            overflow(bits);
        }

        // Avoid potential overflow from `bits + 7`.
        Size { raw: bits / 8 + ((bits % 8) + 7) / 8 }
    }
    #[inline]
    pub fn from_bytes(bytes: impl TryInto<u64>) -> Size {
        let bytes: u64 = bytes.try_into().ok().unwrap();
        Size { raw: bytes }
    }

    #[inline]
    pub fn bytes(self) -> u64 {
        self.raw
    }

    #[inline]
    pub fn bytes_usize(self) -> usize {
        self.bytes().try_into().unwrap()
    }

    #[inline]
    pub fn bits(self) -> u64 {
        self.raw << 3
    }

    #[inline]
    pub fn bits_usize(self) -> usize {
        self.bits().try_into().unwrap()
    }

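    /// Rounds `self` up to the nearest multiple of `align`. A sketch of the
    /// behavior (example values added for illustration):
    ///
    /// ```ignore (illustrative)
    /// let five = Size::from_bytes(5);
    /// let four = Align::from_bytes(4).unwrap();
    /// assert_eq!(five.align_to(four), Size::from_bytes(8));
    /// assert!(!five.is_aligned(four));
    /// ```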
    #[inline]
    pub fn align_to(self, align: Align) -> Size {
        let mask = align.bytes() - 1;
        Size::from_bytes((self.bytes() + mask) & !mask)
    }

    #[inline]
    pub fn is_aligned(self, align: Align) -> bool {
        let mask = align.bytes() - 1;
        self.bytes() & mask == 0
    }

    #[inline]
    pub fn checked_add<C: HasDataLayout>(self, offset: Size, cx: &C) -> Option<Size> {
        let dl = cx.data_layout();

        let bytes = self.bytes().checked_add(offset.bytes())?;

        if bytes < dl.obj_size_bound() { Some(Size::from_bytes(bytes)) } else { None }
    }

    #[inline]
    pub fn checked_mul<C: HasDataLayout>(self, count: u64, cx: &C) -> Option<Size> {
        let dl = cx.data_layout();

        let bytes = self.bytes().checked_mul(count)?;
        if bytes < dl.obj_size_bound() { Some(Size::from_bytes(bytes)) } else { None }
    }

    /// Truncates `value` to `self` bits and then sign-extends it to 128 bits
    /// (i.e., if it is negative, fill with 1's on the left).
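    ///
    /// A sketch using an 8-bit size (example values added, not from the
    /// original documentation):
    ///
    /// ```ignore (illustrative)
    /// let i8_size = Size::from_bits(8);
    /// assert_eq!(i8_size.sign_extend(0xff), u128::MAX); // -1i8 stays -1
    /// assert_eq!(i8_size.sign_extend(0x7f), 0x7f);      // positive values unchanged
    /// ```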
    #[inline]
    pub fn sign_extend(self, value: u128) -> u128 {
        let size = self.bits();
        if size == 0 {
            // Truncated until nothing is left.
            return 0;
        }
        let shift = 128 - size;
        // Shift the unsigned value to the left, then shift back to the right as signed
        // (essentially fills with sign bit on the left).
        (((value << shift) as i128) >> shift) as u128
    }

    /// Truncates `value` to `self` bits.
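    ///
    /// A sketch (example values added, not from the original documentation):
    ///
    /// ```ignore (illustrative)
    /// let i8_size = Size::from_bits(8);
    /// assert_eq!(i8_size.truncate(0x1234), 0x34); // only the low 8 bits survive
    /// ```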
    #[inline]
    pub fn truncate(self, value: u128) -> u128 {
        let size = self.bits();
        if size == 0 {
            // Truncated until nothing is left.
            return 0;
        }
        let shift = 128 - size;
        // Truncate (shift left to drop out leftover values, shift right to fill with zeroes).
        (value << shift) >> shift
    }
}

// Panicking addition, subtraction and multiplication for convenience.
// Avoid during layout computation, return `LayoutError` instead.

impl Add for Size {
    type Output = Size;
    #[inline]
    fn add(self, other: Size) -> Size {
        Size::from_bytes(self.bytes().checked_add(other.bytes()).unwrap_or_else(|| {
            panic!("Size::add: {} + {} doesn't fit in u64", self.bytes(), other.bytes())
        }))
    }
}

impl Sub for Size {
    type Output = Size;
    #[inline]
    fn sub(self, other: Size) -> Size {
        Size::from_bytes(self.bytes().checked_sub(other.bytes()).unwrap_or_else(|| {
            panic!("Size::sub: {} - {} would result in negative size", self.bytes(), other.bytes())
        }))
    }
}

impl Mul<Size> for u64 {
    type Output = Size;
    #[inline]
    fn mul(self, size: Size) -> Size {
        size * self
    }
}

impl Mul<u64> for Size {
    type Output = Size;
    #[inline]
    fn mul(self, count: u64) -> Size {
        match self.bytes().checked_mul(count) {
            Some(bytes) => Size::from_bytes(bytes),
            None => panic!("Size::mul: {} * {} doesn't fit in u64", self.bytes(), count),
        }
    }
}

impl AddAssign for Size {
    #[inline]
    fn add_assign(&mut self, other: Size) {
        *self = *self + other;
    }
}

/// Alignment of a type in bytes (always a power of two).
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Encodable, Decodable)]
#[derive(HashStable_Generic)]
pub struct Align {
    pow2: u8,
}

impl Align {
    pub const ONE: Align = Align { pow2: 0 };

    #[inline]
    pub fn from_bits(bits: u64) -> Result<Align, String> {
        Align::from_bytes(Size::from_bits(bits).bytes())
    }

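    /// Creates an `Align` from a byte count, which must be a power of two.
    /// A sketch of accepted and rejected inputs (example values added for
    /// illustration, not from the original documentation):
    ///
    /// ```ignore (illustrative)
    /// assert_eq!(Align::from_bytes(0), Ok(Align::ONE)); // 0 is treated as 1
    /// assert_eq!(Align::from_bytes(8).unwrap().bytes(), 8);
    /// assert!(Align::from_bytes(3).is_err()); // not a power of 2
    /// ```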
    #[inline]
    pub fn from_bytes(align: u64) -> Result<Align, String> {
        // Treat an alignment of 0 bytes like 1-byte alignment.
        if align == 0 {
            return Ok(Align::ONE);
        }

        #[cold]
        fn not_power_of_2(align: u64) -> String {
            format!("`{}` is not a power of 2", align)
        }

        #[cold]
        fn too_large(align: u64) -> String {
            format!("`{}` is too large", align)
        }

        let mut bytes = align;
        let mut pow2: u8 = 0;
        while (bytes & 1) == 0 {
            pow2 += 1;
            bytes >>= 1;
        }
        if bytes != 1 {
            return Err(not_power_of_2(align));
        }
        if pow2 > 29 {
            return Err(too_large(align));
        }

        Ok(Align { pow2 })
    }

    #[inline]
    pub fn bytes(self) -> u64 {
        1 << self.pow2
    }

    #[inline]
    pub fn bits(self) -> u64 {
        self.bytes() * 8
    }

    /// Computes the best alignment possible for the given offset
    /// (the largest power of two that the offset is a multiple of).
    ///
    /// N.B., for an offset of `0`, this happens to return `2^64`.
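    ///
    /// A sketch (example values added, not from the original documentation):
    ///
    /// ```ignore (illustrative)
    /// // 24 = 0b11000 has three trailing zeros, so the best alignment is 8.
    /// assert_eq!(Align::max_for_offset(Size::from_bytes(24)).bytes(), 8);
    /// ```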
    #[inline]
    pub fn max_for_offset(offset: Size) -> Align {
        Align { pow2: offset.bytes().trailing_zeros() as u8 }
    }

    /// Lower the alignment, if necessary, such that the given offset
    /// is aligned to it (the offset is a multiple of the alignment).
    #[inline]
    pub fn restrict_for_offset(self, offset: Size) -> Align {
        self.min(Align::max_for_offset(offset))
    }
}

/// A pair of alignments, ABI-mandated and preferred.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, Encodable, Decodable)]
#[derive(HashStable_Generic)]
pub struct AbiAndPrefAlign {
    pub abi: Align,
    pub pref: Align,
}

impl AbiAndPrefAlign {
    pub fn new(align: Align) -> AbiAndPrefAlign {
        AbiAndPrefAlign { abi: align, pref: align }
    }

    pub fn min(self, other: AbiAndPrefAlign) -> AbiAndPrefAlign {
        AbiAndPrefAlign { abi: self.abi.min(other.abi), pref: self.pref.min(other.pref) }
    }

    pub fn max(self, other: AbiAndPrefAlign) -> AbiAndPrefAlign {
        AbiAndPrefAlign { abi: self.abi.max(other.abi), pref: self.pref.max(other.pref) }
    }
}

/// Integers, also used for enum discriminants.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, HashStable_Generic)]
pub enum Integer {
    I8,
    I16,
    I32,
    I64,
    I128,
}

impl Integer {
    pub fn size(self) -> Size {
        match self {
            I8 => Size::from_bytes(1),
            I16 => Size::from_bytes(2),
            I32 => Size::from_bytes(4),
            I64 => Size::from_bytes(8),
            I128 => Size::from_bytes(16),
        }
    }

    pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAndPrefAlign {
        let dl = cx.data_layout();

        match self {
            I8 => dl.i8_align,
            I16 => dl.i16_align,
            I32 => dl.i32_align,
            I64 => dl.i64_align,
            I128 => dl.i128_align,
        }
    }

    /// Finds the smallest Integer type which can represent the signed value.
    pub fn fit_signed(x: i128) -> Integer {
        match x {
            -0x0000_0000_0000_0080..=0x0000_0000_0000_007f => I8,
            -0x0000_0000_0000_8000..=0x0000_0000_0000_7fff => I16,
            -0x0000_0000_8000_0000..=0x0000_0000_7fff_ffff => I32,
            -0x8000_0000_0000_0000..=0x7fff_ffff_ffff_ffff => I64,
            _ => I128,
        }
    }

    /// Finds the smallest Integer type which can represent the unsigned value.
    pub fn fit_unsigned(x: u128) -> Integer {
        match x {
            0..=0x0000_0000_0000_00ff => I8,
            0..=0x0000_0000_0000_ffff => I16,
            0..=0x0000_0000_ffff_ffff => I32,
            0..=0xffff_ffff_ffff_ffff => I64,
            _ => I128,
        }
    }
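
    // An illustrative sketch of both `fit_*` helpers (not part of the
    // original file):
    //
    //     assert_eq!(Integer::fit_signed(-128), I8);  // i8::MIN still fits in I8
    //     assert_eq!(Integer::fit_signed(128), I16);  // one past i8::MAX needs I16
    //     assert_eq!(Integer::fit_unsigned(255), I8); // u8::MAX
    //     assert_eq!(Integer::fit_unsigned(256), I16);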

    /// Finds the smallest integer with the given alignment.
    pub fn for_align<C: HasDataLayout>(cx: &C, wanted: Align) -> Option<Integer> {
        let dl = cx.data_layout();

        for candidate in [I8, I16, I32, I64, I128] {
            if wanted == candidate.align(dl).abi && wanted.bytes() == candidate.size().bytes() {
                return Some(candidate);
            }
        }
        None
    }
    /// Find the largest integer with the given alignment or less.
    pub fn approximate_align<C: HasDataLayout>(cx: &C, wanted: Align) -> Integer {
        let dl = cx.data_layout();

        // FIXME(eddyb) maybe include I128 in the future, when it works everywhere.
        for candidate in [I64, I32, I16] {
            if wanted >= candidate.align(dl).abi && wanted.bytes() >= candidate.size().bytes() {
                return candidate;
            }
        }
        I8
    }
}

/// Fundamental unit of memory access and layout.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum Primitive {
    /// The `bool` is the signedness of the `Integer` type.
    ///
    /// One would think we would not care about such details this low down,
    /// but some ABIs are described in terms of C types and ISAs where the
    /// integer arithmetic is done on {sign,zero}-extended registers, e.g.
    /// a negative integer passed by zero-extension will appear positive in
    /// the callee, and most operations on it will produce the wrong values.
    Int(Integer, bool),
    F32,
    F64,
    Pointer,
}

impl Primitive {
    pub fn size<C: HasDataLayout>(self, cx: &C) -> Size {
        let dl = cx.data_layout();

        match self {
            Int(i, _) => i.size(),
            F32 => Size::from_bits(32),
            F64 => Size::from_bits(64),
            Pointer => dl.pointer_size,
        }
    }

    pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAndPrefAlign {
        let dl = cx.data_layout();

        match self {
            Int(i, _) => i.align(dl),
            F32 => dl.f32_align,
            F64 => dl.f64_align,
            Pointer => dl.pointer_align,
        }
    }

    pub fn is_float(self) -> bool {
        matches!(self, F32 | F64)
    }

    pub fn is_int(self) -> bool {
        matches!(self, Int(..))
    }
}

/// Information about one scalar component of a Rust type.
#[derive(Clone, PartialEq, Eq, Hash, Debug)]
#[derive(HashStable_Generic)]
pub struct Scalar {
    pub value: Primitive,

    /// Inclusive wrap-around range of valid values, that is, if
    /// start > end, it represents `start..=MAX`,
    /// followed by `0..=end`.
    ///
    /// That is, for an i8 primitive, a range of `254..=2` means following
    /// sequence:
    ///
    ///    254 (-2), 255 (-1), 0, 1, 2
    ///
    /// This is intended specifically to mirror LLVM’s `!range` metadata
    /// semantics.
    // FIXME(eddyb) always use the shortest range, e.g., by finding
    // the largest space between two consecutive valid values and
    // taking everything else as the (shortest) valid range.
    pub valid_range: RangeInclusive<u128>,
}

impl Scalar {
    pub fn is_bool(&self) -> bool {
        matches!(self.value, Int(I8, false)) && self.valid_range == (0..=1)
    }

    /// Returns the valid range as an `x..y` range.
    ///
    /// If `x` and `y` are equal, the range is full, not empty.
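    ///
    /// A sketch for an 8-bit scalar (example values added for illustration,
    /// not from the original documentation): the wrap-around range `254..=2`
    /// becomes `254..3`, and the full range `0..=255` becomes `0..0`
    /// (start equals end).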
    pub fn valid_range_exclusive<C: HasDataLayout>(&self, cx: &C) -> Range<u128> {
        // For a (max) value of -1, max will be `-1 as usize`, which overflows.
        // However, that is fine here (it would still represent the full range),
        // i.e., if the range is everything.
        let bits = self.value.size(cx).bits();
        assert!(bits <= 128);
        let mask = !0u128 >> (128 - bits);
        let start = *self.valid_range.start();
        let end = *self.valid_range.end();
        assert_eq!(start, start & mask);
        assert_eq!(end, end & mask);
        start..(end.wrapping_add(1) & mask)
    }
}

/// Describes how the fields of a type are located in memory.
#[derive(PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum FieldsShape {
    /// Scalar primitives and `!`, which never have fields.
    Primitive,

    /// All fields start at no offset. The `usize` is the field count.
    Union(NonZeroUsize),

    /// Array/vector-like placement, with all fields of identical types.
    Array { stride: Size, count: u64 },

    /// Struct-like placement, with precomputed offsets.
    ///
    /// Fields are guaranteed to not overlap, but note that gaps
    /// before, between and after all the fields are NOT always
    /// padding, and as such their contents may not be discarded.
    /// For example, enum variants leave a gap at the start,
    /// where the discriminant field in the enum layout goes.
    Arbitrary {
        /// Offsets for the first byte of each field,
        /// ordered to match the source definition order.
        /// This vector does not go in increasing order.
        // FIXME(eddyb) use small vector optimization for the common case.
        offsets: Vec<Size>,

        /// Maps source order field indices to memory order indices,
        /// depending on how the fields were reordered (if at all).
        /// This is a permutation, with both the source order and the
        /// memory order using the same (0..n) index ranges.
        ///
        /// Note that during computation of `memory_index`, sometimes
        /// it is easier to operate on the inverse mapping (that is,
        /// from memory order to source order), and that is usually
        /// named `inverse_memory_index`.
        // FIXME(eddyb) build a better abstraction for permutations, if possible.
        // FIXME(camlorn) also consider small vector optimization here.
        memory_index: Vec<u32>,
    },
}

impl FieldsShape {
    pub fn count(&self) -> usize {
        match *self {
            FieldsShape::Primitive => 0,
            FieldsShape::Union(count) => count.get(),
            FieldsShape::Array { count, .. } => count.try_into().unwrap(),
            FieldsShape::Arbitrary { ref offsets, .. } => offsets.len(),
        }
    }

    pub fn offset(&self, i: usize) -> Size {
        match *self {
            FieldsShape::Primitive => {
                unreachable!("FieldsShape::offset: `Primitive`s have no fields")
            }
            FieldsShape::Union(count) => {
                assert!(
                    i < count.get(),
                    "tried to access field {} of union with {} fields",
                    i,
                    count
                );
                Size::ZERO
            }
            FieldsShape::Array { stride, count } => {
                let i = u64::try_from(i).unwrap();
                assert!(i < count);
                stride * i
            }
            FieldsShape::Arbitrary { ref offsets, .. } => offsets[i],
        }
    }

    pub fn memory_index(&self, i: usize) -> usize {
        match *self {
            FieldsShape::Primitive => {
                unreachable!("FieldsShape::memory_index: `Primitive`s have no fields")
            }
            FieldsShape::Union(_) | FieldsShape::Array { .. } => i,
            FieldsShape::Arbitrary { ref memory_index, .. } => memory_index[i].try_into().unwrap(),
        }
    }

    /// Gets source indices of the fields by increasing offsets.
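    ///
    /// An illustrative sketch (example values added, not from the original
    /// documentation): with `offsets = [8, 0]` and `memory_index = [1, 0]`
    /// (source field 0 was moved after source field 1), this yields `1, 0`,
    /// i.e. the source indices in memory order.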
    #[inline]
    pub fn index_by_increasing_offset<'a>(&'a self) -> impl Iterator<Item = usize> + 'a {
        let mut inverse_small = [0u8; 64];
        let mut inverse_big = vec![];
        let use_small = self.count() <= inverse_small.len();

        // We have to write this logic twice in order to keep the array small.
        if let FieldsShape::Arbitrary { ref memory_index, .. } = *self {
            if use_small {
                for i in 0..self.count() {
                    inverse_small[memory_index[i] as usize] = i as u8;
                }
            } else {
                inverse_big = vec![0; self.count()];
                for i in 0..self.count() {
                    inverse_big[memory_index[i] as usize] = i as u32;
                }
            }
        }

        (0..self.count()).map(move |i| match *self {
            FieldsShape::Primitive | FieldsShape::Union(_) | FieldsShape::Array { .. } => i,
            FieldsShape::Arbitrary { .. } => {
                if use_small {
                    inverse_small[i] as usize
                } else {
                    inverse_big[i] as usize
                }
            }
        })
    }
}

/// An identifier that specifies the address space that some operation
/// should operate on. Special address spaces have an effect on code generation,
/// depending on the target and the address spaces it implements.
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct AddressSpace(pub u32);

impl AddressSpace {
    /// The default address space, corresponding to data space.
    pub const DATA: Self = AddressSpace(0);
}

/// Describes how values of the type are passed by target ABIs,
/// in terms of categories of C types there are ABI rules for.
#[derive(Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum Abi {
    Uninhabited,
    Scalar(Scalar),
    ScalarPair(Scalar, Scalar),
    Vector {
        element: Scalar,
        count: u64,
    },
    Aggregate {
        /// If true, the size is exact, otherwise it's only a lower bound.
        sized: bool,
    },
}

impl Abi {
    /// Returns `true` if the layout corresponds to an unsized type.
    pub fn is_unsized(&self) -> bool {
        match *self {
            Abi::Uninhabited | Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } => false,
            Abi::Aggregate { sized } => !sized,
        }
    }

    /// Returns `true` if this is a single signed integer scalar.
    pub fn is_signed(&self) -> bool {
        match *self {
            Abi::Scalar(ref scal) => match scal.value {
                Primitive::Int(_, signed) => signed,
                _ => false,
            },
            _ => panic!("`is_signed` on non-scalar ABI {:?}", self),
        }
    }

    /// Returns `true` if this is an uninhabited type.
    pub fn is_uninhabited(&self) -> bool {
        matches!(*self, Abi::Uninhabited)
    }

    /// Returns `true` if this is a scalar type.
    pub fn is_scalar(&self) -> bool {
        matches!(*self, Abi::Scalar(_))
    }
}

rustc_index::newtype_index! {
    pub struct VariantIdx {
        derive [HashStable_Generic]
    }
}

#[derive(PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum Variants {
    /// Single enum variants, structs/tuples, unions, and all non-ADTs.
    Single { index: VariantIdx },

    /// Enum-likes with more than one inhabited variant: each variant comes with
    /// a *discriminant* (usually the same as the variant index but the user can
    /// assign explicit discriminant values). That discriminant is encoded
    /// as a *tag* on the machine. The layout of each variant is
    /// a struct, and they all have space reserved for the tag.
    /// For enums, the tag is the sole field of the layout.
    Multiple {
        tag: Scalar,
        tag_encoding: TagEncoding,
        tag_field: usize,
        variants: IndexVec<VariantIdx, Layout>,
    },
}

#[derive(PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum TagEncoding {
    /// The tag directly stores the discriminant, but possibly with a smaller layout
    /// (so converting the tag to the discriminant can require sign extension).
    Direct,

    /// Niche (values invalid for a type) encoding the discriminant:
    /// Discriminant and variant index coincide.
    /// The variant `dataful_variant` contains a niche at an arbitrary
    /// offset (field `tag_field` of the enum), which for a variant with
    /// discriminant `d` is set to
    /// `(d - niche_variants.start).wrapping_add(niche_start)`.
    ///
    /// For example, `Option<(usize, &T)>` is represented such that
    /// `None` has a null pointer for the second tuple field, and
    /// `Some` is the identity function (with a non-null reference).
    Niche {
        dataful_variant: VariantIdx,
        niche_variants: RangeInclusive<VariantIdx>,
        niche_start: u128,
    },
}
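
// An illustrative computation of the niche formula above (not part of the
// original file): with `niche_variants` starting at variant index 1 and
// `niche_start = 2`, the variant with discriminant 3 is encoded as the tag
// (3 - 1).wrapping_add(2) = 4.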

#[derive(Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub struct Niche {
    pub offset: Size,
    pub scalar: Scalar,
}

impl Niche {
    pub fn from_scalar<C: HasDataLayout>(cx: &C, offset: Size, scalar: Scalar) -> Option<Self> {
        let niche = Niche { offset, scalar };
        if niche.available(cx) > 0 { Some(niche) } else { None }
    }
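
    /// Counts the values outside `valid_range` that the scalar can spare.
    /// An illustrative sketch (example values added, not from the original
    /// documentation): for a `bool`-like scalar (`i8` with `valid_range`
    /// `0..=1`), the niche is `2..=255`, so 254 values are available.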
    pub fn available<C: HasDataLayout>(&self, cx: &C) -> u128 {
        let Scalar { value, valid_range: ref v } = self.scalar;
        let bits = value.size(cx).bits();
        assert!(bits <= 128);
        let max_value = !0u128 >> (128 - bits);

        // Find out how many values are outside the valid range.
        let niche = v.end().wrapping_add(1)..*v.start();
        niche.end.wrapping_sub(niche.start) & max_value
    }

    pub fn reserve<C: HasDataLayout>(&self, cx: &C, count: u128) -> Option<(u128, Scalar)> {
        assert!(count > 0);

        let Scalar { value, valid_range: ref v } = self.scalar;
        let bits = value.size(cx).bits();
        assert!(bits <= 128);
        let max_value = !0u128 >> (128 - bits);

        if count > max_value {
            return None;
        }

        // Compute the range of invalid values being reserved.
        let start = v.end().wrapping_add(1) & max_value;
        let end = v.end().wrapping_add(count) & max_value;

        // If the `end` of our range is inside the valid range,
        // then we ran out of invalid values.
        // FIXME(eddyb) abstract this with a wraparound range type.
        let valid_range_contains = |x| {
            if v.start() <= v.end() {
                *v.start() <= x && x <= *v.end()
            } else {
                *v.start() <= x || x <= *v.end()
            }
        };
        if valid_range_contains(end) {
            return None;
        }

        Some((start, Scalar { value, valid_range: *v.start()..=end }))
    }
}
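
// A minimal sketch of `Niche::reserve` (illustrative, not part of the
// original file): reserving one value in a `bool`-like niche (valid range
// `0..=1` over `i8`) yields `Some((2, scalar with valid_range 0..=2))`,
// which is how `Option<bool>` gains an abstract third value for `None`.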

#[derive(PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub struct Layout {
    /// Says where the fields are located within the layout.
    pub fields: FieldsShape,

    /// Encodes information about multi-variant layouts.
    /// Even with `Multiple` variants, a layout still has its own fields! Those are then
    /// shared between all variants. One of them will be the discriminant,
    /// but e.g. generators can have more.
    ///
    /// To access all fields of this layout, both `fields` and the fields of the active variant
    /// must be taken into account.
    pub variants: Variants,

    /// The `abi` defines how this data is passed between functions, and it defines
    /// value restrictions via `valid_range`.
    ///
    /// Note that this is entirely orthogonal to the recursive structure defined by
    /// `variants` and `fields`; for example, `ManuallyDrop<Result<isize, isize>>` has
    /// `Abi::ScalarPair`! So, even with non-`Aggregate` `abi`, `fields` and `variants`
    /// have to be taken into account to find all fields of this layout.
    pub abi: Abi,

    /// The leaf scalar with the largest number of invalid values
    /// (i.e. outside of its `valid_range`), if it exists.
    pub largest_niche: Option<Niche>,

    pub align: AbiAndPrefAlign,
    pub size: Size,
}

impl Layout {
    pub fn scalar<C: HasDataLayout>(cx: &C, scalar: Scalar) -> Self {
        let largest_niche = Niche::from_scalar(cx, Size::ZERO, scalar.clone());
        let size = scalar.value.size(cx);
        let align = scalar.value.align(cx);
        Layout {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Primitive,
            abi: Abi::Scalar(scalar),
            largest_niche,
            size,
            align,
        }
    }
}
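
// An illustrative sketch (not part of the original file) of building the
// layout of `bool` from its scalar description; `cx` is assumed to be any
// `&impl HasDataLayout`:
//
//     let bool_layout = Layout::scalar(
//         cx,
//         Scalar { value: Primitive::Int(Integer::I8, false), valid_range: 0..=1 },
//     );
//     assert_eq!(bool_layout.size, Size::from_bytes(1));
//     assert!(bool_layout.largest_niche.is_some()); // 2..=255 is free for niches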

/// The layout of a type, alongside the type itself.
/// Provides various type traversal APIs (e.g., recursing into fields).
///
/// Note that the layout is NOT guaranteed to always be identical
/// to that obtained from `layout_of(ty)`, as we need to produce
/// layouts for which Rust types do not exist, such as enum variants
/// or synthetic fields of enums (i.e., discriminants) and fat pointers.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub struct TyAndLayout<'a, Ty> {
    pub ty: Ty,
    pub layout: &'a Layout,
}

impl<'a, Ty> Deref for TyAndLayout<'a, Ty> {
    type Target = &'a Layout;
    fn deref(&self) -> &&'a Layout {
        &self.layout
    }
}

/// Trait for context types that can compute layouts of things.
pub trait LayoutOf {
    type Ty;
    type TyAndLayout;

    fn layout_of(&self, ty: Self::Ty) -> Self::TyAndLayout;
    fn spanned_layout_of(&self, ty: Self::Ty, _span: Span) -> Self::TyAndLayout {
        self.layout_of(ty)
    }
}

/// The `TyAndLayout` above will always be a `MaybeResult<TyAndLayout<'_, Self>>`.
/// We can't add the bound due to the lifetime, but this trait is still useful when
/// writing code that's generic over the `LayoutOf` impl.
pub trait MaybeResult<T> {
    type Error;

    fn from(x: Result<T, Self::Error>) -> Self;
    fn to_result(self) -> Result<T, Self::Error>;
}

impl<T> MaybeResult<T> for T {
    type Error = !;

    fn from(Ok(x): Result<T, Self::Error>) -> Self {
        x
    }
    fn to_result(self) -> Result<T, Self::Error> {
        Ok(self)
    }
}

impl<T, E> MaybeResult<T> for Result<T, E> {
    type Error = E;

    fn from(x: Result<T, Self::Error>) -> Self {
        x
    }
    fn to_result(self) -> Result<T, Self::Error> {
        self
    }
}

#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum PointerKind {
    /// Most general case, we know no restrictions to tell LLVM.
    Shared,

    /// `&T` where `T` contains no `UnsafeCell`, is `noalias` and `readonly`.
    Frozen,

    /// `&mut T` which is `noalias` but not `readonly`.
    UniqueBorrowed,

    /// `Box<T>`, unlike `UniqueBorrowed`, it also has `noalias` on returns.
    UniqueOwned,
}

#[derive(Copy, Clone, Debug)]
pub struct PointeeInfo {
    pub size: Size,
    pub align: Align,
    pub safe: Option<PointerKind>,
    pub address_space: AddressSpace,
}

pub trait TyAndLayoutMethods<'a, C: LayoutOf<Ty = Self>>: Sized {
    fn for_variant(
        this: TyAndLayout<'a, Self>,
        cx: &C,
        variant_index: VariantIdx,
    ) -> TyAndLayout<'a, Self>;
    fn field(this: TyAndLayout<'a, Self>, cx: &C, i: usize) -> C::TyAndLayout;
    fn pointee_info_at(this: TyAndLayout<'a, Self>, cx: &C, offset: Size) -> Option<PointeeInfo>;
}

impl<'a, Ty> TyAndLayout<'a, Ty> {
    pub fn for_variant<C>(self, cx: &C, variant_index: VariantIdx) -> Self
    where
        Ty: TyAndLayoutMethods<'a, C>,
        C: LayoutOf<Ty = Ty>,
    {
        Ty::for_variant(self, cx, variant_index)
    }

    /// Callers might want to use `C: LayoutOf<Ty=Ty, TyAndLayout: MaybeResult<Self>>`
    /// to allow recursion (see `might_permit_raw_init` below for an example).
    pub fn field<C>(self, cx: &C, i: usize) -> C::TyAndLayout
    where
        Ty: TyAndLayoutMethods<'a, C>,
        C: LayoutOf<Ty = Ty>,
    {
        Ty::field(self, cx, i)
    }

    pub fn pointee_info_at<C>(self, cx: &C, offset: Size) -> Option<PointeeInfo>
    where
        Ty: TyAndLayoutMethods<'a, C>,
        C: LayoutOf<Ty = Ty>,
    {
        Ty::pointee_info_at(self, cx, offset)
    }
}

impl<'a, Ty> TyAndLayout<'a, Ty> {
    /// Returns `true` if the layout corresponds to an unsized type.
    pub fn is_unsized(&self) -> bool {
        self.abi.is_unsized()
    }

    /// Returns `true` if the type is a ZST and not unsized.
    pub fn is_zst(&self) -> bool {
        match self.abi {
            Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } => false,
            Abi::Uninhabited => self.size.bytes() == 0,
            Abi::Aggregate { sized } => sized && self.size.bytes() == 0,
        }
    }

    /// Determines if this type permits "raw" initialization by just transmuting some
    /// memory into an instance of `T`.
    /// `zero` indicates if the memory is zero-initialized, or alternatively
    /// left entirely uninitialized.
    /// This is conservative: in doubt, it will answer `true`.
    ///
    /// FIXME: Once we removed all the conservatism, we could alternatively
    /// create an all-0/all-undef constant and run the const value validator to see if
    /// this is a valid value for the given type.
    pub fn might_permit_raw_init<C, E>(self, cx: &C, zero: bool) -> Result<bool, E>
    where
        Self: Copy,
        Ty: TyAndLayoutMethods<'a, C>,
        C: LayoutOf<Ty = Ty, TyAndLayout: MaybeResult<Self, Error = E>> + HasDataLayout,
    {
        let scalar_allows_raw_init = move |s: &Scalar| -> bool {
            if zero {
                let range = &s.valid_range;
                // The range must contain 0.
                range.contains(&0) || (*range.start() > *range.end()) // wrap-around allows 0
            } else {
                // The range must include all values. `valid_range_exclusive` handles
                // the wrap-around using target arithmetic; with wrap-around then the full
                // range is one where `start == end`.
                let range = s.valid_range_exclusive(cx);
                range.start == range.end
            }
        };

        // Check the ABI.
        let valid = match &self.abi {
            Abi::Uninhabited => false, // definitely UB
            Abi::Scalar(s) => scalar_allows_raw_init(s),
            Abi::ScalarPair(s1, s2) => scalar_allows_raw_init(s1) && scalar_allows_raw_init(s2),
            Abi::Vector { element: s, count } => *count == 0 || scalar_allows_raw_init(s),
            Abi::Aggregate { .. } => true, // Fields are checked below.
        };
        if !valid {
            // This is definitely not okay.
            return Ok(false);
        }

        // If we have not found an error yet, we need to recursively descend into fields.
        match &self.fields {
            FieldsShape::Primitive | FieldsShape::Union { .. } => {}
            FieldsShape::Array { .. } => {
                // FIXME(#66151): For now, we are conservative and do not check arrays.
            }
            FieldsShape::Arbitrary { offsets, .. } => {
                for idx in 0..offsets.len() {
                    let field = self.field(cx, idx).to_result()?;
                    if !field.might_permit_raw_init(cx, zero)? {
                        // We found a field that is unhappy with this kind of initialization.
                        return Ok(false);
                    }
                }
            }
        }

        // FIXME(#66151): For now, we are conservative and do not check `self.variants`.
        Ok(true)
    }
}
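
// A minimal usage sketch (illustrative, not part of the original file),
// assuming a context `cx` implementing `LayoutOf + HasDataLayout` and some
// type `ty`:
//
//     let layout = cx.layout_of(ty).to_result()?;
//     // For `bool`: zero-init is fine (0 is a valid value), but fully
//     // uninitialized memory is not (only 0 and 1 are valid).
//     assert!(layout.might_permit_raw_init(cx, /* zero */ true)?);
//     assert!(!layout.might_permit_raw_init(cx, /* zero */ false)?);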