pub use Integer::*;
pub use Primitive::*;

use crate::json::{Json, ToJson};
use crate::spec::Target;

use std::convert::{TryFrom, TryInto};
use std::fmt;
use std::iter::Step;
use std::num::{NonZeroUsize, ParseIntError};
use std::ops::{Add, AddAssign, Deref, Mul, RangeInclusive, Sub};
use std::str::FromStr;

use rustc_data_structures::intern::Interned;
use rustc_index::vec::{Idx, IndexVec};
use rustc_macros::HashStable_Generic;
/// Parsed [Data layout](https://llvm.org/docs/LangRef.html#data-layout)
/// for a target, which contains everything needed to compute layouts.
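///
/// For example (an illustrative sketch; the exact string comes from each
/// target's specification), an x86_64 Linux target uses a string like
/// `"e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"`,
/// where `e` selects little-endian and `i64:64` gives `i64` 64-bit alignment.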
pub struct TargetDataLayout {
    pub endian: Endian,
    pub i1_align: AbiAndPrefAlign,
    pub i8_align: AbiAndPrefAlign,
    pub i16_align: AbiAndPrefAlign,
    pub i32_align: AbiAndPrefAlign,
    pub i64_align: AbiAndPrefAlign,
    pub i128_align: AbiAndPrefAlign,
    pub f32_align: AbiAndPrefAlign,
    pub f64_align: AbiAndPrefAlign,
    pub pointer_size: Size,
    pub pointer_align: AbiAndPrefAlign,
    pub aggregate_align: AbiAndPrefAlign,

    /// Alignments for vector types.
    pub vector_align: Vec<(Size, AbiAndPrefAlign)>,

    pub instruction_address_space: AddressSpace,

    /// Minimum size of #[repr(C)] enums (default: `I32`).
    pub c_enum_min_size: Integer,
}
impl Default for TargetDataLayout {
    /// Creates an instance of `TargetDataLayout`.
    fn default() -> TargetDataLayout {
        let align = |bits| Align::from_bits(bits).unwrap();
        TargetDataLayout {
            endian: Endian::Big,
            i1_align: AbiAndPrefAlign::new(align(8)),
            i8_align: AbiAndPrefAlign::new(align(8)),
            i16_align: AbiAndPrefAlign::new(align(16)),
            i32_align: AbiAndPrefAlign::new(align(32)),
            i64_align: AbiAndPrefAlign { abi: align(32), pref: align(64) },
            i128_align: AbiAndPrefAlign { abi: align(32), pref: align(64) },
            f32_align: AbiAndPrefAlign::new(align(32)),
            f64_align: AbiAndPrefAlign::new(align(64)),
            pointer_size: Size::from_bits(64),
            pointer_align: AbiAndPrefAlign::new(align(64)),
            aggregate_align: AbiAndPrefAlign { abi: align(0), pref: align(64) },
            vector_align: vec![
                (Size::from_bits(64), AbiAndPrefAlign::new(align(64))),
                (Size::from_bits(128), AbiAndPrefAlign::new(align(128))),
            ],
            instruction_address_space: AddressSpace::DATA,
            c_enum_min_size: Integer::I32,
        }
    }
}
pub enum TargetDataLayoutErrors<'a> {
    InvalidAddressSpace { addr_space: &'a str, cause: &'a str, err: ParseIntError },
    InvalidBits { kind: &'a str, bit: &'a str, cause: &'a str, err: ParseIntError },
    MissingAlignment { cause: &'a str },
    InvalidAlignment { cause: &'a str, err: String },
    InconsistentTargetArchitecture { dl: &'a str, target: &'a str },
    InconsistentTargetPointerWidth { pointer_size: u64, target: u32 },
    InvalidBitsSize { err: String },
}
impl TargetDataLayout {
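    /// Parses the data-layout string of the given [`Target`] and checks the
    /// result for consistency with the rest of the target specification.
    ///
    /// A sketch of the accepted format (not an exhaustive grammar):
    ///
    /// ```ignore (illustrative)
    /// // The string is split on '-' into specs, and each spec on ':':
    /// //   "e"       => little-endian
    /// //   "i64:64"  => i64 has 64-bit ABI alignment
    /// //   "p:64:64" => pointers are 64 bits wide and 64-bit aligned
    /// ```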
    pub fn parse<'a>(target: &'a Target) -> Result<TargetDataLayout, TargetDataLayoutErrors<'a>> {
        // Parse an address space index from a string.
        let parse_address_space = |s: &'a str, cause: &'a str| {
            s.parse::<u32>().map(AddressSpace).map_err(|err| {
                TargetDataLayoutErrors::InvalidAddressSpace { addr_space: s, cause, err }
            })
        };

        // Parse a bit count from a string.
        let parse_bits = |s: &'a str, kind: &'a str, cause: &'a str| {
            s.parse::<u64>().map_err(|err| TargetDataLayoutErrors::InvalidBits {
                kind,
                bit: s,
                cause,
                err,
            })
        };

        // Parse a size string.
        let size = |s: &'a str, cause: &'a str| parse_bits(s, "size", cause).map(Size::from_bits);

        // Parse an alignment string.
        let align = |s: &[&'a str], cause: &'a str| {
            if s.is_empty() {
                return Err(TargetDataLayoutErrors::MissingAlignment { cause });
            }
            let align_from_bits = |bits| {
                Align::from_bits(bits)
                    .map_err(|err| TargetDataLayoutErrors::InvalidAlignment { cause, err })
            };
            let abi = parse_bits(s[0], "alignment", cause)?;
            let pref = s.get(1).map_or(Ok(abi), |pref| parse_bits(pref, "alignment", cause))?;
            Ok(AbiAndPrefAlign { abi: align_from_bits(abi)?, pref: align_from_bits(pref)? })
        };
        let mut dl = TargetDataLayout::default();
        let mut i128_align_src = 64;
        for spec in target.data_layout.split('-') {
            let spec_parts = spec.split(':').collect::<Vec<_>>();

            match &*spec_parts {
                ["e"] => dl.endian = Endian::Little,
                ["E"] => dl.endian = Endian::Big,
                [p] if p.starts_with('P') => {
                    dl.instruction_address_space = parse_address_space(&p[1..], "P")?
                }
                ["a", ref a @ ..] => dl.aggregate_align = align(a, "a")?,
                ["f32", ref a @ ..] => dl.f32_align = align(a, "f32")?,
                ["f64", ref a @ ..] => dl.f64_align = align(a, "f64")?,
                [p @ "p", s, ref a @ ..] | [p @ "p0", s, ref a @ ..] => {
                    dl.pointer_size = size(s, p)?;
                    dl.pointer_align = align(a, p)?;
                }
                [s, ref a @ ..] if s.starts_with('i') => {
                    let Ok(bits) = s[1..].parse::<u64>() else {
                        size(&s[1..], "i")?; // For the user error.
                        continue;
                    };
                    let a = align(a, s)?;
                    match bits {
                        1 => dl.i1_align = a,
                        8 => dl.i8_align = a,
                        16 => dl.i16_align = a,
                        32 => dl.i32_align = a,
                        64 => dl.i64_align = a,
                        _ => {}
                    }
                    if bits >= i128_align_src && bits <= 128 {
                        // Default alignment for i128 is decided by taking the alignment of
                        // largest-sized i{64..=128}.
                        i128_align_src = bits;
                        dl.i128_align = a;
                    }
                }
                [s, ref a @ ..] if s.starts_with('v') => {
                    let v_size = size(&s[1..], "v")?;
                    let a = align(a, s)?;
                    if let Some(v) = dl.vector_align.iter_mut().find(|v| v.0 == v_size) {
                        v.1 = a;
                        continue;
                    }
                    // No existing entry, add a new one.
                    dl.vector_align.push((v_size, a));
                }
                _ => {} // Ignore everything else.
            }
        }
        // Perform consistency checks against the Target information.
        if dl.endian != target.endian {
            return Err(TargetDataLayoutErrors::InconsistentTargetArchitecture {
                dl: dl.endian.as_str(),
                target: target.endian.as_str(),
            });
        }

        let target_pointer_width: u64 = target.pointer_width.into();
        if dl.pointer_size.bits() != target_pointer_width {
            return Err(TargetDataLayoutErrors::InconsistentTargetPointerWidth {
                pointer_size: dl.pointer_size.bits(),
                target: target.pointer_width,
            });
        }

        dl.c_enum_min_size = match Integer::from_size(Size::from_bits(target.c_enum_min_bits)) {
            Ok(bits) => bits,
            Err(err) => return Err(TargetDataLayoutErrors::InvalidBitsSize { err }),
        };

        Ok(dl)
    }
    /// Returns the exclusive upper bound on object size.
    ///
    /// The theoretical maximum object size is defined as the maximum positive `isize` value.
    /// This ensures that the `offset` semantics remain well-defined by allowing it to correctly
    /// index every address within an object along with one byte past the end, along with allowing
    /// `isize` to store the difference between any two pointers into an object.
    ///
    /// The upper bound on 64-bit currently needs to be lower because LLVM uses a 64-bit integer
    /// to represent object size in bits. It would need to be 1 << 61 to account for this, but is
    /// currently conservatively bounded to 1 << 47 as that is enough to cover the current usable
    /// address space on 64-bit ARMv8 and x86_64.
    pub fn obj_size_bound(&self) -> u64 {
        match self.pointer_size.bits() {
            16 => 1 << 15,
            32 => 1 << 31,
            64 => 1 << 47,
            bits => panic!("obj_size_bound: unknown pointer bit size {}", bits),
        }
    }
    pub fn ptr_sized_integer(&self) -> Integer {
        match self.pointer_size.bits() {
            16 => I16,
            32 => I32,
            64 => I64,
            bits => panic!("ptr_sized_integer: unknown pointer bit size {}", bits),
        }
    }
    pub fn vector_align(&self, vec_size: Size) -> AbiAndPrefAlign {
        for &(size, align) in &self.vector_align {
            if size == vec_size {
                return align;
            }
        }
        // Default to natural alignment, which is what LLVM does.
        // That is, use the size, rounded up to a power of 2.
        AbiAndPrefAlign::new(Align::from_bytes(vec_size.bytes().next_power_of_two()).unwrap())
    }
}
pub trait HasDataLayout {
    fn data_layout(&self) -> &TargetDataLayout;
}

impl HasDataLayout for TargetDataLayout {
    #[inline]
    fn data_layout(&self) -> &TargetDataLayout {
        self
    }
}
/// Endianness of the target, which must match `cfg(target_endian)`.
#[derive(Copy, Clone, PartialEq)]
pub enum Endian {
    Little,
    Big,
}

impl Endian {
    pub fn as_str(&self) -> &'static str {
        match self {
            Self::Little => "little",
            Self::Big => "big",
        }
    }
}

impl fmt::Debug for Endian {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(self.as_str())
    }
}

impl FromStr for Endian {
    type Err = String;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "little" => Ok(Self::Little),
            "big" => Ok(Self::Big),
            _ => Err(format!(r#"unknown endian: "{}""#, s)),
        }
    }
}

impl ToJson for Endian {
    fn to_json(&self) -> Json {
        self.as_str().to_json()
    }
}
/// Size of a type in bytes.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Encodable, Decodable)]
#[derive(HashStable_Generic)]
pub struct Size {
    raw: u64,
}

// This is debug-printed a lot in larger structs; don't waste too much space there.
impl fmt::Debug for Size {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "Size({} bytes)", self.bytes())
    }
}
impl Size {
    pub const ZERO: Size = Size { raw: 0 };
    /// Rounds `bits` up to the next-higher byte boundary, if `bits` is
    /// not a multiple of 8.
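    ///
    /// For example (an illustrative sketch):
    ///
    /// ```ignore (illustrative)
    /// assert_eq!(Size::from_bits(12).bytes(), 2); // 12 bits round up to 2 bytes
    /// assert_eq!(Size::from_bits(16).bytes(), 2);
    /// ```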
    pub fn from_bits(bits: impl TryInto<u64>) -> Size {
        let bits = bits.try_into().ok().unwrap();
        // Avoid potential overflow from `bits + 7`.
        Size { raw: bits / 8 + ((bits % 8) + 7) / 8 }
    }

    #[inline]
    pub fn from_bytes(bytes: impl TryInto<u64>) -> Size {
        let bytes: u64 = bytes.try_into().ok().unwrap();
        Size { raw: bytes }
    }

    #[inline]
    pub fn bytes(self) -> u64 {
        self.raw
    }

    #[inline]
    pub fn bytes_usize(self) -> usize {
        self.bytes().try_into().unwrap()
    }

    #[inline]
    pub fn bits(self) -> u64 {
        #[cold]
        fn overflow(bytes: u64) -> ! {
            panic!("Size::bits: {} bytes in bits doesn't fit in u64", bytes)
        }

        self.bytes().checked_mul(8).unwrap_or_else(|| overflow(self.bytes()))
    }

    #[inline]
    pub fn bits_usize(self) -> usize {
        self.bits().try_into().unwrap()
    }
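    /// Rounds `self` up to the nearest multiple of `align`. A sketch of the
    /// intended behavior:
    ///
    /// ```ignore (illustrative)
    /// let four = Align::from_bytes(4).unwrap();
    /// assert_eq!(Size::from_bytes(5).align_to(four).bytes(), 8);
    /// ```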
    #[inline]
    pub fn align_to(self, align: Align) -> Size {
        let mask = align.bytes() - 1;
        Size::from_bytes((self.bytes() + mask) & !mask)
    }

    #[inline]
    pub fn is_aligned(self, align: Align) -> bool {
        let mask = align.bytes() - 1;
        self.bytes() & mask == 0
    }

    #[inline]
    pub fn checked_add<C: HasDataLayout>(self, offset: Size, cx: &C) -> Option<Size> {
        let dl = cx.data_layout();

        let bytes = self.bytes().checked_add(offset.bytes())?;

        if bytes < dl.obj_size_bound() { Some(Size::from_bytes(bytes)) } else { None }
    }

    #[inline]
    pub fn checked_mul<C: HasDataLayout>(self, count: u64, cx: &C) -> Option<Size> {
        let dl = cx.data_layout();

        let bytes = self.bytes().checked_mul(count)?;

        if bytes < dl.obj_size_bound() { Some(Size::from_bytes(bytes)) } else { None }
    }
    /// Truncates `value` to `self` bits and then sign-extends it to 128 bits
    /// (i.e., if it is negative, fill with 1's on the left).
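    ///
    /// For example (a sketch):
    ///
    /// ```ignore (illustrative)
    /// // 0xff as an i8 is -1; sign-extended to 128 bits it is all ones.
    /// assert_eq!(Size::from_bits(8).sign_extend(0xff), u128::MAX);
    /// ```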
    pub fn sign_extend(self, value: u128) -> u128 {
        let size = self.bits();
        if size == 0 {
            // Truncated until nothing is left.
            return 0;
        }
        // Sign-extend it.
        let shift = 128 - size;
        // Shift the unsigned value to the left, then shift back to the right as signed
        // (essentially fills with sign bit on the left).
        (((value << shift) as i128) >> shift) as u128
    }
    /// Truncates `value` to `self` bits.
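    ///
    /// For example (a sketch):
    ///
    /// ```ignore (illustrative)
    /// assert_eq!(Size::from_bits(8).truncate(0x1ff), 0xff);
    /// ```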
    pub fn truncate(self, value: u128) -> u128 {
        let size = self.bits();
        if size == 0 {
            // Truncated until nothing is left.
            return 0;
        }
        let shift = 128 - size;
        // Truncate (shift left to drop out leftover values, shift right to fill with zeroes).
        (value << shift) >> shift
    }
    pub fn signed_int_min(&self) -> i128 {
        self.sign_extend(1_u128 << (self.bits() - 1)) as i128
    }

    pub fn signed_int_max(&self) -> i128 {
        i128::MAX >> (128 - self.bits())
    }

    pub fn unsigned_int_max(&self) -> u128 {
        u128::MAX >> (128 - self.bits())
    }
}
// Panicking addition, subtraction and multiplication for convenience.
// Avoid during layout computation; return `LayoutError` instead.

impl Add for Size {
    type Output = Size;
    #[inline]
    fn add(self, other: Size) -> Size {
        Size::from_bytes(self.bytes().checked_add(other.bytes()).unwrap_or_else(|| {
            panic!("Size::add: {} + {} doesn't fit in u64", self.bytes(), other.bytes())
        }))
    }
}

impl Sub for Size {
    type Output = Size;
    #[inline]
    fn sub(self, other: Size) -> Size {
        Size::from_bytes(self.bytes().checked_sub(other.bytes()).unwrap_or_else(|| {
            panic!("Size::sub: {} - {} would result in negative size", self.bytes(), other.bytes())
        }))
    }
}
impl Mul<Size> for u64 {
    type Output = Size;
    #[inline]
    fn mul(self, size: Size) -> Size {
        size * self
    }
}

impl Mul<u64> for Size {
    type Output = Size;
    #[inline]
    fn mul(self, count: u64) -> Size {
        match self.bytes().checked_mul(count) {
            Some(bytes) => Size::from_bytes(bytes),
            None => panic!("Size::mul: {} * {} doesn't fit in u64", self.bytes(), count),
        }
    }
}

impl AddAssign for Size {
    #[inline]
    fn add_assign(&mut self, other: Size) {
        *self = *self + other;
    }
}
impl Step for Size {
    #[inline]
    fn steps_between(start: &Self, end: &Self) -> Option<usize> {
        u64::steps_between(&start.bytes(), &end.bytes())
    }

    #[inline]
    fn forward_checked(start: Self, count: usize) -> Option<Self> {
        u64::forward_checked(start.bytes(), count).map(Self::from_bytes)
    }

    #[inline]
    fn forward(start: Self, count: usize) -> Self {
        Self::from_bytes(u64::forward(start.bytes(), count))
    }

    #[inline]
    unsafe fn forward_unchecked(start: Self, count: usize) -> Self {
        Self::from_bytes(u64::forward_unchecked(start.bytes(), count))
    }

    #[inline]
    fn backward_checked(start: Self, count: usize) -> Option<Self> {
        u64::backward_checked(start.bytes(), count).map(Self::from_bytes)
    }

    #[inline]
    fn backward(start: Self, count: usize) -> Self {
        Self::from_bytes(u64::backward(start.bytes(), count))
    }

    #[inline]
    unsafe fn backward_unchecked(start: Self, count: usize) -> Self {
        Self::from_bytes(u64::backward_unchecked(start.bytes(), count))
    }
}
/// Alignment of a type in bytes (always a power of two).
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Encodable, Decodable)]
#[derive(HashStable_Generic)]
pub struct Align {
    pow2: u8,
}

// This is debug-printed a lot in larger structs; don't waste too much space there.
impl fmt::Debug for Align {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "Align({} bytes)", self.bytes())
    }
}
impl Align {
    pub const ONE: Align = Align { pow2: 0 };
    pub const MAX: Align = Align { pow2: 29 };

    #[inline]
    pub fn from_bits(bits: u64) -> Result<Align, String> {
        Align::from_bytes(Size::from_bits(bits).bytes())
    }
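    /// Parses a byte count into a power-of-two alignment. A sketch:
    ///
    /// ```ignore (illustrative)
    /// assert_eq!(Align::from_bytes(16).unwrap().bits(), 128);
    /// assert!(Align::from_bytes(7).is_err()); // not a power of two
    /// ```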
    #[inline]
    pub fn from_bytes(align: u64) -> Result<Align, String> {
        // Treat an alignment of 0 bytes like 1-byte alignment.
        if align == 0 {
            return Ok(Align::ONE);
        }

        #[cold]
        fn not_power_of_2(align: u64) -> String {
            format!("`{}` is not a power of 2", align)
        }

        #[cold]
        fn too_large(align: u64) -> String {
            format!("`{}` is too large", align)
        }

        let mut bytes = align;
        let mut pow2: u8 = 0;
        while (bytes & 1) == 0 {
            pow2 += 1;
            bytes >>= 1;
        }
        if bytes != 1 {
            return Err(not_power_of_2(align));
        }
        if pow2 > Self::MAX.pow2 {
            return Err(too_large(align));
        }

        Ok(Align { pow2 })
    }
    #[inline]
    pub fn bytes(self) -> u64 {
        1 << self.pow2
    }

    #[inline]
    pub fn bits(self) -> u64 {
        self.bytes() * 8
    }
    /// Computes the best alignment possible for the given offset
    /// (the largest power of two that the offset is a multiple of).
    ///
    /// N.B., for an offset of `0`, this happens to return `2^64`.
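    ///
    /// For example (a sketch): 24 is a multiple of 8 but not of 16, so
    /// `max_for_offset(Size::from_bytes(24))` is the 8-byte alignment.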
    #[inline]
    pub fn max_for_offset(offset: Size) -> Align {
        Align { pow2: offset.bytes().trailing_zeros() as u8 }
    }

    /// Lower the alignment, if necessary, such that the given offset
    /// is aligned to it (the offset is a multiple of the alignment).
    #[inline]
    pub fn restrict_for_offset(self, offset: Size) -> Align {
        self.min(Align::max_for_offset(offset))
    }
}
/// A pair of alignments, ABI-mandated and preferred.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
#[derive(HashStable_Generic)]
pub struct AbiAndPrefAlign {
    pub abi: Align,
    pub pref: Align,
}

impl AbiAndPrefAlign {
    #[inline]
    pub fn new(align: Align) -> AbiAndPrefAlign {
        AbiAndPrefAlign { abi: align, pref: align }
    }

    #[inline]
    pub fn min(self, other: AbiAndPrefAlign) -> AbiAndPrefAlign {
        AbiAndPrefAlign { abi: self.abi.min(other.abi), pref: self.pref.min(other.pref) }
    }

    #[inline]
    pub fn max(self, other: AbiAndPrefAlign) -> AbiAndPrefAlign {
        AbiAndPrefAlign { abi: self.abi.max(other.abi), pref: self.pref.max(other.pref) }
    }
}
/// Integers, also used for enum discriminants.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, HashStable_Generic)]
pub enum Integer {
    I8,
    I16,
    I32,
    I64,
    I128,
}

impl Integer {
    #[inline]
    pub fn size(self) -> Size {
        match self {
            I8 => Size::from_bytes(1),
            I16 => Size::from_bytes(2),
            I32 => Size::from_bytes(4),
            I64 => Size::from_bytes(8),
            I128 => Size::from_bytes(16),
        }
    }
    pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAndPrefAlign {
        let dl = cx.data_layout();

        match self {
            I8 => dl.i8_align,
            I16 => dl.i16_align,
            I32 => dl.i32_align,
            I64 => dl.i64_align,
            I128 => dl.i128_align,
        }
    }
    /// Finds the smallest `Integer` type which can represent the signed value.
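    ///
    /// ```ignore (illustrative)
    /// assert_eq!(Integer::fit_signed(-128), I8);
    /// assert_eq!(Integer::fit_signed(128), I16); // 128 overflows i8
    /// ```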
    pub fn fit_signed(x: i128) -> Integer {
        match x {
            -0x0000_0000_0000_0080..=0x0000_0000_0000_007f => I8,
            -0x0000_0000_0000_8000..=0x0000_0000_0000_7fff => I16,
            -0x0000_0000_8000_0000..=0x0000_0000_7fff_ffff => I32,
            -0x8000_0000_0000_0000..=0x7fff_ffff_ffff_ffff => I64,
            _ => I128,
        }
    }
    /// Finds the smallest `Integer` type which can represent the unsigned value.
    pub fn fit_unsigned(x: u128) -> Integer {
        match x {
            0..=0x0000_0000_0000_00ff => I8,
            0..=0x0000_0000_0000_ffff => I16,
            0..=0x0000_0000_ffff_ffff => I32,
            0..=0xffff_ffff_ffff_ffff => I64,
            _ => I128,
        }
    }
    /// Finds the smallest integer with the given alignment.
    pub fn for_align<C: HasDataLayout>(cx: &C, wanted: Align) -> Option<Integer> {
        let dl = cx.data_layout();

        for candidate in [I8, I16, I32, I64, I128] {
            if wanted == candidate.align(dl).abi && wanted.bytes() == candidate.size().bytes() {
                return Some(candidate);
            }
        }
        None
    }
    /// Finds the largest integer with the given alignment or less.
    pub fn approximate_align<C: HasDataLayout>(cx: &C, wanted: Align) -> Integer {
        let dl = cx.data_layout();

        // FIXME(eddyb) maybe include I128 in the future, when it works everywhere.
        for candidate in [I64, I32, I16] {
            if wanted >= candidate.align(dl).abi && wanted.bytes() >= candidate.size().bytes() {
                return candidate;
            }
        }
        I8
    }
    // FIXME(eddyb) consolidate this and other methods that find the appropriate
    // `Integer` given some requirements.
    #[inline]
    fn from_size(size: Size) -> Result<Self, String> {
        match size.bits() {
            8 => Ok(Integer::I8),
            16 => Ok(Integer::I16),
            32 => Ok(Integer::I32),
            64 => Ok(Integer::I64),
            128 => Ok(Integer::I128),
            _ => Err(format!("rust does not support integers with {} bits", size.bits())),
        }
    }
}
/// Fundamental unit of memory access and layout.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum Primitive {
    /// The `bool` is the signedness of the `Integer` type.
    ///
    /// One would think we would not care about such details this low down,
    /// but some ABIs are described in terms of C types and ISAs where the
    /// integer arithmetic is done on {sign,zero}-extended registers, e.g.
    /// a negative integer passed by zero-extension will appear positive in
    /// the callee, and most operations on it will produce the wrong values.
    Int(Integer, bool),
    F32,
    F64,
    Pointer,
}
impl Primitive {
    pub fn size<C: HasDataLayout>(self, cx: &C) -> Size {
        let dl = cx.data_layout();

        match self {
            Int(i, _) => i.size(),
            F32 => Size::from_bits(32),
            F64 => Size::from_bits(64),
            Pointer => dl.pointer_size,
        }
    }
    pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAndPrefAlign {
        let dl = cx.data_layout();

        match self {
            Int(i, _) => i.align(dl),
            F32 => dl.f32_align,
            F64 => dl.f64_align,
            Pointer => dl.pointer_align,
        }
    }
    // FIXME(eddyb) remove, it's trivial thanks to `matches!`.
    #[inline]
    pub fn is_float(self) -> bool {
        matches!(self, F32 | F64)
    }

    // FIXME(eddyb) remove, it's completely unused.
    #[inline]
    pub fn is_int(self) -> bool {
        matches!(self, Int(..))
    }

    #[inline]
    pub fn is_ptr(self) -> bool {
        matches!(self, Pointer)
    }
}
/// Inclusive wrap-around range of valid values, that is, if
/// start > end, it represents `start..=MAX`,
/// followed by `0..=end`.
///
/// That is, for an i8 primitive, a range of `254..=2` means the following
/// sequence:
///
///    254 (-2), 255 (-1), 0, 1, 2
///
/// This is intended specifically to mirror LLVM's `!range` metadata semantics.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
#[derive(HashStable_Generic)]
pub struct WrappingRange {
    pub start: u128,
    pub end: u128,
}

impl WrappingRange {
    pub fn full(size: Size) -> Self {
        Self { start: 0, end: size.unsigned_int_max() }
    }
    /// Returns `true` if `v` is contained in the range.
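    ///
    /// ```ignore (illustrative)
    /// let r = WrappingRange { start: 254, end: 2 }; // wraps around
    /// assert!(r.contains(255) && r.contains(1));
    /// assert!(!r.contains(3));
    /// ```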
    pub fn contains(&self, v: u128) -> bool {
        if self.start <= self.end {
            self.start <= v && v <= self.end
        } else {
            self.start <= v || v <= self.end
        }
    }
    /// Returns `self` with replaced `start`.
    pub fn with_start(mut self, start: u128) -> Self {
        self.start = start;
        self
    }

    /// Returns `self` with replaced `end`.
    pub fn with_end(mut self, end: u128) -> Self {
        self.end = end;
        self
    }
    /// Returns `true` if `size` completely fills the range.
    pub fn is_full_for(&self, size: Size) -> bool {
        let max_value = size.unsigned_int_max();
        debug_assert!(self.start <= max_value && self.end <= max_value);
        self.start == (self.end.wrapping_add(1) & max_value)
    }
}
impl fmt::Debug for WrappingRange {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        if self.start > self.end {
            write!(fmt, "(..={}) | ({}..)", self.end, self.start)?;
        } else {
            write!(fmt, "{}..={}", self.start, self.end)?;
        }
        Ok(())
    }
}
/// Information about one scalar component of a Rust type.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
#[derive(HashStable_Generic)]
pub enum Scalar {
    Initialized {
        value: Primitive,

        // FIXME(eddyb) always use the shortest range, e.g., by finding
        // the largest space between two consecutive valid values and
        // taking everything else as the (shortest) valid range.
        valid_range: WrappingRange,
    },
    Union {
        /// Even for unions, we need to use the correct registers for the kind of
        /// values inside the union, so we keep the `Primitive` type around. We
        /// also use it to compute the size of the scalar.
        /// However, unions never have niches and even allow undef,
        /// so there is no `valid_range`.
        value: Primitive,
    },
}

impl Scalar {
    pub fn is_bool(&self) -> bool {
        matches!(
            self,
            Scalar::Initialized {
                value: Int(I8, false),
                valid_range: WrappingRange { start: 0, end: 1 }
            }
        )
    }
    /// Gets the primitive representation of this type, ignoring the valid range and whether the
    /// value is allowed to be undefined (due to being a union).
    pub fn primitive(&self) -> Primitive {
        match *self {
            Scalar::Initialized { value, .. } | Scalar::Union { value } => value,
        }
    }
    pub fn align(self, cx: &impl HasDataLayout) -> AbiAndPrefAlign {
        self.primitive().align(cx)
    }

    pub fn size(self, cx: &impl HasDataLayout) -> Size {
        self.primitive().size(cx)
    }

    pub fn to_union(&self) -> Self {
        Self::Union { value: self.primitive() }
    }
    pub fn valid_range(&self, cx: &impl HasDataLayout) -> WrappingRange {
        match *self {
            Scalar::Initialized { valid_range, .. } => valid_range,
            Scalar::Union { value } => WrappingRange::full(value.size(cx)),
        }
    }
    /// Allows the caller to mutate the valid range. This operation will panic if attempted on
    /// a union.
    pub fn valid_range_mut(&mut self) -> &mut WrappingRange {
        match self {
            Scalar::Initialized { valid_range, .. } => valid_range,
            Scalar::Union { .. } => panic!("cannot change the valid range of a union"),
        }
    }
    /// Returns `true` if all possible numbers are valid, i.e., `valid_range` covers the
    /// whole layout.
    pub fn is_always_valid<C: HasDataLayout>(&self, cx: &C) -> bool {
        match *self {
            Scalar::Initialized { valid_range, .. } => valid_range.is_full_for(self.size(cx)),
            Scalar::Union { .. } => true,
        }
    }
    /// Returns `true` if this type can be left uninitialized.
    pub fn is_uninit_valid(&self) -> bool {
        match *self {
            Scalar::Initialized { .. } => false,
            Scalar::Union { .. } => true,
        }
    }
}
/// Describes how the fields of a type are located in memory.
#[derive(PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum FieldsShape {
    /// Scalar primitives and `!`, which never have fields.
    Primitive,

    /// All fields start at no offset. The `usize` is the field count.
    Union(NonZeroUsize),

    /// Array/vector-like placement, with all fields of identical types.
    Array { stride: Size, count: u64 },

    /// Struct-like placement, with precomputed offsets.
    ///
    /// Fields are guaranteed to not overlap, but note that gaps
    /// before, between and after all the fields are NOT always
    /// padding, and as such their contents may not be discarded.
    /// For example, enum variants leave a gap at the start,
    /// where the discriminant field in the enum layout goes.
    Arbitrary {
        /// Offsets for the first byte of each field,
        /// ordered to match the source definition order.
        /// Note that this vector is not necessarily sorted in increasing order.
        // FIXME(eddyb) use small vector optimization for the common case.
        offsets: Vec<Size>,

        /// Maps source order field indices to memory order indices,
        /// depending on how the fields were reordered (if at all).
        /// This is a permutation, with both the source order and the
        /// memory order using the same (0..n) index ranges.
        ///
        /// Note that during computation of `memory_index`, sometimes
        /// it is easier to operate on the inverse mapping (that is,
        /// from memory order to source order), and that is usually
        /// named `inverse_memory_index`.
        // FIXME(eddyb) build a better abstraction for permutations, if possible.
        // FIXME(camlorn) also consider small vector optimization here.
        memory_index: Vec<u32>,
    },
}

impl FieldsShape {
    #[inline]
    pub fn count(&self) -> usize {
        match *self {
            FieldsShape::Primitive => 0,
            FieldsShape::Union(count) => count.get(),
            FieldsShape::Array { count, .. } => count.try_into().unwrap(),
            FieldsShape::Arbitrary { ref offsets, .. } => offsets.len(),
        }
    }
    #[inline]
    pub fn offset(&self, i: usize) -> Size {
        match *self {
            FieldsShape::Primitive => {
                unreachable!("FieldsShape::offset: `Primitive`s have no fields")
            }
            FieldsShape::Union(count) => {
                assert!(
                    i < count.get(),
                    "tried to access field {} of union with {} fields",
                    i,
                    count
                );
                Size::ZERO
            }
            FieldsShape::Array { stride, count } => {
                let i = u64::try_from(i).unwrap();
                assert!(i < count);
                stride * i
            }
            FieldsShape::Arbitrary { ref offsets, .. } => offsets[i],
        }
    }
    #[inline]
    pub fn memory_index(&self, i: usize) -> usize {
        match *self {
            FieldsShape::Primitive => {
                unreachable!("FieldsShape::memory_index: `Primitive`s have no fields")
            }
            FieldsShape::Union(_) | FieldsShape::Array { .. } => i,
            FieldsShape::Arbitrary { ref memory_index, .. } => memory_index[i].try_into().unwrap(),
        }
    }
    /// Gets source indices of the fields by increasing offsets.
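    ///
    /// A sketch: if `memory_index` is `[2, 0, 1]` (source field 0 lives at
    /// memory position 2, and so on), iteration yields `1, 2, 0`.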
    pub fn index_by_increasing_offset<'a>(&'a self) -> impl Iterator<Item = usize> + 'a {
        let mut inverse_small = [0u8; 64];
        let mut inverse_big = vec![];
        let use_small = self.count() <= inverse_small.len();

        // We have to write this logic twice in order to keep the array small.
        if let FieldsShape::Arbitrary { ref memory_index, .. } = *self {
            if use_small {
                for i in 0..self.count() {
                    inverse_small[memory_index[i] as usize] = i as u8;
                }
            } else {
                inverse_big = vec![0; self.count()];
                for i in 0..self.count() {
                    inverse_big[memory_index[i] as usize] = i as u32;
                }
            }
        }

        (0..self.count()).map(move |i| match *self {
            FieldsShape::Primitive | FieldsShape::Union(_) | FieldsShape::Array { .. } => i,
            FieldsShape::Arbitrary { .. } => {
                if use_small {
                    inverse_small[i] as usize
                } else {
                    inverse_big[i] as usize
                }
            }
        })
    }
}
/// An identifier that specifies the address space that some operation
/// should operate on. Special address spaces have an effect on code generation,
/// depending on the target and the address spaces it implements.
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct AddressSpace(pub u32);

impl AddressSpace {
    /// The default address space, corresponding to data space.
    pub const DATA: Self = AddressSpace(0);
}
/// Describes how values of the type are passed by target ABIs,
/// in terms of categories of C types there are ABI rules for.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum Abi {
    Uninhabited,
    Scalar(Scalar),
    ScalarPair(Scalar, Scalar),
    Vector {
        element: Scalar,
        count: u64,
    },
    Aggregate {
        /// If true, the size is exact; otherwise it's only a lower bound.
        sized: bool,
    },
}
impl Abi {
    /// Returns `true` if the layout corresponds to an unsized type.
    #[inline]
    pub fn is_unsized(&self) -> bool {
        match *self {
            Abi::Uninhabited | Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } => false,
            Abi::Aggregate { sized } => !sized,
        }
    }

    #[inline]
    pub fn is_sized(&self) -> bool {
        !self.is_unsized()
    }

    /// Returns `true` if this is a single signed integer scalar.
    #[inline]
    pub fn is_signed(&self) -> bool {
        match self {
            Abi::Scalar(scal) => match scal.primitive() {
                Primitive::Int(_, signed) => signed,
                _ => false,
            },
            _ => panic!("`is_signed` on non-scalar ABI {:?}", self),
        }
    }

    /// Returns `true` if this is an uninhabited type.
    #[inline]
    pub fn is_uninhabited(&self) -> bool {
        matches!(*self, Abi::Uninhabited)
    }

    /// Returns `true` if this is a scalar type.
    #[inline]
    pub fn is_scalar(&self) -> bool {
        matches!(*self, Abi::Scalar(_))
    }
}
rustc_index::newtype_index! {
    pub struct VariantIdx {
        derive [HashStable_Generic]
    }
}
#[derive(PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum Variants<'a> {
    /// Single enum variants, structs/tuples, unions, and all non-ADTs.
    Single { index: VariantIdx },

    /// Enum-likes with more than one inhabited variant: each variant comes with
    /// a *discriminant* (usually the same as the variant index but the user can
    /// assign explicit discriminant values). That discriminant is encoded
    /// as a *tag* on the machine. The layout of each variant is
    /// a struct, and they all have space reserved for the tag.
    /// For enums, the tag is the sole field of the layout.
    Multiple {
        tag: Scalar,
        tag_encoding: TagEncoding,
        tag_field: usize,
        variants: IndexVec<VariantIdx, Layout<'a>>,
    },
}
#[derive(PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum TagEncoding {
    /// The tag directly stores the discriminant, but possibly with a smaller layout
    /// (so converting the tag to the discriminant can require sign extension).
    Direct,

    /// Niche (values invalid for a type) encoding the discriminant:
    /// Discriminant and variant index coincide.
    /// The variant `untagged_variant` contains a niche at an arbitrary
    /// offset (field `tag_field` of the enum), which for a variant with
    /// discriminant `d` is set to
    /// `(d - niche_variants.start).wrapping_add(niche_start)`.
    ///
    /// For example, `Option<(usize, &T)>` is represented such that
    /// `None` has a null pointer for the second tuple field, and
    /// `Some` is the identity function (with a non-null reference).
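    ///
    /// A worked sketch: with `niche_variants == 1..=2` and `niche_start == 3`,
    /// variant 1 is encoded as tag value `3` and variant 2 as `4`; any other
    /// tag value decodes to `untagged_variant`.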
    Niche {
        untagged_variant: VariantIdx,
        niche_variants: RangeInclusive<VariantIdx>,
        niche_start: u128,
    },
}
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub struct Niche {
    pub offset: Size,
    pub value: Primitive,
    pub valid_range: WrappingRange,
}

impl Niche {
    pub fn from_scalar<C: HasDataLayout>(cx: &C, offset: Size, scalar: Scalar) -> Option<Self> {
        let Scalar::Initialized { value, valid_range } = scalar else { return None };
        let niche = Niche { offset, value, valid_range };
        if niche.available(cx) > 0 { Some(niche) } else { None }
    }
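    /// Returns the number of invalid values available for niche encoding.
    ///
    /// A sketch: for a `bool`-like scalar (one byte, valid range `0..=1`),
    /// this is `254`, since the values `2..=255` are all invalid.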
    pub fn available<C: HasDataLayout>(&self, cx: &C) -> u128 {
        let Self { value, valid_range: v, .. } = *self;
        let size = value.size(cx);
        assert!(size.bits() <= 128);
        let max_value = size.unsigned_int_max();

        // Find out how many values are outside the valid range.
        let niche = v.end.wrapping_add(1)..v.start;
        niche.end.wrapping_sub(niche.start) & max_value
    }
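    /// Tries to reserve `count` additional invalid values for niche encoding,
    /// returning the first reserved value together with the widened scalar.
    ///
    /// ```ignore (illustrative)
    /// // Sketch: for a bool-like scalar (valid range 0..=1), reserving one
    /// // value widens the range to 0..=2 and yields niche value 2.
    /// ```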
    pub fn reserve<C: HasDataLayout>(&self, cx: &C, count: u128) -> Option<(u128, Scalar)> {
        assert!(count > 0);

        let Self { value, valid_range: v, .. } = *self;
        let size = value.size(cx);
        assert!(size.bits() <= 128);
        let max_value = size.unsigned_int_max();

        let niche = v.end.wrapping_add(1)..v.start;
        let available = niche.end.wrapping_sub(niche.start) & max_value;
        if count > available {
            return None;
        }

        // Extend the range of valid values being reserved by moving either the `v.start` or `v.end` bound.
        // Given an eventual `Option<T>`, we try to maximize the chance for `None` to occupy the niche of zero.
        // This is accomplished by preferring enums with 2 variants (`count == 1`) and always taking the shortest
        // path to niche zero. Having `None` in niche zero can enable some special optimizations.
        //
        // Bound selection criteria:
        // 1. Select the bound closest to zero given wrapping semantics.
        // 2. Avoid moving past zero if possible.
        //
        // In practice this means that enums with `count > 1` are unlikely to claim niche zero,
        // since they have to fit perfectly. If niche zero is already reserved, the selection of
        // bounds is of little interest.
        let move_start = |v: WrappingRange| {
            let start = v.start.wrapping_sub(count) & max_value;
            Some((start, Scalar::Initialized { value, valid_range: v.with_start(start) }))
        };
        let move_end = |v: WrappingRange| {
            let start = v.end.wrapping_add(1) & max_value;
            let end = v.end.wrapping_add(count) & max_value;
            Some((start, Scalar::Initialized { value, valid_range: v.with_end(end) }))
        };
        let distance_end_zero = max_value - v.end;
        if v.start > v.end {
            // Zero is unavailable because wrapping occurs.
            move_end(v)
        } else if v.start <= distance_end_zero {
            if count <= v.start {
                move_start(v)
            } else {
                // Moved past zero, use the other bound.
                move_end(v)
            }
        } else {
            let end = v.end.wrapping_add(count) & max_value;
            let overshot_zero = (1..=v.end).contains(&end);
            if overshot_zero {
                // Moved past zero, use the other bound.
                move_start(v)
            } else {
                move_end(v)
            }
        }
    }
}
#[derive(PartialEq, Eq, Hash, HashStable_Generic)]
pub struct LayoutS<'a> {
    /// Says where the fields are located within the layout.
    pub fields: FieldsShape,

    /// Encodes information about multi-variant layouts.
    /// Even with `Multiple` variants, a layout still has its own fields! Those are then
    /// shared between all variants. One of them will be the discriminant,
    /// but e.g. generators can have more.
    ///
    /// To access all fields of this layout, both `fields` and the fields of the active variant
    /// must be taken into account.
    pub variants: Variants<'a>,

    /// The `abi` defines how this data is passed between functions, and it defines
    /// value restrictions via `valid_range`.
    ///
    /// Note that this is entirely orthogonal to the recursive structure defined by
    /// `variants` and `fields`; for example, `ManuallyDrop<Result<isize, isize>>` has
    /// `Abi::ScalarPair`! So, even with non-`Aggregate` `abi`, `fields` and `variants`
    /// have to be taken into account to find all fields of this layout.
    pub abi: Abi,

    /// The leaf scalar with the largest number of invalid values
    /// (i.e., outside of its `valid_range`), if it exists.
    pub largest_niche: Option<Niche>,

    pub align: AbiAndPrefAlign,
    pub size: Size,
}
impl<'a> LayoutS<'a> {
    pub fn scalar<C: HasDataLayout>(cx: &C, scalar: Scalar) -> Self {
        let largest_niche = Niche::from_scalar(cx, Size::ZERO, scalar);
        let size = scalar.size(cx);
        let align = scalar.align(cx);
        LayoutS {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Primitive,
            abi: Abi::Scalar(scalar),
            largest_niche,
            size,
            align,
        }
    }
}
impl<'a> fmt::Debug for LayoutS<'a> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // This is how `Layout` used to print before it became
        // `Interned<LayoutS>`. We print it like this to avoid having to update
        // expected output in a lot of tests.
        let LayoutS { size, align, abi, fields, largest_niche, variants } = self;
        f.debug_struct("Layout")
            .field("size", size)
            .field("align", align)
            .field("abi", abi)
            .field("fields", fields)
            .field("largest_niche", largest_niche)
            .field("variants", variants)
            .finish()
    }
}
#[derive(Copy, Clone, PartialEq, Eq, Hash, HashStable_Generic)]
#[rustc_pass_by_value]
pub struct Layout<'a>(pub Interned<'a, LayoutS<'a>>);
impl<'a> fmt::Debug for Layout<'a> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // See comment on `<LayoutS as Debug>::fmt` above.
        self.0.0.fmt(f)
    }
}
impl<'a> Layout<'a> {
    pub fn fields(self) -> &'a FieldsShape {
        &self.0.0.fields
    }

    pub fn variants(self) -> &'a Variants<'a> {
        &self.0.0.variants
    }

    pub fn abi(self) -> Abi {
        self.0.0.abi
    }

    pub fn largest_niche(self) -> Option<Niche> {
        self.0.0.largest_niche
    }

    pub fn align(self) -> AbiAndPrefAlign {
        self.0.0.align
    }

    pub fn size(self) -> Size {
        self.0.0.size
    }
}
/// The layout of a type, alongside the type itself.
/// Provides various type traversal APIs (e.g., recursing into fields).
///
/// Note that the layout is NOT guaranteed to always be identical
/// to that obtained from `layout_of(ty)`, as we need to produce
/// layouts for which Rust types do not exist, such as enum variants
/// or synthetic fields of enums (i.e., discriminants) and fat pointers.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, HashStable_Generic)]
pub struct TyAndLayout<'a, Ty> {
    pub ty: Ty,
    pub layout: Layout<'a>,
}
impl<'a, Ty> Deref for TyAndLayout<'a, Ty> {
    type Target = &'a LayoutS<'a>;
    fn deref(&self) -> &&'a LayoutS<'a> {
        &self.layout.0.0
    }
}
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum PointerKind {
    /// Most general case; we know no restrictions to tell LLVM.
    SharedMutable,

    /// `&T` where `T` contains no `UnsafeCell`, is `dereferenceable`, `noalias` and `readonly`.
    Frozen,

    /// `&mut T` which is `dereferenceable` and `noalias` but not `readonly`.
    UniqueBorrowed,

    /// `&mut !Unpin`, which is `dereferenceable` but neither `noalias` nor `readonly`.
    UniqueBorrowedPinned,

    /// `Box<T>`, which is `noalias` (even on return types, unlike the above) but neither `readonly`
    /// nor `dereferenceable`.
    UniqueOwned,
}
#[derive(Copy, Clone, Debug)]
pub struct PointeeInfo {
    pub size: Size,
    pub align: Align,
    pub safe: Option<PointerKind>,
    pub address_space: AddressSpace,
}
/// Used in `might_permit_raw_init` to indicate the kind of initialisation
/// that is checked to be valid.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum InitKind {
    Zero,
    UninitMitigated0x01Fill,
}
/// Trait that needs to be implemented by the higher-level type representation
/// (e.g. `rustc_middle::ty::Ty`), to provide `rustc_target::abi` functionality.
pub trait TyAbiInterface<'a, C>: Sized {
    fn ty_and_layout_for_variant(
        this: TyAndLayout<'a, Self>,
        cx: &C,
        variant_index: VariantIdx,
    ) -> TyAndLayout<'a, Self>;
    fn ty_and_layout_field(this: TyAndLayout<'a, Self>, cx: &C, i: usize) -> TyAndLayout<'a, Self>;
    fn ty_and_layout_pointee_info_at(
        this: TyAndLayout<'a, Self>,
        cx: &C,
        offset: Size,
    ) -> Option<PointeeInfo>;
    fn is_adt(this: TyAndLayout<'a, Self>) -> bool;
    fn is_never(this: TyAndLayout<'a, Self>) -> bool;
    fn is_tuple(this: TyAndLayout<'a, Self>) -> bool;
    fn is_unit(this: TyAndLayout<'a, Self>) -> bool;
}
impl<'a, Ty> TyAndLayout<'a, Ty> {
    pub fn for_variant<C>(self, cx: &C, variant_index: VariantIdx) -> Self
    where
        Ty: TyAbiInterface<'a, C>,
    {
        Ty::ty_and_layout_for_variant(self, cx, variant_index)
    }

    pub fn field<C>(self, cx: &C, i: usize) -> Self
    where
        Ty: TyAbiInterface<'a, C>,
    {
        Ty::ty_and_layout_field(self, cx, i)
    }

    pub fn pointee_info_at<C>(self, cx: &C, offset: Size) -> Option<PointeeInfo>
    where
        Ty: TyAbiInterface<'a, C>,
    {
        Ty::ty_and_layout_pointee_info_at(self, cx, offset)
    }

    pub fn is_single_fp_element<C>(self, cx: &C) -> bool
    where
        Ty: TyAbiInterface<'a, C>,
        C: HasDataLayout,
    {
        match self.abi {
            Abi::Scalar(scalar) => scalar.primitive().is_float(),
            Abi::Aggregate { .. } => {
                if self.fields.count() == 1 && self.fields.offset(0).bytes() == 0 {
                    self.field(cx, 0).is_single_fp_element(cx)
                } else {
                    false
                }
            }
            _ => false,
        }
    }

    pub fn is_adt<C>(self) -> bool
    where
        Ty: TyAbiInterface<'a, C>,
    {
        Ty::is_adt(self)
    }

    pub fn is_never<C>(self) -> bool
    where
        Ty: TyAbiInterface<'a, C>,
    {
        Ty::is_never(self)
    }

    pub fn is_tuple<C>(self) -> bool
    where
        Ty: TyAbiInterface<'a, C>,
    {
        Ty::is_tuple(self)
    }

    pub fn is_unit<C>(self) -> bool
    where
        Ty: TyAbiInterface<'a, C>,
    {
        Ty::is_unit(self)
    }
}
impl<'a, Ty> TyAndLayout<'a, Ty> {
    /// Returns `true` if the layout corresponds to an unsized type.
    pub fn is_unsized(&self) -> bool {
        self.abi.is_unsized()
    }

    #[inline]
    pub fn is_sized(&self) -> bool {
        self.abi.is_sized()
    }

    /// Returns `true` if the type is a ZST and not unsized.
    pub fn is_zst(&self) -> bool {
        match self.abi {
            Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } => false,
            Abi::Uninhabited => self.size.bytes() == 0,
            Abi::Aggregate { sized } => sized && self.size.bytes() == 0,
        }
    }
}