4 use crate::spec::Target;
6 use std::convert::{TryFrom, TryInto};
9 use std::num::NonZeroUsize;
10 use std::ops::{Add, AddAssign, Deref, Mul, RangeInclusive, Sub};
11 use std::str::FromStr;
13 use rustc_data_structures::intern::Interned;
14 use rustc_index::vec::{Idx, IndexVec};
15 use rustc_macros::HashStable_Generic;
16 use rustc_serialize::json::{Json, ToJson};
20 /// Parsed [Data layout](https://llvm.org/docs/LangRef.html#data-layout)
21 /// for a target, which contains everything needed to compute layouts.
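///
/// A sketch of intended use (illustrative; assumes some `target: Target` is already in scope):
///
/// ```ignore (illustrative)
/// let dl = TargetDataLayout::parse(&target)?;
/// assert_eq!(dl.pointer_size.bits(), u64::from(target.pointer_width));
/// ```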
22 pub struct TargetDataLayout {
24 pub i1_align: AbiAndPrefAlign,
25 pub i8_align: AbiAndPrefAlign,
26 pub i16_align: AbiAndPrefAlign,
27 pub i32_align: AbiAndPrefAlign,
28 pub i64_align: AbiAndPrefAlign,
29 pub i128_align: AbiAndPrefAlign,
30 pub f32_align: AbiAndPrefAlign,
31 pub f64_align: AbiAndPrefAlign,
32 pub pointer_size: Size,
33 pub pointer_align: AbiAndPrefAlign,
34 pub aggregate_align: AbiAndPrefAlign,
36 /// Alignments for vector types.
37 pub vector_align: Vec<(Size, AbiAndPrefAlign)>,
39 pub instruction_address_space: AddressSpace,
41 /// Minimum size of `#[repr(C)]` enums (defaults to `I32`, i.e. 32 bits).
42 pub c_enum_min_size: Integer,
45 impl Default for TargetDataLayout {
46 /// Creates an instance of `TargetDataLayout`.
47 fn default() -> TargetDataLayout {
48 let align = |bits| Align::from_bits(bits).unwrap();
51 i1_align: AbiAndPrefAlign::new(align(8)),
52 i8_align: AbiAndPrefAlign::new(align(8)),
53 i16_align: AbiAndPrefAlign::new(align(16)),
54 i32_align: AbiAndPrefAlign::new(align(32)),
55 i64_align: AbiAndPrefAlign { abi: align(32), pref: align(64) },
56 i128_align: AbiAndPrefAlign { abi: align(32), pref: align(64) },
57 f32_align: AbiAndPrefAlign::new(align(32)),
58 f64_align: AbiAndPrefAlign::new(align(64)),
59 pointer_size: Size::from_bits(64),
60 pointer_align: AbiAndPrefAlign::new(align(64)),
61 aggregate_align: AbiAndPrefAlign { abi: align(0), pref: align(64) },
63 (Size::from_bits(64), AbiAndPrefAlign::new(align(64))),
64 (Size::from_bits(128), AbiAndPrefAlign::new(align(128))),
66 instruction_address_space: AddressSpace::DATA,
67 c_enum_min_size: Integer::I32,
72 impl TargetDataLayout {
73 pub fn parse(target: &Target) -> Result<TargetDataLayout, String> {
74 // Parse an address space index from a string.
75 let parse_address_space = |s: &str, cause: &str| {
76 s.parse::<u32>().map(AddressSpace).map_err(|err| {
77 format!("invalid address space `{}` for `{}` in \"data-layout\": {}", s, cause, err)
81 // Parse a bit count from a string.
82 let parse_bits = |s: &str, kind: &str, cause: &str| {
83 s.parse::<u64>().map_err(|err| {
84 format!("invalid {} `{}` for `{}` in \"data-layout\": {}", kind, s, cause, err)
88 // Parse a size string.
89 let size = |s: &str, cause: &str| parse_bits(s, "size", cause).map(Size::from_bits);
91 // Parse an alignment string.
92 let align = |s: &[&str], cause: &str| {
94 return Err(format!("missing alignment for `{}` in \"data-layout\"", cause));
96 let align_from_bits = |bits| {
97 Align::from_bits(bits).map_err(|err| {
98 format!("invalid alignment for `{}` in \"data-layout\": {}", cause, err)
101 let abi = parse_bits(s[0], "alignment", cause)?;
102 let pref = s.get(1).map_or(Ok(abi), |pref| parse_bits(pref, "alignment", cause))?;
103 Ok(AbiAndPrefAlign { abi: align_from_bits(abi)?, pref: align_from_bits(pref)? })
106 let mut dl = TargetDataLayout::default();
107 let mut i128_align_src = 64;
108 for spec in target.data_layout.split('-') {
109 let spec_parts = spec.split(':').collect::<Vec<_>>();
112 ["e"] => dl.endian = Endian::Little,
113 ["E"] => dl.endian = Endian::Big,
114 [p] if p.starts_with('P') => {
115 dl.instruction_address_space = parse_address_space(&p[1..], "P")?
117 ["a", ref a @ ..] => dl.aggregate_align = align(a, "a")?,
118 ["f32", ref a @ ..] => dl.f32_align = align(a, "f32")?,
119 ["f64", ref a @ ..] => dl.f64_align = align(a, "f64")?,
120 [p @ "p", s, ref a @ ..] | [p @ "p0", s, ref a @ ..] => {
121 dl.pointer_size = size(s, p)?;
122 dl.pointer_align = align(a, p)?;
124 [s, ref a @ ..] if s.starts_with('i') => {
125 let Ok(bits) = s[1..].parse::<u64>() else {
126 size(&s[1..], "i")?; // For the user error.
129 let a = align(a, s)?;
131 1 => dl.i1_align = a,
132 8 => dl.i8_align = a,
133 16 => dl.i16_align = a,
134 32 => dl.i32_align = a,
135 64 => dl.i64_align = a,
138 if bits >= i128_align_src && bits <= 128 {
139 // Default alignment for i128 is decided by taking the alignment of
140 // the largest-sized i{64..=128}.
141 i128_align_src = bits;
145 [s, ref a @ ..] if s.starts_with('v') => {
146 let v_size = size(&s[1..], "v")?;
147 let a = align(a, s)?;
148 if let Some(v) = dl.vector_align.iter_mut().find(|v| v.0 == v_size) {
152 // No existing entry, add a new one.
153 dl.vector_align.push((v_size, a));
155 _ => {} // Ignore everything else.
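// For illustration (a hypothetical spec string, not taken from any real target):
// given `data_layout == "e-p:64:64-i64:64-v128:128"`, this loop sets
// `endian = Endian::Little` (from `e`), a 64-bit `pointer_size`/`pointer_align`
// (from `p:64:64`), a 64-bit `i64_align`, and a 128-bit vector alignment;
// components it does not recognize fall through to the `_ => {}` arm above.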
159 // Perform consistency checks against the Target information.
160 if dl.endian != target.endian {
162 "inconsistent target specification: \"data-layout\" claims \
163 architecture is {}-endian, while \"target-endian\" is `{}`",
165 target.endian.as_str(),
169 if dl.pointer_size.bits() != target.pointer_width.into() {
171 "inconsistent target specification: \"data-layout\" claims \
172 pointers are {}-bit, while \"target-pointer-width\" is `{}`",
173 dl.pointer_size.bits(),
178 dl.c_enum_min_size = Integer::from_size(Size::from_bits(target.c_enum_min_bits))?;
183 /// Returns the exclusive upper bound on object size.
185 /// The theoretical maximum object size is defined as the maximum positive `isize` value.
186 /// This ensures that the `offset` semantics remain well-defined: a pointer can correctly
187 /// index every address within an object, as well as one byte past the end, and `isize` can
188 /// store the difference between any two pointers into an object.
190 /// The upper bound on 64-bit currently needs to be lower because LLVM uses a 64-bit integer
191 /// to represent object size in bits. It would need to be 1 << 61 to account for this, but is
192 /// currently conservatively bounded to 1 << 47 as that is enough to cover the current usable
193 /// address space on 64-bit ARMv8 and x86_64.
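///
/// For instance (illustrative; `dl` is a hypothetical `TargetDataLayout` for a 64-bit target):
///
/// ```ignore (illustrative)
/// assert_eq!(dl.obj_size_bound(), 1 << 47);
/// ```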
195 pub fn obj_size_bound(&self) -> u64 {
196 match self.pointer_size.bits() {
200 bits => panic!("obj_size_bound: unknown pointer bit size {}", bits),
205 pub fn ptr_sized_integer(&self) -> Integer {
206 match self.pointer_size.bits() {
210 bits => panic!("ptr_sized_integer: unknown pointer bit size {}", bits),
215 pub fn vector_align(&self, vec_size: Size) -> AbiAndPrefAlign {
216 for &(size, align) in &self.vector_align {
217 if size == vec_size {
221 // Default to natural alignment, which is what LLVM does.
222 // That is, use the size, rounded up to a power of 2.
223 AbiAndPrefAlign::new(Align::from_bytes(vec_size.bytes().next_power_of_two()).unwrap())
227 pub trait HasDataLayout {
228 fn data_layout(&self) -> &TargetDataLayout;
231 impl HasDataLayout for TargetDataLayout {
233 fn data_layout(&self) -> &TargetDataLayout {
238 /// Endianness of the target, which must match cfg(target-endian).
239 #[derive(Copy, Clone, PartialEq)]
246 pub fn as_str(&self) -> &'static str {
248 Self::Little => "little",
254 impl fmt::Debug for Endian {
255 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
256 f.write_str(self.as_str())
260 impl FromStr for Endian {
263 fn from_str(s: &str) -> Result<Self, Self::Err> {
265 "little" => Ok(Self::Little),
266 "big" => Ok(Self::Big),
267 _ => Err(format!(r#"unknown endian: "{}""#, s)),
272 impl ToJson for Endian {
273 fn to_json(&self) -> Json {
274 self.as_str().to_json()
278 /// Size of a type in bytes.
279 #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Encodable, Decodable)]
280 #[derive(HashStable_Generic)]
285 // This is debug-printed a lot in larger structs; don't waste too much space there.
286 impl fmt::Debug for Size {
287 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
288 write!(f, "Size({} bytes)", self.bytes())
293 pub const ZERO: Size = Size { raw: 0 };
295 /// Rounds `bits` up to the next-higher byte boundary, if `bits` is
296 /// not a multiple of 8.
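///
/// For example:
///
/// ```ignore (illustrative)
/// assert_eq!(Size::from_bits(3).bytes(), 1);  // 3 bits round up to a whole byte
/// assert_eq!(Size::from_bits(16).bytes(), 2); // already a multiple of 8
/// ```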
297 pub fn from_bits(bits: impl TryInto<u64>) -> Size {
298 let bits = bits.try_into().ok().unwrap();
299 // Avoid potential overflow from `bits + 7`.
300 Size { raw: bits / 8 + ((bits % 8) + 7) / 8 }
304 pub fn from_bytes(bytes: impl TryInto<u64>) -> Size {
305 let bytes: u64 = bytes.try_into().ok().unwrap();
310 pub fn bytes(self) -> u64 {
315 pub fn bytes_usize(self) -> usize {
316 self.bytes().try_into().unwrap()
320 pub fn bits(self) -> u64 {
322 fn overflow(bytes: u64) -> ! {
323 panic!("Size::bits: {} bytes in bits doesn't fit in u64", bytes)
326 self.bytes().checked_mul(8).unwrap_or_else(|| overflow(self.bytes()))
330 pub fn bits_usize(self) -> usize {
331 self.bits().try_into().unwrap()
335 pub fn align_to(self, align: Align) -> Size {
336 let mask = align.bytes() - 1;
337 Size::from_bytes((self.bytes() + mask) & !mask)
341 pub fn is_aligned(self, align: Align) -> bool {
342 let mask = align.bytes() - 1;
343 self.bytes() & mask == 0
347 pub fn checked_add<C: HasDataLayout>(self, offset: Size, cx: &C) -> Option<Size> {
348 let dl = cx.data_layout();
350 let bytes = self.bytes().checked_add(offset.bytes())?;
352 if bytes < dl.obj_size_bound() { Some(Size::from_bytes(bytes)) } else { None }
356 pub fn checked_mul<C: HasDataLayout>(self, count: u64, cx: &C) -> Option<Size> {
357 let dl = cx.data_layout();
359 let bytes = self.bytes().checked_mul(count)?;
360 if bytes < dl.obj_size_bound() { Some(Size::from_bytes(bytes)) } else { None }
363 /// Truncates `value` to `self` bits and then sign-extends it to 128 bits
364 /// (i.e., if it is negative, fill with 1's on the left).
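///
/// For example:
///
/// ```ignore (illustrative)
/// let size = Size::from_bits(8);
/// assert_eq!(size.sign_extend(0xff), u128::MAX); // 0xff is -1 as an 8-bit value
/// assert_eq!(size.sign_extend(0x7f), 0x7f);      // positive values are unchanged
/// ```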
366 pub fn sign_extend(self, value: u128) -> u128 {
367 let size = self.bits();
369 // Truncated until nothing is left.
373 let shift = 128 - size;
374 // Shift the unsigned value to the left, then shift back to the right as signed
375 // (essentially fills with sign bit on the left).
376 (((value << shift) as i128) >> shift) as u128
379 /// Truncates `value` to `self` bits.
381 pub fn truncate(self, value: u128) -> u128 {
382 let size = self.bits();
384 // Truncated until nothing is left.
387 let shift = 128 - size;
388 // Truncate (shift left to drop out leftover values, shift right to fill with zeroes).
389 (value << shift) >> shift
393 pub fn signed_int_min(&self) -> i128 {
394 self.sign_extend(1_u128 << (self.bits() - 1)) as i128
398 pub fn signed_int_max(&self) -> i128 {
399 i128::MAX >> (128 - self.bits())
403 pub fn unsigned_int_max(&self) -> u128 {
404 u128::MAX >> (128 - self.bits())
408 // Panicking addition, subtraction and multiplication for convenience.
409 // Avoid these during layout computation; return `LayoutError` instead.
414 fn add(self, other: Size) -> Size {
415 Size::from_bytes(self.bytes().checked_add(other.bytes()).unwrap_or_else(|| {
416 panic!("Size::add: {} + {} doesn't fit in u64", self.bytes(), other.bytes())
424 fn sub(self, other: Size) -> Size {
425 Size::from_bytes(self.bytes().checked_sub(other.bytes()).unwrap_or_else(|| {
426 panic!("Size::sub: {} - {} would result in negative size", self.bytes(), other.bytes())
431 impl Mul<Size> for u64 {
434 fn mul(self, size: Size) -> Size {
439 impl Mul<u64> for Size {
442 fn mul(self, count: u64) -> Size {
443 match self.bytes().checked_mul(count) {
444 Some(bytes) => Size::from_bytes(bytes),
445 None => panic!("Size::mul: {} * {} doesn't fit in u64", self.bytes(), count),
450 impl AddAssign for Size {
452 fn add_assign(&mut self, other: Size) {
453 *self = *self + other;
459 fn steps_between(start: &Self, end: &Self) -> Option<usize> {
460 u64::steps_between(&start.bytes(), &end.bytes())
464 fn forward_checked(start: Self, count: usize) -> Option<Self> {
465 u64::forward_checked(start.bytes(), count).map(Self::from_bytes)
469 fn forward(start: Self, count: usize) -> Self {
470 Self::from_bytes(u64::forward(start.bytes(), count))
474 unsafe fn forward_unchecked(start: Self, count: usize) -> Self {
475 Self::from_bytes(u64::forward_unchecked(start.bytes(), count))
479 fn backward_checked(start: Self, count: usize) -> Option<Self> {
480 u64::backward_checked(start.bytes(), count).map(Self::from_bytes)
484 fn backward(start: Self, count: usize) -> Self {
485 Self::from_bytes(u64::backward(start.bytes(), count))
489 unsafe fn backward_unchecked(start: Self, count: usize) -> Self {
490 Self::from_bytes(u64::backward_unchecked(start.bytes(), count))
494 /// Alignment of a type in bytes (always a power of two).
495 #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Encodable, Decodable)]
496 #[derive(HashStable_Generic)]
501 // This is debug-printed a lot in larger structs; don't waste too much space there.
502 impl fmt::Debug for Align {
503 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
504 write!(f, "Align({} bytes)", self.bytes())
509 pub const ONE: Align = Align { pow2: 0 };
512 pub fn from_bits(bits: u64) -> Result<Align, String> {
513 Align::from_bytes(Size::from_bits(bits).bytes())
517 pub fn from_bytes(align: u64) -> Result<Align, String> {
518 // Treat an alignment of 0 bytes like 1-byte alignment.
520 return Ok(Align::ONE);
524 fn not_power_of_2(align: u64) -> String {
525 format!("`{}` is not a power of 2", align)
529 fn too_large(align: u64) -> String {
530 format!("`{}` is too large", align)
533 let mut bytes = align;
534 let mut pow2: u8 = 0;
535 while (bytes & 1) == 0 {
540 return Err(not_power_of_2(align));
543 return Err(too_large(align));
550 pub fn bytes(self) -> u64 {
555 pub fn bits(self) -> u64 {
559 /// Computes the best alignment possible for the given offset
560 /// (the largest power of two that the offset is a multiple of).
562 /// N.B., for an offset of `0`, this happens to return `2^64`.
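///
/// For example, an offset of 24 bytes has three trailing zero bits, so the best
/// alignment it can guarantee is 8 bytes:
///
/// ```ignore (illustrative)
/// assert_eq!(Align::max_for_offset(Size::from_bytes(24)), Align::from_bytes(8).unwrap());
/// ```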
564 pub fn max_for_offset(offset: Size) -> Align {
565 Align { pow2: offset.bytes().trailing_zeros() as u8 }
568 /// Lower the alignment, if necessary, such that the given offset
569 /// is aligned to it (the offset is a multiple of the alignment).
571 pub fn restrict_for_offset(self, offset: Size) -> Align {
572 self.min(Align::max_for_offset(offset))
576 /// A pair of alignments, ABI-mandated and preferred.
577 #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, Encodable, Decodable)]
578 #[derive(HashStable_Generic)]
579 pub struct AbiAndPrefAlign {
584 impl AbiAndPrefAlign {
586 pub fn new(align: Align) -> AbiAndPrefAlign {
587 AbiAndPrefAlign { abi: align, pref: align }
591 pub fn min(self, other: AbiAndPrefAlign) -> AbiAndPrefAlign {
592 AbiAndPrefAlign { abi: self.abi.min(other.abi), pref: self.pref.min(other.pref) }
596 pub fn max(self, other: AbiAndPrefAlign) -> AbiAndPrefAlign {
597 AbiAndPrefAlign { abi: self.abi.max(other.abi), pref: self.pref.max(other.pref) }
601 /// Integers, also used for enum discriminants.
602 #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, HashStable_Generic)]
613 pub fn size(self) -> Size {
615 I8 => Size::from_bytes(1),
616 I16 => Size::from_bytes(2),
617 I32 => Size::from_bytes(4),
618 I64 => Size::from_bytes(8),
619 I128 => Size::from_bytes(16),
623 pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAndPrefAlign {
624 let dl = cx.data_layout();
631 I128 => dl.i128_align,
635 /// Finds the smallest Integer type which can represent the signed value.
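///
/// For example:
///
/// ```ignore (illustrative)
/// assert_eq!(Integer::fit_signed(-128), Integer::I8);
/// assert_eq!(Integer::fit_signed(128), Integer::I16);
/// ```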
637 pub fn fit_signed(x: i128) -> Integer {
639 -0x0000_0000_0000_0080..=0x0000_0000_0000_007f => I8,
640 -0x0000_0000_0000_8000..=0x0000_0000_0000_7fff => I16,
641 -0x0000_0000_8000_0000..=0x0000_0000_7fff_ffff => I32,
642 -0x8000_0000_0000_0000..=0x7fff_ffff_ffff_ffff => I64,
647 /// Finds the smallest Integer type which can represent the unsigned value.
649 pub fn fit_unsigned(x: u128) -> Integer {
651 0..=0x0000_0000_0000_00ff => I8,
652 0..=0x0000_0000_0000_ffff => I16,
653 0..=0x0000_0000_ffff_ffff => I32,
654 0..=0xffff_ffff_ffff_ffff => I64,
659 /// Finds the smallest integer with the given alignment.
660 pub fn for_align<C: HasDataLayout>(cx: &C, wanted: Align) -> Option<Integer> {
661 let dl = cx.data_layout();
663 for candidate in [I8, I16, I32, I64, I128] {
664 if wanted == candidate.align(dl).abi && wanted.bytes() == candidate.size().bytes() {
665 return Some(candidate);
671 /// Finds the largest integer with the given alignment or less.
672 pub fn approximate_align<C: HasDataLayout>(cx: &C, wanted: Align) -> Integer {
673 let dl = cx.data_layout();
675 // FIXME(eddyb) maybe include I128 in the future, when it works everywhere.
676 for candidate in [I64, I32, I16] {
677 if wanted >= candidate.align(dl).abi && wanted.bytes() >= candidate.size().bytes() {
684 // FIXME(eddyb) consolidate this and other methods that find the appropriate
685 // `Integer` given some requirements.
687 fn from_size(size: Size) -> Result<Self, String> {
689 8 => Ok(Integer::I8),
690 16 => Ok(Integer::I16),
691 32 => Ok(Integer::I32),
692 64 => Ok(Integer::I64),
693 128 => Ok(Integer::I128),
694 _ => Err(format!("rust does not support integers with {} bits", size.bits())),
699 /// Fundamental unit of memory access and layout.
700 #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
702 /// The `bool` is the signedness of the `Integer` type.
704 /// One would think we would not care about such details this low down,
705 /// but some ABIs are described in terms of C types and ISAs where the
706 /// integer arithmetic is done on {sign,zero}-extended registers, e.g.
707 /// a negative integer passed by zero-extension will appear positive in
708 /// the callee, and most operations on it will produce the wrong values.
716 pub fn size<C: HasDataLayout>(self, cx: &C) -> Size {
717 let dl = cx.data_layout();
720 Int(i, _) => i.size(),
721 F32 => Size::from_bits(32),
722 F64 => Size::from_bits(64),
723 Pointer => dl.pointer_size,
727 pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAndPrefAlign {
728 let dl = cx.data_layout();
731 Int(i, _) => i.align(dl),
734 Pointer => dl.pointer_align,
738 // FIXME(eddyb) remove, it's trivial thanks to `matches!`.
740 pub fn is_float(self) -> bool {
741 matches!(self, F32 | F64)
744 // FIXME(eddyb) remove, it's completely unused.
746 pub fn is_int(self) -> bool {
747 matches!(self, Int(..))
751 /// Inclusive wrap-around range of valid values, that is, if
752 /// start > end, it represents `start..=MAX`,
753 /// followed by `0..=end`.
755 /// That is, for an i8 primitive, a range of `254..=2` means the following sequence:
758 /// 254 (-2), 255 (-1), 0, 1, 2
760 /// This is intended specifically to mirror LLVM’s `!range` metadata semantics.
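///
/// For example (illustrative), the `254..=2` range above behaves as follows:
///
/// ```ignore (illustrative)
/// let r = WrappingRange { start: 254, end: 2 };
/// assert!(r.contains(255) && r.contains(0) && r.contains(2));
/// assert!(!r.contains(10)); // 3..=253 lie outside the range
/// ```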
761 #[derive(Clone, Copy, PartialEq, Eq, Hash)]
762 #[derive(HashStable_Generic)]
763 pub struct WrappingRange {
769 pub fn full(size: Size) -> Self {
770 Self { start: 0, end: size.unsigned_int_max() }
773 /// Returns `true` if `v` is contained in the range.
775 pub fn contains(&self, v: u128) -> bool {
776 if self.start <= self.end {
777 self.start <= v && v <= self.end
779 self.start <= v || v <= self.end
783 /// Returns `self` with the `start` replaced.
785 pub fn with_start(mut self, start: u128) -> Self {
790 /// Returns `self` with the `end` replaced.
792 pub fn with_end(mut self, end: u128) -> Self {
797 /// Returns `true` if `size` completely fills the range.
799 pub fn is_full_for(&self, size: Size) -> bool {
800 let max_value = size.unsigned_int_max();
801 debug_assert!(self.start <= max_value && self.end <= max_value);
802 self.start == (self.end.wrapping_add(1) & max_value)
806 impl fmt::Debug for WrappingRange {
807 fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
808 if self.start > self.end {
809 write!(fmt, "(..={}) | ({}..)", self.end, self.start)?;
811 write!(fmt, "{}..={}", self.start, self.end)?;
817 /// Information about one scalar component of a Rust type.
818 #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
819 #[derive(HashStable_Generic)]
824 // FIXME(eddyb) always use the shortest range, e.g., by finding
825 // the largest space between two consecutive valid values and
826 // taking everything else as the (shortest) valid range.
827 valid_range: WrappingRange,
830 /// Even for unions, we need to use the correct registers for the kind of
831 /// values inside the union, so we keep the `Primitive` type around. We
832 /// also use it to compute the size of the scalar.
833 /// However, unions never have niches and even allow undef,
834 /// so there is no `valid_range`.
841 pub fn is_bool(&self) -> bool {
844 Scalar::Initialized {
845 value: Int(I8, false),
846 valid_range: WrappingRange { start: 0, end: 1 }
851 /// Get the primitive representation of this type, ignoring the valid range and whether the
852 /// value is allowed to be undefined (due to being a union).
853 pub fn primitive(&self) -> Primitive {
855 Scalar::Initialized { value, .. } | Scalar::Union { value } => value,
859 pub fn align(self, cx: &impl HasDataLayout) -> AbiAndPrefAlign {
860 self.primitive().align(cx)
863 pub fn size(self, cx: &impl HasDataLayout) -> Size {
864 self.primitive().size(cx)
868 pub fn to_union(&self) -> Self {
869 Self::Union { value: self.primitive() }
873 pub fn valid_range(&self, cx: &impl HasDataLayout) -> WrappingRange {
875 Scalar::Initialized { valid_range, .. } => valid_range,
876 Scalar::Union { value } => WrappingRange::full(value.size(cx)),
881 /// Allows the caller to mutate the valid range. This operation will panic if attempted on a union.
882 pub fn valid_range_mut(&mut self) -> &mut WrappingRange {
884 Scalar::Initialized { valid_range, .. } => valid_range,
885 Scalar::Union { .. } => panic!("cannot change the valid range of a union"),
889 /// Returns `true` if all possible numbers are valid, i.e., `valid_range` covers the whole layout.
891 pub fn is_always_valid<C: HasDataLayout>(&self, cx: &C) -> bool {
893 Scalar::Initialized { valid_range, .. } => valid_range.is_full_for(self.size(cx)),
894 Scalar::Union { .. } => true,
899 /// Describes how the fields of a type are located in memory.
900 #[derive(PartialEq, Eq, Hash, Debug, HashStable_Generic)]
901 pub enum FieldsShape {
902 /// Scalar primitives and `!`, which never have fields.
905 /// All fields start at no offset. The `NonZeroUsize` is the field count.
908 /// Array/vector-like placement, with all fields of identical types.
909 Array { stride: Size, count: u64 },
911 /// Struct-like placement, with precomputed offsets.
913 /// Fields are guaranteed to not overlap, but note that gaps
914 /// before, between and after all the fields are NOT always
915 /// padding, and as such their contents may not be discarded.
916 /// For example, enum variants leave a gap at the start,
917 /// where the discriminant field in the enum layout goes.
919 /// Offsets for the first byte of each field,
920 /// ordered to match the source definition order.
921 /// This vector does not go in increasing order.
922 // FIXME(eddyb) use small vector optimization for the common case.
925 /// Maps source order field indices to memory order indices,
926 /// depending on how the fields were reordered (if at all).
927 /// This is a permutation, with both the source order and the
928 /// memory order using the same (0..n) index ranges.
930 /// Note that during computation of `memory_index`, sometimes
931 /// it is easier to operate on the inverse mapping (that is,
932 /// from memory order to source order), and that is usually
933 /// named `inverse_memory_index`.
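///
/// For example (illustrative): if source fields `0, 1, 2` end up laid out in
/// memory in the order `2, 0, 1`, then `memory_index` is `[1, 2, 0]` (source
/// field 0 sits at memory position 1, and so on), while the corresponding
/// `inverse_memory_index` is `[2, 0, 1]`.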
935 // FIXME(eddyb) build a better abstraction for permutations, if possible.
936 // FIXME(camlorn) also consider small vector optimization here.
937 memory_index: Vec<u32>,
943 pub fn count(&self) -> usize {
945 FieldsShape::Primitive => 0,
946 FieldsShape::Union(count) => count.get(),
947 FieldsShape::Array { count, .. } => count.try_into().unwrap(),
948 FieldsShape::Arbitrary { ref offsets, .. } => offsets.len(),
953 pub fn offset(&self, i: usize) -> Size {
955 FieldsShape::Primitive => {
956 unreachable!("FieldsShape::offset: `Primitive`s have no fields")
958 FieldsShape::Union(count) => {
961 "tried to access field {} of union with {} fields",
967 FieldsShape::Array { stride, count } => {
968 let i = u64::try_from(i).unwrap();
972 FieldsShape::Arbitrary { ref offsets, .. } => offsets[i],
977 pub fn memory_index(&self, i: usize) -> usize {
979 FieldsShape::Primitive => {
980 unreachable!("FieldsShape::memory_index: `Primitive`s have no fields")
982 FieldsShape::Union(_) | FieldsShape::Array { .. } => i,
983 FieldsShape::Arbitrary { ref memory_index, .. } => memory_index[i].try_into().unwrap(),
987 /// Gets source indices of the fields by increasing offsets.
989 pub fn index_by_increasing_offset<'a>(&'a self) -> impl Iterator<Item = usize> + 'a {
990 let mut inverse_small = [0u8; 64];
991 let mut inverse_big = vec![];
992 let use_small = self.count() <= inverse_small.len();
994 // We have to write this logic twice in order to keep the array small.
995 if let FieldsShape::Arbitrary { ref memory_index, .. } = *self {
997 for i in 0..self.count() {
998 inverse_small[memory_index[i] as usize] = i as u8;
1001 inverse_big = vec![0; self.count()];
1002 for i in 0..self.count() {
1003 inverse_big[memory_index[i] as usize] = i as u32;
1008 (0..self.count()).map(move |i| match *self {
1009 FieldsShape::Primitive | FieldsShape::Union(_) | FieldsShape::Array { .. } => i,
1010 FieldsShape::Arbitrary { .. } => {
1012 inverse_small[i] as usize
1014 inverse_big[i] as usize
1021 /// An identifier that specifies the address space that some operation
1022 /// should operate on. Special address spaces have an effect on code generation,
1023 /// depending on the target and the address spaces it implements.
1024 #[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
1025 pub struct AddressSpace(pub u32);
1028 /// The default address space, corresponding to data space.
1029 pub const DATA: Self = AddressSpace(0);
1032 /// Describes how values of the type are passed by target ABIs,
1033 /// in terms of categories of C types there are ABI rules for.
1034 #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
1038 ScalarPair(Scalar, Scalar),
1044 /// If `true`, the size is exact; otherwise it's only a lower bound.
1050 /// Returns `true` if the layout corresponds to an unsized type.
1052 pub fn is_unsized(&self) -> bool {
1054 Abi::Uninhabited | Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } => false,
1055 Abi::Aggregate { sized } => !sized,
1059 /// Returns `true` if this is a single signed integer scalar.
1061 pub fn is_signed(&self) -> bool {
1063 Abi::Scalar(scal) => match scal.primitive() {
1064 Primitive::Int(_, signed) => signed,
1067 _ => panic!("`is_signed` on non-scalar ABI {:?}", self),
1071 /// Returns `true` if this is an uninhabited type.
1073 pub fn is_uninhabited(&self) -> bool {
1074 matches!(*self, Abi::Uninhabited)
1077 /// Returns `true` if this is a scalar type.
1079 pub fn is_scalar(&self) -> bool {
1080 matches!(*self, Abi::Scalar(_))
1084 rustc_index::newtype_index! {
1085 pub struct VariantIdx {
1086 derive [HashStable_Generic]
1090 #[derive(PartialEq, Eq, Hash, Debug, HashStable_Generic)]
1091 pub enum Variants<'a> {
1092 /// Single-variant enums, structs/tuples, unions, and all non-ADTs.
1093 Single { index: VariantIdx },
1095 /// Enum-likes with more than one inhabited variant: each variant comes with
1096 /// a *discriminant* (usually the same as the variant index but the user can
1097 /// assign explicit discriminant values). That discriminant is encoded
1098 /// as a *tag* on the machine. The layout of each variant is
1099 /// a struct, and they all have space reserved for the tag.
1100 /// For enums, the tag is the sole field of the layout.
1103 tag_encoding: TagEncoding,
1105 variants: IndexVec<VariantIdx, Layout<'a>>,
1109 #[derive(PartialEq, Eq, Hash, Debug, HashStable_Generic)]
1110 pub enum TagEncoding {
1111 /// The tag directly stores the discriminant, but possibly with a smaller layout
1112 /// (so converting the tag to the discriminant can require sign extension).
1115 /// Niche (values invalid for a type) encoding the discriminant:
1116 /// Discriminant and variant index coincide.
1117 /// The variant `dataful_variant` contains a niche at an arbitrary
1118 /// offset (field `tag_field` of the enum), which for a variant with
1119 /// discriminant `d` is set to
1120 /// `(d - niche_variants.start).wrapping_add(niche_start)`.
1122 /// For example, `Option<(usize, &T)>` is represented such that
1123 /// `None` has a null pointer for the second tuple field, and
1124 /// `Some` is the identity function (with a non-null reference).
1126 dataful_variant: VariantIdx,
1127 niche_variants: RangeInclusive<VariantIdx>,
1132 #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
1135 pub value: Primitive,
1136 pub valid_range: WrappingRange,
1140 pub fn from_scalar<C: HasDataLayout>(cx: &C, offset: Size, scalar: Scalar) -> Option<Self> {
1141 let Scalar::Initialized { value, valid_range } = scalar else { return None };
1142 let niche = Niche { offset, value, valid_range };
1143 if niche.available(cx) > 0 { Some(niche) } else { None }
1146 pub fn available<C: HasDataLayout>(&self, cx: &C) -> u128 {
1147 let Self { value, valid_range: v, .. } = *self;
1148 let size = value.size(cx);
1149 assert!(size.bits() <= 128);
1150 let max_value = size.unsigned_int_max();
1152 // Find out how many values are outside the valid range.
1153 let niche = v.end.wrapping_add(1)..v.start;
1154 niche.end.wrapping_sub(niche.start) & max_value
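// Worked illustration (hypothetical scalar): an 8-bit value restricted to
// `valid_range = 0..=1` (a `bool`-like scalar) gives `niche = 2..0`, and
// `0u128.wrapping_sub(2) & 0xff == 254`, i.e. 254 values are available.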
1157 pub fn reserve<C: HasDataLayout>(&self, cx: &C, count: u128) -> Option<(u128, Scalar)> {
1160 let Self { value, valid_range: v, .. } = *self;
1161 let size = value.size(cx);
1162 assert!(size.bits() <= 128);
1163 let max_value = size.unsigned_int_max();
1165 let niche = v.end.wrapping_add(1)..v.start;
1166 let available = niche.end.wrapping_sub(niche.start) & max_value;
1167 if count > available {
1171 // Extend the range of valid values being reserved by moving either `v.start` or `v.end` bound.
1172 // Given an eventual `Option<T>`, we try to maximize the chance for `None` to occupy the niche of zero.
1173 // This is accomplished by preferring enums with 2 variants (`count == 1`) and always taking the shortest path to niche zero.
1174 // Having `None` in niche zero can enable some special optimizations.
1176 // Bound selection criteria:
1177 // 1. Select closest to zero given wrapping semantics.
1178 // 2. Avoid moving past zero if possible.
1180 // In practice this means that enums with `count > 1` are unlikely to claim niche zero, since they have to fit perfectly.
1181 // If niche zero is already reserved, the selection of bounds is of little interest.
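//
// Worked illustration (hypothetical scalar): reserving `count == 1` in an 8-bit
// `bool` niche (`valid_range = 0..=1`): `v.start == 0`, so the range cannot grow
// downwards without moving past zero, so `move_end` is taken instead; the reserved
// value is 2 and the resulting valid range is `0..=2`.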
1182 let move_start = |v: WrappingRange| {
1183 let start = v.start.wrapping_sub(count) & max_value;
1184 Some((start, Scalar::Initialized { value, valid_range: v.with_start(start) }))
1186 let move_end = |v: WrappingRange| {
1187 let start = v.end.wrapping_add(1) & max_value;
1188 let end = v.end.wrapping_add(count) & max_value;
1189 Some((start, Scalar::Initialized { value, valid_range: v.with_end(end) }))
1191 let distance_end_zero = max_value - v.end;
1192 if v.start > v.end {
1193 // zero is unavailable because wrapping occurs
1195 } else if v.start <= distance_end_zero {
1196 if count <= v.start {
1199 // moved past zero, use other bound
1203 let end = v.end.wrapping_add(count) & max_value;
1204 let overshot_zero = (1..=v.end).contains(&end);
1206 // moved past zero, use other bound
1215 #[derive(PartialEq, Eq, Hash, HashStable_Generic)]
1216 pub struct LayoutS<'a> {
1217 /// Says where the fields are located within the layout.
1218 pub fields: FieldsShape,
1220 /// Encodes information about multi-variant layouts.
1221 /// Even with `Multiple` variants, a layout still has its own fields! Those are then
1222 /// shared between all variants. One of them will be the discriminant,
1223 /// but e.g. generators can have more.
1225 /// To access all fields of this layout, both `fields` and the fields of the active variant
1226 /// must be taken into account.
1227 pub variants: Variants<'a>,
1229 /// The `abi` defines how this data is passed between functions, and it defines
1230 /// value restrictions via `valid_range`.
1232 /// Note that this is entirely orthogonal to the recursive structure defined by
1233 /// `variants` and `fields`; for example, `ManuallyDrop<Result<isize, isize>>` has
1234 /// `Abi::ScalarPair`! So, even with non-`Aggregate` `abi`, `fields` and `variants`
1235 /// have to be taken into account to find all fields of this layout.
1238 /// The leaf scalar with the largest number of invalid values
1239 /// (i.e. outside of its `valid_range`), if it exists.
1240 pub largest_niche: Option<Niche>,
1242 pub align: AbiAndPrefAlign,
1246 impl<'a> LayoutS<'a> {
1247 pub fn scalar<C: HasDataLayout>(cx: &C, scalar: Scalar) -> Self {
1248 let largest_niche = Niche::from_scalar(cx, Size::ZERO, scalar);
1249 let size = scalar.size(cx);
1250 let align = scalar.align(cx);
1252 variants: Variants::Single { index: VariantIdx::new(0) },
1253 fields: FieldsShape::Primitive,
1254 abi: Abi::Scalar(scalar),
1262 impl<'a> fmt::Debug for LayoutS<'a> {
1263 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1264 // This is how `Layout` used to print before it became
1265 // `Interned<LayoutS>`. We print it like this to avoid having to update
1266 // expected output in a lot of tests.
1267 f.debug_struct("Layout")
1268 .field("fields", &self.fields)
1269 .field("variants", &self.variants)
1270 .field("abi", &self.abi)
1271 .field("largest_niche", &self.largest_niche)
1272 .field("align", &self.align)
1273 .field("size", &self.size)
1278 #[derive(Copy, Clone, PartialEq, Eq, Hash, HashStable_Generic)]
1279 #[rustc_pass_by_value]
1280 pub struct Layout<'a>(pub Interned<'a, LayoutS<'a>>);
1282 impl<'a> fmt::Debug for Layout<'a> {
1283 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1284 // See comment on `<LayoutS as Debug>::fmt` above.
1289 impl<'a> Layout<'a> {
1290 pub fn fields(self) -> &'a FieldsShape {
1294 pub fn variants(self) -> &'a Variants<'a> {
1298 pub fn abi(self) -> Abi {
1302 pub fn largest_niche(self) -> Option<Niche> {
1303 self.0.0.largest_niche
1306 pub fn align(self) -> AbiAndPrefAlign {
1310 pub fn size(self) -> Size {
1315 /// The layout of a type, alongside the type itself.
1316 /// Provides various type traversal APIs (e.g., recursing into fields).
1318 /// Note that the layout is NOT guaranteed to always be identical
1319 /// to that obtained from `layout_of(ty)`, as we need to produce
1320 /// layouts for which Rust types do not exist, such as enum variants
1321 /// or synthetic fields of enums (i.e., discriminants) and fat pointers.
1322 #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, HashStable_Generic)]
1323 pub struct TyAndLayout<'a, Ty> {
1325 pub layout: Layout<'a>,
1328 impl<'a, Ty> Deref for TyAndLayout<'a, Ty> {
1329 type Target = &'a LayoutS<'a>;
1330 fn deref(&self) -> &&'a LayoutS<'a> {
1335 #[derive(Copy, Clone, PartialEq, Eq, Debug)]
1336 pub enum PointerKind {
1337 /// Most general case; we know of no restrictions to tell LLVM.
1340 /// `&T` where `T` contains no `UnsafeCell`, is `noalias` and `readonly`.
1343 /// `&mut T` which is `noalias` but not `readonly`.
1346 /// `Box<T>`; unlike `UniqueBorrowed`, it also has `noalias` on returns.
1350 #[derive(Copy, Clone, Debug)]
1351 pub struct PointeeInfo {
1354 pub safe: Option<PointerKind>,
1355 pub address_space: AddressSpace,
1358 /// Trait that needs to be implemented by the higher-level type representation
1359 /// (e.g. `rustc_middle::ty::Ty`), to provide `rustc_target::abi` functionality.
1360 pub trait TyAbiInterface<'a, C>: Sized {
1361 fn ty_and_layout_for_variant(
1362 this: TyAndLayout<'a, Self>,
1364 variant_index: VariantIdx,
1365 ) -> TyAndLayout<'a, Self>;
1366 fn ty_and_layout_field(this: TyAndLayout<'a, Self>, cx: &C, i: usize) -> TyAndLayout<'a, Self>;
1367 fn ty_and_layout_pointee_info_at(
1368 this: TyAndLayout<'a, Self>,
1371 ) -> Option<PointeeInfo>;
1372 fn is_adt(this: TyAndLayout<'a, Self>) -> bool;
1373 fn is_never(this: TyAndLayout<'a, Self>) -> bool;
1374 fn is_tuple(this: TyAndLayout<'a, Self>) -> bool;
1375 fn is_unit(this: TyAndLayout<'a, Self>) -> bool;
1378 impl<'a, Ty> TyAndLayout<'a, Ty> {
1379 pub fn for_variant<C>(self, cx: &C, variant_index: VariantIdx) -> Self
1381 Ty: TyAbiInterface<'a, C>,
1383 Ty::ty_and_layout_for_variant(self, cx, variant_index)
1386 pub fn field<C>(self, cx: &C, i: usize) -> Self
1388 Ty: TyAbiInterface<'a, C>,
1390 Ty::ty_and_layout_field(self, cx, i)
1393 pub fn pointee_info_at<C>(self, cx: &C, offset: Size) -> Option<PointeeInfo>
1395 Ty: TyAbiInterface<'a, C>,
1397 Ty::ty_and_layout_pointee_info_at(self, cx, offset)
1400 pub fn is_single_fp_element<C>(self, cx: &C) -> bool
1402 Ty: TyAbiInterface<'a, C>,
1406 Abi::Scalar(scalar) => scalar.primitive().is_float(),
1407 Abi::Aggregate { .. } => {
1408 if self.fields.count() == 1 && self.fields.offset(0).bytes() == 0 {
1409 self.field(cx, 0).is_single_fp_element(cx)
1418 pub fn is_adt<C>(self) -> bool
1420 Ty: TyAbiInterface<'a, C>,
1425 pub fn is_never<C>(self) -> bool
1427 Ty: TyAbiInterface<'a, C>,
1432 pub fn is_tuple<C>(self) -> bool
1434 Ty: TyAbiInterface<'a, C>,
1439 pub fn is_unit<C>(self) -> bool
1441 Ty: TyAbiInterface<'a, C>,
1447 impl<'a, Ty> TyAndLayout<'a, Ty> {
1448 /// Returns `true` if the layout corresponds to an unsized type.
1449 pub fn is_unsized(&self) -> bool {
1450 self.abi.is_unsized()
1453 /// Returns `true` if the type is a ZST and not unsized.
1454 pub fn is_zst(&self) -> bool {
1456 Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } => false,
1457 Abi::Uninhabited => self.size.bytes() == 0,
1458 Abi::Aggregate { sized } => sized && self.size.bytes() == 0,
1462 /// Determines if this type permits "raw" initialization by just transmuting some
1463 /// memory into an instance of `T`.
1464 /// `zero` indicates if the memory is zero-initialized, or alternatively
1465 /// left entirely uninitialized.
1466 /// This is conservative: when in doubt, it will answer `true`.
1468 /// FIXME: Once we have removed all the conservatism, we could alternatively
1469 /// create an all-0/all-undef constant and run the const value validator to see if
1470 /// this is a valid value for the given type.
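///
/// For illustration (hypothetical `cx` and `bool_layout`, not runnable here):
///
/// ```ignore (illustrative)
/// // A `bool` scalar is `Int(I8, false)` with `valid_range == 0..=1`.
/// assert!(bool_layout.might_permit_raw_init(cx, /* zero */ true));   // 0 is a valid value
/// assert!(!bool_layout.might_permit_raw_init(cx, /* zero */ false)); // 2..=255 are not
/// ```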
1471 pub fn might_permit_raw_init<C>(self, cx: &C, zero: bool) -> bool
1474 Ty: TyAbiInterface<'a, C>,
1477 let scalar_allows_raw_init = move |s: Scalar| -> bool {
1479 // The range must contain 0.
1480 s.valid_range(cx).contains(0)
1482 // The range must include all values.
1483 s.is_always_valid(cx)
1488 let valid = match self.abi {
1489 Abi::Uninhabited => false, // definitely UB
1490 Abi::Scalar(s) => scalar_allows_raw_init(s),
1491 Abi::ScalarPair(s1, s2) => scalar_allows_raw_init(s1) && scalar_allows_raw_init(s2),
1492 Abi::Vector { element: s, count } => count == 0 || scalar_allows_raw_init(s),
1493 Abi::Aggregate { .. } => true, // Fields are checked below.
1496 // This is definitely not okay.
1500 // If we have not found an error yet, we need to recursively descend into fields.
1501 match &self.fields {
1502 FieldsShape::Primitive | FieldsShape::Union { .. } => {}
1503 FieldsShape::Array { .. } => {
1504 // FIXME(#66151): For now, we are conservative and do not check arrays.
1506 FieldsShape::Arbitrary { offsets, .. } => {
1507 for idx in 0..offsets.len() {
1508 if !self.field(cx, idx).might_permit_raw_init(cx, zero) {
1509 // We found a field that is unhappy with this kind of initialization.
1516 // FIXME(#66151): For now, we are conservative and do not check `self.variants`.