1 use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
2 use crate::mir::{GeneratorLayout, GeneratorSavedLocal};
3 use crate::ty::normalize_erasing_regions::NormalizationError;
4 use crate::ty::subst::Subst;
5 use crate::ty::{self, subst::SubstsRef, ReprOptions, Ty, TyCtxt, TypeFoldable};
7 use rustc_attr as attr;
9 use rustc_hir::def_id::DefId;
10 use rustc_hir::lang_items::LangItem;
11 use rustc_index::bit_set::BitSet;
12 use rustc_index::vec::{Idx, IndexVec};
13 use rustc_session::{config::OptLevel, DataTypeKind, FieldInfo, SizeKind, VariantInfo};
14 use rustc_span::symbol::Symbol;
15 use rustc_span::{Span, DUMMY_SP};
16 use rustc_target::abi::call::{
17 ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, Conv, FnAbi, PassMode, Reg, RegKind,
19 use rustc_target::abi::*;
20 use rustc_target::spec::{abi::Abi as SpecAbi, HasTargetSpec, PanicStrategy, Target};
25 use std::num::NonZeroUsize;
28 use rand::{seq::SliceRandom, SeedableRng};
29 use rand_xoshiro::Xoshiro128StarStar;
/// Registers this module's query implementations (`layout_of` plus the
/// fn-ABI queries) in the global query `Providers` table.
pub fn provide(providers: &mut ty::query::Providers) {
    // NOTE(review): the `*providers =` assignment line and the closing brace
    // are elided from this excerpt; the struct-update expression rebuilds the
    // provider table with the three local query functions installed.
    ty::query::Providers { layout_of, fn_abi_of_fn_ptr, fn_abi_of_instance, ..*providers };
/// Extension methods on the target-abstract `Integer` widths, connecting
/// them to middle-IR types and `#[repr]`/attr integer types.
pub trait IntegerExt {
    /// Lowers this integer width to the concrete `tcx` integer type of the
    /// requested signedness.
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx>;
    /// Maps an `attr::IntType` (`#[repr(intN)]` style) to its `Integer` width.
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
    /// Maps a middle-IR signed integer type to its `Integer` width.
    fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer;
    /// Maps a middle-IR unsigned integer type to its `Integer` width.
    fn from_uint_ty<C: HasDataLayout>(cx: &C, uty: ty::UintTy) -> Integer;
impl IntegerExt for Integer {
    /// Lowers this abstract width to the concrete `u8..u128`/`i8..i128`
    /// middle types, choosing the family by `signed`.
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx> {
        match (*self, signed) {
            (I8, false) => tcx.types.u8,
            (I16, false) => tcx.types.u16,
            (I32, false) => tcx.types.u32,
            (I64, false) => tcx.types.u64,
            (I128, false) => tcx.types.u128,
            (I8, true) => tcx.types.i8,
            (I16, true) => tcx.types.i16,
            (I32, true) => tcx.types.i32,
            (I64, true) => tcx.types.i64,
            (I128, true) => tcx.types.i128,
    // NOTE(review): the closing braces of the match/fn above are elided from
    // this excerpt.

    /// Gets the Integer type from an attr::IntType.
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
        let dl = cx.data_layout();

        // NOTE(review): the `match ity {` opener is elided from this excerpt.
            attr::SignedInt(ast::IntTy::I8) | attr::UnsignedInt(ast::UintTy::U8) => I8,
            attr::SignedInt(ast::IntTy::I16) | attr::UnsignedInt(ast::UintTy::U16) => I16,
            attr::SignedInt(ast::IntTy::I32) | attr::UnsignedInt(ast::UintTy::U32) => I32,
            attr::SignedInt(ast::IntTy::I64) | attr::UnsignedInt(ast::UintTy::U64) => I64,
            attr::SignedInt(ast::IntTy::I128) | attr::UnsignedInt(ast::UintTy::U128) => I128,
            attr::SignedInt(ast::IntTy::Isize) | attr::UnsignedInt(ast::UintTy::Usize) => {
                // `isize`/`usize` width is target-dependent: ask the data layout.
                dl.ptr_sized_integer()

    /// Maps a middle-IR signed integer type to its `Integer` width.
    fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer {
        // NOTE(review): the `match ity {` opener and the `I8` arm are elided
        // from this excerpt.
            ty::IntTy::I16 => I16,
            ty::IntTy::I32 => I32,
            ty::IntTy::I64 => I64,
            ty::IntTy::I128 => I128,
            ty::IntTy::Isize => cx.data_layout().ptr_sized_integer(),

    /// Unsigned counterpart of `from_int_ty`.
    fn from_uint_ty<C: HasDataLayout>(cx: &C, ity: ty::UintTy) -> Integer {
        // NOTE(review): the `match ity {` opener and the `U8` arm are elided
        // from this excerpt.
            ty::UintTy::U16 => I16,
            ty::UintTy::U32 => I32,
            ty::UintTy::U64 => I64,
            ty::UintTy::U128 => I128,
            ty::UintTy::Usize => cx.data_layout().ptr_sized_integer(),
    /// Finds the appropriate Integer type and signedness for the given
    /// signed discriminant range and `#[repr]` attribute.
    /// N.B.: `u128` values above `i128::MAX` will be treated as signed, but
    /// that shouldn't affect anything, other than maybe debuginfo.
    // NOTE(review): the signature lines are elided from this excerpt —
    // judging from the uses below, presumably
    // `fn repr_discr<'tcx>(tcx: TyCtxt<'tcx>, ty: ..., repr: &ReprOptions,
    // min: i128, max: i128,` — confirm against upstream.
    ) -> (Integer, bool) {
        // Theoretically, negative values could be larger in unsigned representation
        // than the unsigned representation of the signed minimum. However, if there
        // are any negative values, the only valid unsigned representation is u128
        // which can fit all i128 values, so the result remains unaffected.
        let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
        let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));

        // An explicit `#[repr(intN)]` hint takes precedence over minimization.
        if let Some(ity) = repr.int {
            let discr = Integer::from_attr(&tcx, ity);
            let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
            // NOTE(review): the guard comparing `discr` against `fit` (which
            // presumably panics with the message below) is elided here.
                "Integer::repr_discr: `#[repr]` hint too small for \
                discriminant range of enum `{}",
            // NOTE(review): the message above appears to lack the closing
            // backtick after `{}` — confirm against upstream before touching
            // the literal (a doc-only edit must not change runtime strings).
            return (discr, ity.is_signed());

        // No hint: pick the minimum width the target allows.
        let at_least = if repr.c() {
            // This is usually I32, however it can be different on some platforms,
            // notably hexagon and arm-none/thumb-none
            tcx.data_layout().c_enum_min_size
            // repr(Rust) enums try to be as small as possible

        // If there are no negative values, we can use the unsigned fit.
            (cmp::max(unsigned_fit, at_least), false)
            (cmp::max(signed_fit, at_least), true)
/// Extension methods lowering ABI `Primitive`s back to middle-IR types.
pub trait PrimitiveExt {
    /// The middle-IR type this primitive represents.
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
    /// An *integer* middle-IR type of this primitive's width (see impl docs).
    fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
impl PrimitiveExt for Primitive {
    /// Maps each primitive to its natural middle-IR type; pointers become
    /// `*mut ()`.
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
        // NOTE(review): the `match` opener and closing braces are elided from
        // this excerpt.
        Int(i, signed) => i.to_ty(tcx, signed),
        F32 => tcx.types.f32,
        F64 => tcx.types.f64,
        Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),

    /// Return an *integer* type matching this primitive.
    /// Useful in particular when dealing with enum discriminants.
    fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
        // NOTE(review): the `match` opener and closing braces are elided from
        // this excerpt.
        Int(i, signed) => i.to_ty(tcx, signed),
        // Pointer-width discriminants are represented as `usize`.
        Pointer => tcx.types.usize,
        F32 | F64 => bug!("floats do not have an int type"),
/// The first half of a fat pointer.
///
/// - For a trait object, this is the address of the box.
/// - For a slice, this is the base address.
pub const FAT_PTR_ADDR: usize = 0;

/// The second half of a fat pointer.
///
/// - For a trait object, this is the address of the vtable.
/// - For a slice, this is the length.
pub const FAT_PTR_EXTRA: usize = 1;

/// The maximum supported number of lanes in a SIMD vector.
///
/// This value is selected based on backend support:
/// * LLVM does not appear to have a vector width limit.
/// * Cranelift stores the base-2 log of the lane count in a 4 bit integer.
// `1 << 0xF` = 2^15 = 32768 lanes: the largest count whose base-2 log (15)
// still fits in that 4-bit field.
pub const MAX_SIMD_LANES: u64 = 1 << 0xF;
/// Errors that prevent computing the layout of a type.
// NOTE(review): the `Unknown(Ty<'tcx>)` variant is elided from this excerpt —
// it is matched by the `Display` impl below and constructed by
// `univariant_uninterned`.
#[derive(Copy, Clone, Debug, HashStable, TyEncodable, TyDecodable)]
pub enum LayoutError<'tcx> {
    /// Values of this type would be too big for the current architecture.
    SizeOverflow(Ty<'tcx>),
    /// The type could not be normalized, so no layout can be computed.
    NormalizationFailure(Ty<'tcx>, NormalizationError<'tcx>),
impl<'tcx> fmt::Display for LayoutError<'tcx> {
    /// Renders a user-facing description of the layout failure.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // NOTE(review): the `match` opener and closing braces are elided from
        // this excerpt.
        LayoutError::Unknown(ty) => write!(f, "the type `{}` has an unknown layout", ty),
        LayoutError::SizeOverflow(ty) => {
            write!(f, "values of the type `{}` are too big for the current architecture", ty)
        LayoutError::NormalizationFailure(t, e) => write!(
            "unable to determine layout for `{}` because `{}` cannot be normalized",
            e.get_type_for_failure()
/// Enforce some basic invariants on layouts.
// NOTE(review): this excerpt elides many lines (the `tcx` parameter, the
// `assert!` wrappers around the message strings, match openers, braces);
// remaining tokens are preserved unchanged.
fn sanity_check_layout<'tcx>(
    param_env: ty::ParamEnv<'tcx>,
    layout: &TyAndLayout<'tcx>,
    // Type-level uninhabitedness should always imply ABI uninhabitedness.
    if tcx.conservative_is_privately_uninhabited(param_env.and(layout.ty)) {
        assert!(layout.abi.is_uninhabited());

    // The remaining checks are more expensive, so only run in debug builds.
    if cfg!(debug_assertions) {
        // Checks that the reported `Abi` is consistent with the layout's own
        // size/alignment.
        fn check_layout_abi<'tcx>(tcx: TyCtxt<'tcx>, layout: Layout<'tcx>) {
            Abi::Scalar(_scalar) => {
                // No padding in scalars.
                scalar.align(&tcx).abi,
                "alignment mismatch between ABI and layout in {layout:#?}"
                "size mismatch between ABI and layout in {layout:#?}"
            Abi::Vector { count, element } => {
                // No padding in vectors. Alignment can be strengthened, though.
                layout.align().abi >= element.align(&tcx).abi,
                "alignment mismatch between ABI and layout in {layout:#?}"
                let size = element.size(&tcx) * count;
                size.align_to(tcx.data_layout().vector_align(size).abi),
                "size mismatch between ABI and layout in {layout:#?}"
            Abi::ScalarPair(scalar1, scalar2) => {
                // Sanity-check scalar pairs. These are a bit more flexible and support
                // padding, but we can at least ensure both fields actually fit into the layout
                // and the alignment requirement has not been weakened.
                let align1 = scalar1.align(&tcx).abi;
                let align2 = scalar2.align(&tcx).abi;
                layout.align().abi >= cmp::max(align1, align2),
                "alignment mismatch between ABI and layout in {layout:#?}",
                // Second scalar begins at the first offset aligned for it.
                let field2_offset = scalar1.size(&tcx).align_to(align2);
                layout.size() >= field2_offset + scalar2.size(&tcx),
                "size mismatch between ABI and layout in {layout:#?}"
            Abi::Uninhabited | Abi::Aggregate { .. } => {} // Nothing to check.

        check_layout_abi(tcx, layout.layout);

        // For multi-variant layouts, every variant's layout is checked too.
        if let Variants::Multiple { variants, .. } = &layout.variants {
            for variant in variants {
                check_layout_abi(tcx, *variant);
                // No nested "multiple".
                assert!(matches!(variant.variants(), Variants::Single { .. }));
                // Skip empty variants.
                if variant.size() == Size::ZERO
                    || variant.fields().count() == 0
                    || variant.abi().is_uninhabited()
                    // These are never actually accessed anyway, so we can skip them. (Note that
                    // sometimes, variants with fields have size 0, and sometimes, variants without
                    // fields have non-0 size.)
                // Variants should have the same or a smaller size as the full thing.
                if variant.size() > layout.size {
                    "Type with size {} bytes has variant with size {} bytes: {layout:#?}",
                    variant.size().bytes(),
                // The top-level ABI and the ABI of the variants should be coherent.
                let abi_coherent = match (layout.abi, variant.abi()) {
                    (Abi::Scalar(..), Abi::Scalar(..)) => true,
                    (Abi::ScalarPair(..), Abi::ScalarPair(..)) => true,
                    (Abi::Uninhabited, _) => true,
                    (Abi::Aggregate { .. }, _) => true,
                    "Variant ABI is incompatible with top-level ABI:\nvariant={:#?}\nTop-level: {layout:#?}",
/// Query provider for `layout_of` (installed by `provide` above): normalizes
/// the queried type and computes its layout.
// NOTE(review): this excerpt elides several lines, including the
// `fn layout_of<'tcx>(tcx: TyCtxt<'tcx>,` signature line itself and various
// closing braces/match arms.
#[instrument(skip(tcx, query), level = "debug")]
    query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
    ty::tls::with_related_context(tcx, move |icx| {
        let (param_env, ty) = query.into_parts();

        // Guard against infinitely recursive types: abort compilation once
        // the recursion limit is exceeded.
        if !tcx.recursion_limit().value_within_limit(icx.layout_depth) {
            tcx.sess.fatal(&format!("overflow representing the type `{}`", ty));

        // Update the ImplicitCtxt to increase the layout_depth
        let icx = ty::tls::ImplicitCtxt { layout_depth: icx.layout_depth + 1, ..icx.clone() };

        ty::tls::enter_context(&icx, |_| {
            let param_env = param_env.with_reveal_all_normalized(tcx);
            let unnormalized_ty = ty;

            // FIXME: We might want to have two different versions of `layout_of`:
            // One that can be called after typecheck has completed and can use
            // `normalize_erasing_regions` here and another one that can be called
            // before typecheck has completed and uses `try_normalize_erasing_regions`.
            let ty = match tcx.try_normalize_erasing_regions(param_env, ty) {
                Err(normalization_error) => {
                    return Err(LayoutError::NormalizationFailure(ty, normalization_error));

            if ty != unnormalized_ty {
                // Ensure this layout is also cached for the normalized type.
                return tcx.layout_of(param_env.and(ty));

            let cx = LayoutCx { tcx, param_env };

            let layout = cx.layout_of_uncached(ty)?;
            let layout = TyAndLayout { ty, layout };

            cx.record_layout_for_printing(layout);

            // Run the invariant checks defined in `sanity_check_layout`.
            sanity_check_layout(tcx, param_env, &layout);
/// Context bundle used while computing layouts: a context `C` (instantiated
/// with `TyCtxt` in the impl below) plus the parameter environment.
pub struct LayoutCx<'tcx, C> {
    // NOTE(review): the `pub tcx: C,` field line is elided from this excerpt
    // (it is used as `self.tcx` and constructed as `LayoutCx { tcx, param_env }`
    // elsewhere in this file).
    pub param_env: ty::ParamEnv<'tcx>,
// Classifies how a univariant layout may be built; consumed by
// `univariant_uninterned`.
#[derive(Copy, Clone, Debug)]
// NOTE(review): the `pub enum StructKind {` header and the `AlwaysSized,` /
// `MaybeUnsized,` variant lines are elided from this excerpt (both variants
// are referenced by name further down in this file).
/// A tuple, closure, or univariant which cannot be coerced to unsized.
/// A univariant, the last field of which may be coerced to unsized.
/// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
Prefixed(Size, Align),
394 // Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
395 // This is used to go between `memory_index` (source field order to memory order)
396 // and `inverse_memory_index` (memory order to source field order).
397 // See also `FieldsShape::Arbitrary::memory_index` for more details.
398 // FIXME(eddyb) build a better abstraction for permutations, if possible.
/// Inverts a bijective `u32 -> u32` mapping given as a slice: in the result,
/// `out[map[x]] == x` for every index `x` of `map`.
///
/// Used to convert between `memory_index` (source order -> memory order) and
/// `inverse_memory_index` (memory order -> source order).
fn invert_mapping(map: &[u32]) -> Vec<u32> {
    let mut inverse = vec![0; map.len()];
    for (source_idx, &target_idx) in map.iter().enumerate() {
        // `map` is a permutation, so `target_idx` is in bounds.
        inverse[target_idx as usize] = source_idx as u32;
    }
    inverse
}
impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
    /// Builds the `ScalarPair` layout for two scalars `a` and `b`: `a` at
    /// offset 0, `b` at the next offset aligned for it.
    // NOTE(review): a few lines are elided from this excerpt (an
    // `.into_iter()` in the niche chain, the `LayoutS {` opener, and the
    // trailing `largest_niche`/`align`/`size` fields plus closing braces).
    fn scalar_pair(&self, a: Scalar, b: Scalar) -> LayoutS<'tcx> {
        let dl = self.data_layout();
        let b_align = b.align(dl);
        // Pair alignment is the max of both scalars, at least aggregate align.
        let align = a.align(dl).max(b_align).max(dl.aggregate_align);
        let b_offset = a.size(dl).align_to(b_align.abi);
        let size = (b_offset + b.size(dl)).align_to(align.abi);

        // HACK(nox): We iter on `b` and then `a` because `max_by_key`
        // returns the last maximum.
        let largest_niche = Niche::from_scalar(dl, b_offset, b)
            .chain(Niche::from_scalar(dl, Size::ZERO, a))
            .max_by_key(|niche| niche.available(dl));

        variants: Variants::Single { index: VariantIdx::new(0) },
        fields: FieldsShape::Arbitrary {
            offsets: vec![Size::ZERO, b_offset],
            memory_index: vec![0, 1],
        abi: Abi::ScalarPair(a, b),
    /// Computes the (uninterned) layout of a univariant aggregate — a struct,
    /// tuple, closure, or single enum variant — from its field layouts, its
    /// `#[repr]` options, and the `StructKind` describing sizedness/prefix.
    // NOTE(review): this excerpt elides many lines (the `ty`/`repr`/`kind`
    // parameters, several `let` bindings, guards, and braces); the remaining
    // tokens are preserved unchanged and annotated below.
    fn univariant_uninterned(
        fields: &[TyAndLayout<'_>],
    ) -> Result<LayoutS<'tcx>, LayoutError<'tcx>> {
        let dl = self.data_layout();
        let pack = repr.pack;
        // `#[repr(packed)]` and `#[repr(align)]` are mutually exclusive.
        if pack.is_some() && repr.align.is_some() {
            self.tcx.sess.delay_span_bug(DUMMY_SP, "struct cannot be packed and aligned");
            return Err(LayoutError::Unknown(ty));

        let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };

        // Start with the identity permutation; field-reordering mutates this.
        let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();

        let optimize = !repr.inhibit_struct_field_reordering_opt();
        // NOTE(review): the `let end =` binding (and presumably an
        // `if optimize {` guard) are elided here; a MaybeUnsized struct keeps
        // its trailing field last, so it is excluded from reordering.
        if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
        let optimizing = &mut inverse_memory_index[..end];
        // Effective field alignment, capped by `#[repr(packed)]` if present.
        let field_align = |f: &TyAndLayout<'_>| {
            if let Some(pack) = pack { f.align.abi.min(pack) } else { f.align.abi }

        // If `-Z randomize-layout` was enabled for the type definition we can shuffle
        // the field ordering to try and catch some code making assumptions about layouts
        // we don't guarantee
        if repr.can_randomize_type_layout() {
            // `ReprOptions.layout_seed` is a deterministic seed that we can use to
            // randomize field ordering with
            let mut rng = Xoshiro128StarStar::seed_from_u64(repr.field_shuffle_seed);

            // Shuffle the ordering of the fields
            optimizing.shuffle(&mut rng);

        // Otherwise we just leave things alone and actually optimize the type's fields
            StructKind::AlwaysSized | StructKind::MaybeUnsized => {
                optimizing.sort_by_key(|&x| {
                    // Place ZSTs first to avoid "interesting offsets",
                    // especially with only one or two non-ZST fields.
                    let f = &fields[x as usize];
                    (!f.is_zst(), cmp::Reverse(field_align(f)))

            StructKind::Prefixed(..) => {
                // Sort in ascending alignment so that the layout stays optimal
                // regardless of the prefix
                optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));

        // FIXME(Kixiron): We can always shuffle fields within a given alignment class
        // regardless of the status of `-Z randomize-layout`

        // inverse_memory_index holds field indices by increasing memory offset.
        // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
        // We now write field offsets to the corresponding offset slot;
        // field 5 with offset 0 puts 0 in offsets[5].
        // At the bottom of this function, we invert `inverse_memory_index` to
        // produce `memory_index` (see `invert_mapping`).

        let mut sized = true;
        let mut offsets = vec![Size::ZERO; fields.len()];
        let mut offset = Size::ZERO;
        let mut largest_niche = None;
        let mut largest_niche_available = 0;

        // A prefix (e.g. an enum tag) occupies the start of the layout.
        if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
            if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
            align = align.max(AbiAndPrefAlign::new(prefix_align));
            offset = prefix_size.align_to(prefix_align);

        // Lay out fields in memory order, accumulating offset/align/niche.
        for &i in &inverse_memory_index {
            let field = fields[i as usize];
            // NOTE(review): the guard around this diagnostic (presumably
            // `if !sized {`, i.e. a field following an unsized one) is elided.
            self.tcx.sess.delay_span_bug(
                "univariant: field #{} of `{}` comes after unsized field",

            if field.is_unsized() {

            // Invariant: offset < dl.obj_size_bound() <= 1<<61
            let field_align = if let Some(pack) = pack {
                field.align.min(AbiAndPrefAlign::new(pack))
            offset = offset.align_to(field_align.abi);
            align = align.max(field_align);

            debug!("univariant offset: {:?} field: {:#?}", offset, field);
            offsets[i as usize] = offset;

            // Track the largest niche seen so far (unless niches are hidden).
            if !repr.hide_niche() {
                if let Some(mut niche) = field.largest_niche {
                    let available = niche.available(dl);
                    if available > largest_niche_available {
                        largest_niche_available = available;
                        niche.offset += offset;
                        largest_niche = Some(niche);

            offset = offset.checked_add(field.size, dl).ok_or(LayoutError::SizeOverflow(ty))?;

        // An explicit `#[repr(align)]` can only raise the alignment.
        if let Some(repr_align) = repr.align {
            align = align.max(AbiAndPrefAlign::new(repr_align));

        debug!("univariant min_size: {:?}", offset);
        let min_size = offset;

        // As stated above, inverse_memory_index holds field indices by increasing offset.
        // This makes it an already-sorted view of the offsets vec.
        // To invert it, consider:
        // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
        // Field 5 would be the first element, so memory_index is i:
        // Note: if we didn't optimize, it's already right.
        if optimize { invert_mapping(&inverse_memory_index) } else { inverse_memory_index };

        let size = min_size.align_to(align.abi);
        let mut abi = Abi::Aggregate { sized };

        // Unpack newtype ABIs and find scalar pairs.
        if sized && size.bytes() > 0 {
            // All other fields must be ZSTs.
            let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());

            match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
                // We have exactly one non-ZST field.
                (Some((i, field)), None, None) => {
                    // Field fills the struct and it has a scalar or scalar pair ABI.
                    if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size
                        // For plain scalars, or vectors of them, we can't unpack
                        // newtypes for `#[repr(C)]`, as that affects C ABIs.
                        Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
                        // But scalar pairs are Rust-specific and get
                        // treated as aggregates by C ABIs anyway.
                        Abi::ScalarPair(..) => {

                // Two non-ZST fields, and they're both scalars.
                (Some((i, a)), Some((j, b)), None) => {
                    match (a.abi, b.abi) {
                        (Abi::Scalar(a), Abi::Scalar(b)) => {
                            // Order by the memory placement, not source order.
                            let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
                            let pair = self.scalar_pair(a, b);
                            let pair_offsets = match pair.fields {
                                FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
                                    assert_eq!(memory_index, &[0, 1]);
                            if offsets[i] == pair_offsets[0]
                                && offsets[j] == pair_offsets[1]
                                && align == pair.align
                                // We can use `ScalarPair` only when it matches our
                                // already computed layout (including `#[repr(C)]`).

        // Any uninhabited field makes the whole aggregate uninhabited.
        if fields.iter().any(|f| f.abi.is_uninhabited()) {
            abi = Abi::Uninhabited;

            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Arbitrary { offsets, memory_index },
657 fn layout_of_uncached(&self, ty: Ty<'tcx>) -> Result<Layout<'tcx>, LayoutError<'tcx>> {
659 let param_env = self.param_env;
660 let dl = self.data_layout();
661 let scalar_unit = |value: Primitive| {
662 let size = value.size(dl);
663 assert!(size.bits() <= 128);
664 Scalar::Initialized { value, valid_range: WrappingRange::full(size) }
667 |value: Primitive| tcx.intern_layout(LayoutS::scalar(self, scalar_unit(value)));
669 let univariant = |fields: &[TyAndLayout<'_>], repr: &ReprOptions, kind| {
670 Ok(tcx.intern_layout(self.univariant_uninterned(ty, fields, repr, kind)?))
672 debug_assert!(!ty.has_infer_types_or_consts());
674 Ok(match *ty.kind() {
676 ty::Bool => tcx.intern_layout(LayoutS::scalar(
678 Scalar::Initialized {
679 value: Int(I8, false),
680 valid_range: WrappingRange { start: 0, end: 1 },
683 ty::Char => tcx.intern_layout(LayoutS::scalar(
685 Scalar::Initialized {
686 value: Int(I32, false),
687 valid_range: WrappingRange { start: 0, end: 0x10FFFF },
690 ty::Int(ity) => scalar(Int(Integer::from_int_ty(dl, ity), true)),
691 ty::Uint(ity) => scalar(Int(Integer::from_uint_ty(dl, ity), false)),
692 ty::Float(fty) => scalar(match fty {
693 ty::FloatTy::F32 => F32,
694 ty::FloatTy::F64 => F64,
697 let mut ptr = scalar_unit(Pointer);
698 ptr.valid_range_mut().start = 1;
699 tcx.intern_layout(LayoutS::scalar(self, ptr))
703 ty::Never => tcx.intern_layout(LayoutS {
704 variants: Variants::Single { index: VariantIdx::new(0) },
705 fields: FieldsShape::Primitive,
706 abi: Abi::Uninhabited,
712 // Potentially-wide pointers.
713 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
714 let mut data_ptr = scalar_unit(Pointer);
715 if !ty.is_unsafe_ptr() {
716 data_ptr.valid_range_mut().start = 1;
719 let pointee = tcx.normalize_erasing_regions(param_env, pointee);
720 if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
721 return Ok(tcx.intern_layout(LayoutS::scalar(self, data_ptr)));
724 let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
725 let metadata = match unsized_part.kind() {
727 return Ok(tcx.intern_layout(LayoutS::scalar(self, data_ptr)));
729 ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)),
731 let mut vtable = scalar_unit(Pointer);
732 vtable.valid_range_mut().start = 1;
735 _ => return Err(LayoutError::Unknown(unsized_part)),
738 // Effectively a (ptr, meta) tuple.
739 tcx.intern_layout(self.scalar_pair(data_ptr, metadata))
742 // Arrays and slices.
743 ty::Array(element, mut count) => {
744 if count.has_projections() {
745 count = tcx.normalize_erasing_regions(param_env, count);
746 if count.has_projections() {
747 return Err(LayoutError::Unknown(ty));
751 let count = count.try_eval_usize(tcx, param_env).ok_or(LayoutError::Unknown(ty))?;
752 let element = self.layout_of(element)?;
754 element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;
757 if count != 0 && tcx.conservative_is_privately_uninhabited(param_env.and(ty)) {
760 Abi::Aggregate { sized: true }
763 let largest_niche = if count != 0 { element.largest_niche } else { None };
765 tcx.intern_layout(LayoutS {
766 variants: Variants::Single { index: VariantIdx::new(0) },
767 fields: FieldsShape::Array { stride: element.size, count },
770 align: element.align,
774 ty::Slice(element) => {
775 let element = self.layout_of(element)?;
776 tcx.intern_layout(LayoutS {
777 variants: Variants::Single { index: VariantIdx::new(0) },
778 fields: FieldsShape::Array { stride: element.size, count: 0 },
779 abi: Abi::Aggregate { sized: false },
781 align: element.align,
785 ty::Str => tcx.intern_layout(LayoutS {
786 variants: Variants::Single { index: VariantIdx::new(0) },
787 fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
788 abi: Abi::Aggregate { sized: false },
795 ty::FnDef(..) => univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?,
796 ty::Dynamic(..) | ty::Foreign(..) => {
797 let mut unit = self.univariant_uninterned(
800 &ReprOptions::default(),
801 StructKind::AlwaysSized,
804 Abi::Aggregate { ref mut sized } => *sized = false,
807 tcx.intern_layout(unit)
810 ty::Generator(def_id, substs, _) => self.generator_layout(ty, def_id, substs)?,
812 ty::Closure(_, ref substs) => {
813 let tys = substs.as_closure().upvar_tys();
815 &tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
816 &ReprOptions::default(),
817 StructKind::AlwaysSized,
823 if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };
826 &tys.iter().map(|k| self.layout_of(k)).collect::<Result<Vec<_>, _>>()?,
827 &ReprOptions::default(),
832 // SIMD vector types.
833 ty::Adt(def, substs) if def.repr().simd() => {
834 if !def.is_struct() {
835 // Should have yielded E0517 by now.
836 tcx.sess.delay_span_bug(
838 "#[repr(simd)] was applied to an ADT that is not a struct",
840 return Err(LayoutError::Unknown(ty));
843 // Supported SIMD vectors are homogeneous ADTs with at least one field:
845 // * #[repr(simd)] struct S(T, T, T, T);
846 // * #[repr(simd)] struct S { x: T, y: T, z: T, w: T }
847 // * #[repr(simd)] struct S([T; 4])
849 // where T is a primitive scalar (integer/float/pointer).
851 // SIMD vectors with zero fields are not supported.
852 // (should be caught by typeck)
853 if def.non_enum_variant().fields.is_empty() {
854 tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
857 // Type of the first ADT field:
858 let f0_ty = def.non_enum_variant().fields[0].ty(tcx, substs);
860 // Heterogeneous SIMD vectors are not supported:
861 // (should be caught by typeck)
862 for fi in &def.non_enum_variant().fields {
863 if fi.ty(tcx, substs) != f0_ty {
864 tcx.sess.fatal(&format!("monomorphising heterogeneous SIMD type `{}`", ty));
868 // The element type and number of elements of the SIMD vector
869 // are obtained from:
871 // * the element type and length of the single array field, if
872 // the first field is of array type, or
874 // * the homogenous field type and the number of fields.
875 let (e_ty, e_len, is_array) = if let ty::Array(e_ty, _) = f0_ty.kind() {
876 // First ADT field is an array:
878 // SIMD vectors with multiple array fields are not supported:
879 // (should be caught by typeck)
880 if def.non_enum_variant().fields.len() != 1 {
881 tcx.sess.fatal(&format!(
882 "monomorphising SIMD type `{}` with more than one array field",
887 // Extract the number of elements from the layout of the array field:
888 let FieldsShape::Array { count, .. } = self.layout_of(f0_ty)?.layout.fields() else {
889 return Err(LayoutError::Unknown(ty));
892 (*e_ty, *count, true)
894 // First ADT field is not an array:
895 (f0_ty, def.non_enum_variant().fields.len() as _, false)
898 // SIMD vectors of zero length are not supported.
899 // Additionally, lengths are capped at 2^16 as a fixed maximum backends must
902 // Can't be caught in typeck if the array length is generic.
904 tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
905 } else if e_len > MAX_SIMD_LANES {
906 tcx.sess.fatal(&format!(
907 "monomorphising SIMD type `{}` of length greater than {}",
912 // Compute the ABI of the element type:
913 let e_ly = self.layout_of(e_ty)?;
914 let Abi::Scalar(e_abi) = e_ly.abi else {
915 // This error isn't caught in typeck, e.g., if
916 // the element type of the vector is generic.
917 tcx.sess.fatal(&format!(
918 "monomorphising SIMD type `{}` with a non-primitive-scalar \
919 (integer/float/pointer) element type `{}`",
924 // Compute the size and alignment of the vector:
925 let size = e_ly.size.checked_mul(e_len, dl).ok_or(LayoutError::SizeOverflow(ty))?;
926 let align = dl.vector_align(size);
927 let size = size.align_to(align.abi);
929 // Compute the placement of the vector fields:
930 let fields = if is_array {
931 FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] }
933 FieldsShape::Array { stride: e_ly.size, count: e_len }
936 tcx.intern_layout(LayoutS {
937 variants: Variants::Single { index: VariantIdx::new(0) },
939 abi: Abi::Vector { element: e_abi, count: e_len },
940 largest_niche: e_ly.largest_niche,
947 ty::Adt(def, substs) => {
948 // Cache the field layouts.
955 .map(|field| self.layout_of(field.ty(tcx, substs)))
956 .collect::<Result<Vec<_>, _>>()
958 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
961 if def.repr().pack.is_some() && def.repr().align.is_some() {
962 self.tcx.sess.delay_span_bug(
963 tcx.def_span(def.did()),
964 "union cannot be packed and aligned",
966 return Err(LayoutError::Unknown(ty));
970 if def.repr().pack.is_some() { dl.i8_align } else { dl.aggregate_align };
972 if let Some(repr_align) = def.repr().align {
973 align = align.max(AbiAndPrefAlign::new(repr_align));
976 let optimize = !def.repr().inhibit_union_abi_opt();
977 let mut size = Size::ZERO;
978 let mut abi = Abi::Aggregate { sized: true };
979 let index = VariantIdx::new(0);
980 for field in &variants[index] {
981 assert!(!field.is_unsized());
982 align = align.max(field.align);
984 // If all non-ZST fields have the same ABI, forward this ABI
985 if optimize && !field.is_zst() {
986 // Discard valid range information and allow undef
987 let field_abi = match field.abi {
988 Abi::Scalar(x) => Abi::Scalar(x.to_union()),
989 Abi::ScalarPair(x, y) => {
990 Abi::ScalarPair(x.to_union(), y.to_union())
992 Abi::Vector { element: x, count } => {
993 Abi::Vector { element: x.to_union(), count }
995 Abi::Uninhabited | Abi::Aggregate { .. } => {
996 Abi::Aggregate { sized: true }
1000 if size == Size::ZERO {
1001 // first non ZST: initialize 'abi'
1003 } else if abi != field_abi {
1004 // different fields have different ABI: reset to Aggregate
1005 abi = Abi::Aggregate { sized: true };
1009 size = cmp::max(size, field.size);
1012 if let Some(pack) = def.repr().pack {
1013 align = align.min(AbiAndPrefAlign::new(pack));
1016 return Ok(tcx.intern_layout(LayoutS {
1017 variants: Variants::Single { index },
1018 fields: FieldsShape::Union(
1019 NonZeroUsize::new(variants[index].len())
1020 .ok_or(LayoutError::Unknown(ty))?,
1023 largest_niche: None,
1025 size: size.align_to(align.abi),
1029 // A variant is absent if it's uninhabited and only has ZST fields.
1030 // Present uninhabited variants only require space for their fields,
1031 // but *not* an encoding of the discriminant (e.g., a tag value).
1032 // See issue #49298 for more details on the need to leave space
1033 // for non-ZST uninhabited data (mostly partial initialization).
1034 let absent = |fields: &[TyAndLayout<'_>]| {
1035 let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
1036 let is_zst = fields.iter().all(|f| f.is_zst());
1037 uninhabited && is_zst
1039 let (present_first, present_second) = {
1040 let mut present_variants = variants
1042 .filter_map(|(i, v)| if absent(v) { None } else { Some(i) });
1043 (present_variants.next(), present_variants.next())
1045 let present_first = match present_first {
1046 Some(present_first) => present_first,
1047 // Uninhabited because it has no variants, or only absent ones.
1048 None if def.is_enum() => {
1049 return Ok(tcx.layout_of(param_env.and(tcx.types.never))?.layout);
1051 // If it's a struct, still compute a layout so that we can still compute the
1053 None => VariantIdx::new(0),
1056 let is_struct = !def.is_enum() ||
1057 // Only one variant is present.
1058 (present_second.is_none() &&
1059 // Representation optimizations are allowed.
1060 !def.repr().inhibit_enum_layout_opt());
1062 // Struct, or univariant enum equivalent to a struct.
1063 // (Typechecking will reject discriminant-sizing attrs.)
1065 let v = present_first;
1066 let kind = if def.is_enum() || variants[v].is_empty() {
1067 StructKind::AlwaysSized
1069 let param_env = tcx.param_env(def.did());
1070 let last_field = def.variant(v).fields.last().unwrap();
1072 tcx.type_of(last_field.did).is_sized(tcx.at(DUMMY_SP), param_env);
1074 StructKind::MaybeUnsized
1076 StructKind::AlwaysSized
1080 let mut st = self.univariant_uninterned(ty, &variants[v], &def.repr(), kind)?;
1081 st.variants = Variants::Single { index: v };
1082 let (start, end) = self.tcx.layout_scalar_valid_range(def.did());
1084 Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
1085 // the asserts ensure that we are not using the
1086 // `#[rustc_layout_scalar_valid_range(n)]`
1087 // attribute to widen the range of anything as that would probably
1088 // result in UB somewhere
1089 // FIXME(eddyb) the asserts are probably not needed,
1090 // as larger validity ranges would result in missed
1091 // optimizations, *not* wrongly assuming the inner
1092 // value is valid. e.g. unions enlarge validity ranges,
1093 // because the values may be uninitialized.
1094 if let Bound::Included(start) = start {
1095 // FIXME(eddyb) this might be incorrect - it doesn't
1096 // account for wrap-around (end < start) ranges.
1097 let valid_range = scalar.valid_range_mut();
1098 assert!(valid_range.start <= start);
1099 valid_range.start = start;
1101 if let Bound::Included(end) = end {
1102 // FIXME(eddyb) this might be incorrect - it doesn't
1103 // account for wrap-around (end < start) ranges.
1104 let valid_range = scalar.valid_range_mut();
1105 assert!(valid_range.end >= end);
1106 valid_range.end = end;
1109 // Update `largest_niche` if we have introduced a larger niche.
1110 let niche = if def.repr().hide_niche() {
1113 Niche::from_scalar(dl, Size::ZERO, *scalar)
1115 if let Some(niche) = niche {
1116 match st.largest_niche {
1117 Some(largest_niche) => {
1118 // Replace the existing niche even if they're equal,
1119 // because this one is at a lower offset.
1120 if largest_niche.available(dl) <= niche.available(dl) {
1121 st.largest_niche = Some(niche);
1124 None => st.largest_niche = Some(niche),
1129 start == Bound::Unbounded && end == Bound::Unbounded,
1130 "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
1136 return Ok(tcx.intern_layout(st));
1139 // At this point, we have handled all unions and
1140 // structs. (We have also handled univariant enums
1141 // that allow representation optimization.)
1142 assert!(def.is_enum());
1144 // The current code for niche-filling relies on variant indices
1145 // instead of actual discriminants, so dataful enums with
1146 // explicit discriminants (RFC #2363) would misbehave.
1147 let no_explicit_discriminants = def
1150 .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i.as_u32()));
1152 let mut niche_filling_layout = None;
1154 // Niche-filling enum optimization.
1155 if !def.repr().inhibit_enum_layout_opt() && no_explicit_discriminants {
1156 let mut dataful_variant = None;
1157 let mut niche_variants = VariantIdx::MAX..=VariantIdx::new(0);
1159 // Find one non-ZST variant.
1160 'variants: for (v, fields) in variants.iter_enumerated() {
1166 if dataful_variant.is_none() {
1167 dataful_variant = Some(v);
1170 dataful_variant = None;
1175 niche_variants = *niche_variants.start().min(&v)..=v;
1178 if niche_variants.start() > niche_variants.end() {
1179 dataful_variant = None;
1182 if let Some(i) = dataful_variant {
1183 let count = (niche_variants.end().as_u32()
1184 - niche_variants.start().as_u32()
1187 // Find the field with the largest niche
1188 let niche_candidate = variants[i]
1191 .filter_map(|(j, field)| Some((j, field.largest_niche?)))
1192 .max_by_key(|(_, niche)| niche.available(dl));
1194 if let Some((field_index, niche, (niche_start, niche_scalar))) =
1195 niche_candidate.and_then(|(field_index, niche)| {
1196 Some((field_index, niche, niche.reserve(self, count)?))
1199 let mut align = dl.aggregate_align;
1203 let mut st = self.univariant_uninterned(
1207 StructKind::AlwaysSized,
1209 st.variants = Variants::Single { index: j };
1211 align = align.max(st.align);
1213 Ok(tcx.intern_layout(st))
1215 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1217 let offset = st[i].fields().offset(field_index) + niche.offset;
1218 let size = st[i].size();
1220 let abi = if st.iter().all(|v| v.abi().is_uninhabited()) {
1224 Abi::Scalar(_) => Abi::Scalar(niche_scalar),
1225 Abi::ScalarPair(first, second) => {
1226 // Only the niche is guaranteed to be initialised,
1227 // so use union layout for the other primitive.
1228 if offset.bytes() == 0 {
1229 Abi::ScalarPair(niche_scalar, second.to_union())
1231 Abi::ScalarPair(first.to_union(), niche_scalar)
1234 _ => Abi::Aggregate { sized: true },
1238 let largest_niche = Niche::from_scalar(dl, offset, niche_scalar);
1240 niche_filling_layout = Some(LayoutS {
1241 variants: Variants::Multiple {
1243 tag_encoding: TagEncoding::Niche {
1251 fields: FieldsShape::Arbitrary {
1252 offsets: vec![offset],
1253 memory_index: vec![0],
1264 let (mut min, mut max) = (i128::MAX, i128::MIN);
1265 let discr_type = def.repr().discr_type();
1266 let bits = Integer::from_attr(self, discr_type).size().bits();
1267 for (i, discr) in def.discriminants(tcx) {
1268 if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
1271 let mut x = discr.val as i128;
1272 if discr_type.is_signed() {
1273 // sign extend the raw representation to be an i128
1274 x = (x << (128 - bits)) >> (128 - bits);
1283 // We might have no inhabited variants, so pretend there's at least one.
1284 if (min, max) == (i128::MAX, i128::MIN) {
1288 assert!(min <= max, "discriminant range is {}...{}", min, max);
1289 let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr(), min, max);
1291 let mut align = dl.aggregate_align;
1292 let mut size = Size::ZERO;
1294 // We're interested in the smallest alignment, so start large.
1295 let mut start_align = Align::from_bytes(256).unwrap();
1296 assert_eq!(Integer::for_align(dl, start_align), None);
1298 // repr(C) on an enum tells us to make a (tag, union) layout,
1299 // so we need to grow the prefix alignment to be at least
1300 // the alignment of the union. (This value is used both for
1301 // determining the alignment of the overall enum, and the
1302 // determining the alignment of the payload after the tag.)
1303 let mut prefix_align = min_ity.align(dl).abi;
1305 for fields in &variants {
1306 for field in fields {
1307 prefix_align = prefix_align.max(field.align.abi);
1312 // Create the set of structs that represent each variant.
1313 let mut layout_variants = variants
1315 .map(|(i, field_layouts)| {
1316 let mut st = self.univariant_uninterned(
1320 StructKind::Prefixed(min_ity.size(), prefix_align),
1322 st.variants = Variants::Single { index: i };
1323 // Find the first field we can't move later
1324 // to make room for a larger discriminant.
1326 st.fields.index_by_increasing_offset().map(|j| field_layouts[j])
1328 if !field.is_zst() || field.align.abi.bytes() != 1 {
1329 start_align = start_align.min(field.align.abi);
1333 size = cmp::max(size, st.size);
1334 align = align.max(st.align);
1337 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1339 // Align the maximum variant size to the largest alignment.
1340 size = size.align_to(align.abi);
1342 if size.bytes() >= dl.obj_size_bound() {
1343 return Err(LayoutError::SizeOverflow(ty));
1346 let typeck_ity = Integer::from_attr(dl, def.repr().discr_type());
1347 if typeck_ity < min_ity {
1348 // It is a bug if Layout decided on a greater discriminant size than typeck for
1349 // some reason at this point (based on values discriminant can take on). Mostly
1350 // because this discriminant will be loaded, and then stored into variable of
1351 // type calculated by typeck. Consider such case (a bug): typeck decided on
1352 // byte-sized discriminant, but layout thinks we need a 16-bit to store all
1353 // discriminant values. That would be a bug, because then, in codegen, in order
1354 // to store this 16-bit discriminant into 8-bit sized temporary some of the
1355 // space necessary to represent would have to be discarded (or layout is wrong
1356 // on thinking it needs 16 bits)
1358 "layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
1362 // However, it is fine to make discr type however large (as an optimisation)
1363 // after this point – we’ll just truncate the value we load in codegen.
1366 // Check to see if we should use a different type for the
1367 // discriminant. We can safely use a type with the same size
1368 // as the alignment of the first field of each variant.
1369 // We increase the size of the discriminant to avoid LLVM copying
1370 // padding when it doesn't need to. This normally causes unaligned
1371 // load/stores and excessive memcpy/memset operations. By using a
1372 // bigger integer size, LLVM can be sure about its contents and
1373 // won't be so conservative.
1375 // Use the initial field alignment
1376 let mut ity = if def.repr().c() || def.repr().int.is_some() {
1379 Integer::for_align(dl, start_align).unwrap_or(min_ity)
1382 // If the alignment is not larger than the chosen discriminant size,
1383 // don't use the alignment as the final size.
1387 // Patch up the variants' first few fields.
1388 let old_ity_size = min_ity.size();
1389 let new_ity_size = ity.size();
1390 for variant in &mut layout_variants {
1391 match variant.fields {
1392 FieldsShape::Arbitrary { ref mut offsets, .. } => {
1394 if *i <= old_ity_size {
1395 assert_eq!(*i, old_ity_size);
1399 // We might be making the struct larger.
1400 if variant.size <= old_ity_size {
1401 variant.size = new_ity_size;
1409 let tag_mask = ity.size().unsigned_int_max();
1410 let tag = Scalar::Initialized {
1411 value: Int(ity, signed),
1412 valid_range: WrappingRange {
1413 start: (min as u128 & tag_mask),
1414 end: (max as u128 & tag_mask),
1417 let mut abi = Abi::Aggregate { sized: true };
1419 if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
1420 abi = Abi::Uninhabited;
1421 } else if tag.size(dl) == size || variants.iter().all(|layout| layout.is_empty()) {
1422 // Without latter check aligned enums with custom discriminant values
1423 // Would result in ICE see the issue #92464 for more info
1424 abi = Abi::Scalar(tag);
1426 // Try to use a ScalarPair for all tagged enums.
1427 let mut common_prim = None;
1428 let mut common_prim_initialized_in_all_variants = true;
1429 for (field_layouts, layout_variant) in iter::zip(&variants, &layout_variants) {
1430 let FieldsShape::Arbitrary { ref offsets, .. } = layout_variant.fields else {
1434 iter::zip(field_layouts, offsets).filter(|p| !p.0.is_zst());
1435 let (field, offset) = match (fields.next(), fields.next()) {
1437 common_prim_initialized_in_all_variants = false;
1440 (Some(pair), None) => pair,
1446 let prim = match field.abi {
1447 Abi::Scalar(scalar) => {
1448 common_prim_initialized_in_all_variants &=
1449 matches!(scalar, Scalar::Initialized { .. });
1457 if let Some(pair) = common_prim {
1458 // This is pretty conservative. We could go fancier
1459 // by conflating things like i32 and u32, or even
1460 // realising that (u8, u8) could just cohabit with
1462 if pair != (prim, offset) {
1467 common_prim = Some((prim, offset));
1470 if let Some((prim, offset)) = common_prim {
1471 let prim_scalar = if common_prim_initialized_in_all_variants {
1474 // Common prim might be uninit.
1475 Scalar::Union { value: prim }
1477 let pair = self.scalar_pair(tag, prim_scalar);
1478 let pair_offsets = match pair.fields {
1479 FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
1480 assert_eq!(memory_index, &[0, 1]);
1485 if pair_offsets[0] == Size::ZERO
1486 && pair_offsets[1] == *offset
1487 && align == pair.align
1488 && size == pair.size
1490 // We can use `ScalarPair` only when it matches our
1491 // already computed layout (including `#[repr(C)]`).
1497 // If we pick a "clever" (by-value) ABI, we might have to adjust the ABI of the
1498 // variants to ensure they are consistent. This is because a downcast is
1499 // semantically a NOP, and thus should not affect layout.
1500 if matches!(abi, Abi::Scalar(..) | Abi::ScalarPair(..)) {
1501 for variant in &mut layout_variants {
1502 // We only do this for variants with fields; the others are not accessed anyway.
1503 // Also do not overwrite any already existing "clever" ABIs.
1504 if variant.fields.count() > 0
1505 && matches!(variant.abi, Abi::Aggregate { .. })
1508 // Also need to bump up the size and alignment, so that the entire value fits in here.
1509 variant.size = cmp::max(variant.size, size);
1510 variant.align.abi = cmp::max(variant.align.abi, align.abi);
1515 let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag);
1517 let layout_variants =
1518 layout_variants.into_iter().map(|v| tcx.intern_layout(v)).collect();
1520 let tagged_layout = LayoutS {
1521 variants: Variants::Multiple {
1523 tag_encoding: TagEncoding::Direct,
1525 variants: layout_variants,
1527 fields: FieldsShape::Arbitrary {
1528 offsets: vec![Size::ZERO],
1529 memory_index: vec![0],
1537 let best_layout = match (tagged_layout, niche_filling_layout) {
1538 (tagged_layout, Some(niche_filling_layout)) => {
1539 // Pick the smaller layout; otherwise,
1540 // pick the layout with the larger niche; otherwise,
1541 // pick tagged as it has simpler codegen.
1542 cmp::min_by_key(tagged_layout, niche_filling_layout, |layout| {
1543 let niche_size = layout.largest_niche.map_or(0, |n| n.available(dl));
1544 (layout.size, cmp::Reverse(niche_size))
1547 (tagged_layout, None) => tagged_layout,
1550 tcx.intern_layout(best_layout)
1553 // Types with no meaningful known layout.
1554 ty::Projection(_) | ty::Opaque(..) => {
1555 // NOTE(eddyb) `layout_of` query should've normalized these away,
1556 // if that was possible, so there's no reason to try again here.
1557 return Err(LayoutError::Unknown(ty));
1560 ty::Placeholder(..) | ty::GeneratorWitness(..) | ty::Infer(_) => {
1561 bug!("Layout::compute: unexpected type `{}`", ty)
1564 ty::Bound(..) | ty::Param(_) | ty::Error(_) => {
1565 return Err(LayoutError::Unknown(ty));
1571 /// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
1572 #[derive(Clone, Debug, PartialEq)]
1573 enum SavedLocalEligibility {
// NOTE(review): an `Unassigned` variant exists in this enum (it is constructed
// and matched on in `generator_saved_local_eligibility` below) but its
// declaration line is elided in this extraction.
// Eligible for overlap and assigned to exactly one variant.
1575 Assigned(VariantIdx),
1576 // FIXME: Use newtype_index so we aren't wasting bytes
// Not eligible for overlap; `Some(idx)` is the local's slot in the promoted
// prefix (assigned at the end of `generator_saved_local_eligibility`).
1577 Ineligible(Option<u32>),
1580 // When laying out generators, we divide our saved local fields into two
1581 // categories: overlap-eligible and overlap-ineligible.
1583 // Those fields which are ineligible for overlap go in a "prefix" at the
1584 // beginning of the layout, and always have space reserved for them.
1586 // Overlap-eligible fields are only assigned to one variant, so we lay
1587 // those fields out for each variant and put them right after the prefix.
1590 // Finally, in the layout details, we point to the fields from the
1591 // variants they are assigned to. It is possible for some fields to be
1592 // included in multiple variants. No field ever "moves around" in the
1593 // layout; its offset is always the same.
1595 // Also included in the layout are the upvars and the discriminant.
1596 // These are included as fields on the "outer" layout; they are not part
1598 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
1599 /// Compute the eligibility and assignment of each local.
// Decides, for every saved local of a generator, whether its storage may
// overlap with other locals, and which variant it belongs to. Returns the
// set of overlap-ineligible locals (these get promoted to the prefix) plus
// the per-local eligibility/assignment table.
// NOTE(review): some original lines are elided in this extraction (e.g. a
// `&self` parameter and several match arms / closing braces); the numbered
// lines are kept byte-for-byte.
1600 fn generator_saved_local_eligibility(
1602 info: &GeneratorLayout<'tcx>,
1603 ) -> (BitSet<GeneratorSavedLocal>, IndexVec<GeneratorSavedLocal, SavedLocalEligibility>) {
1604 use SavedLocalEligibility::*;
// Start with every saved local unassigned; refined by the passes below.
1606 let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> =
1607 IndexVec::from_elem_n(Unassigned, info.field_tys.len());
1609 // The saved locals not eligible for overlap. These will get
1610 // "promoted" to the prefix of our generator.
1611 let mut ineligible_locals = BitSet::new_empty(info.field_tys.len());
1613 // Figure out which of our saved locals are fields in only
1614 // one variant. The rest are deemed ineligible for overlap.
1615 for (variant_index, fields) in info.variant_fields.iter_enumerated() {
1616 for local in fields {
1617 match assignments[*local] {
// First sighting of this local: tentatively assign it to this variant.
// (The `Unassigned =>` arm header is elided in this extraction.)
1619 assignments[*local] = Assigned(variant_index);
1622 // We've already seen this local at another suspension
1623 // point, so it is no longer a candidate.
1625 "removing local {:?} in >1 variant ({:?}, {:?})",
1630 ineligible_locals.insert(*local);
1631 assignments[*local] = Ineligible(None);
// Second pass: storage conflicts between still-eligible locals. Two locals
// live at the same time cannot share storage unless one is ineligible
// (stored in every variant) or both are in the same variant.
1638 // Next, check every pair of eligible locals to see if they
1640 for local_a in info.storage_conflicts.rows() {
1641 let conflicts_a = info.storage_conflicts.count(local_a);
1642 if ineligible_locals.contains(local_a) {
1646 for local_b in info.storage_conflicts.iter(local_a) {
1647 // local_a and local_b are storage live at the same time, therefore they
1648 // cannot overlap in the generator layout. The only way to guarantee
1649 // this is if they are in the same variant, or one is ineligible
1650 // (which means it is stored in every variant).
1651 if ineligible_locals.contains(local_b)
1652 || assignments[local_a] == assignments[local_b]
1657 // If they conflict, we will choose one to make ineligible.
1658 // This is not always optimal; it's just a greedy heuristic that
1659 // seems to produce good results most of the time.
1660 let conflicts_b = info.storage_conflicts.count(local_b);
// Greedy tie-break: demote whichever local conflicts with more others.
1661 let (remove, other) =
1662 if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
1663 ineligible_locals.insert(remove);
1664 assignments[remove] = Ineligible(None);
1665 trace!("removing local {:?} due to conflict with {:?}", remove, other);
1669 // Count the number of variants in use. If only one of them, then it is
1670 // impossible to overlap any locals in our layout. In this case it's
1671 // always better to make the remaining locals ineligible, so we can
1672 // lay them out with the other locals in the prefix and eliminate
1673 // unnecessary padding bytes.
1675 let mut used_variants = BitSet::new_empty(info.variant_fields.len());
1676 for assignment in &assignments {
1677 if let Assigned(idx) = assignment {
1678 used_variants.insert(*idx);
// Fewer than two used variants: overlap buys nothing, demote everything.
1681 if used_variants.count() < 2 {
1682 for assignment in assignments.iter_mut() {
1683 *assignment = Ineligible(None);
1685 ineligible_locals.insert_all();
1689 // Write down the order of our locals that will be promoted to the prefix.
1691 for (idx, local) in ineligible_locals.iter().enumerate() {
1692 assignments[local] = Ineligible(Some(idx as u32));
1695 debug!("generator saved local assignments: {:?}", assignments);
1697 (ineligible_locals, assignments)
1700 /// Compute the full generator layout.
// Computes the full generator layout: a shared "prefix" (upvars, discriminant
// tag, and promoted overlap-ineligible locals) followed by per-variant layouts
// of the overlap-eligible fields.
// NOTE(review): this extraction elides some lines (parts of the signature,
// several closing braces, and intermediate statements); numbered lines are
// kept byte-for-byte.
1701 fn generator_layout(
1704 def_id: hir::def_id::DefId,
1705 substs: SubstsRef<'tcx>,
1706 ) -> Result<Layout<'tcx>, LayoutError<'tcx>> {
1707 use SavedLocalEligibility::*;
// Substitute the generator's substs into a saved-local field type.
1709 let subst_field = |ty: Ty<'tcx>| ty.subst(tcx, substs);
1711 let Some(info) = tcx.generator_layout(def_id) else {
1712 return Err(LayoutError::Unknown(ty));
1714 let (ineligible_locals, assignments) = self.generator_saved_local_eligibility(&info);
1716 // Build a prefix layout, including "promoting" all ineligible
1717 // locals as part of the prefix. We compute the layout of all of
1718 // these fields at once to get optimal packing.
// The tag is placed directly after the upvar prefix types.
1719 let tag_index = substs.as_generator().prefix_tys().count();
1721 // `info.variant_fields` already accounts for the reserved variants, so no need to add them.
1722 let max_discr = (info.variant_fields.len() - 1) as u128;
1723 let discr_int = Integer::fit_unsigned(max_discr);
1724 let discr_int_ty = discr_int.to_ty(tcx, false);
// Discriminant tag: unsigned integer, valid range 0..=max_discr.
1725 let tag = Scalar::Initialized {
1726 value: Primitive::Int(discr_int, false),
1727 valid_range: WrappingRange { start: 0, end: max_discr },
1729 let tag_layout = self.tcx.intern_layout(LayoutS::scalar(self, tag));
1730 let tag_layout = TyAndLayout { ty: discr_int_ty, layout: tag_layout };
// Promoted fields are wrapped in MaybeUninit since they may be dead in
// some variants.
1732 let promoted_layouts = ineligible_locals
1734 .map(|local| subst_field(info.field_tys[local]))
1735 .map(|ty| tcx.mk_maybe_uninit(ty))
1736 .map(|ty| self.layout_of(ty));
// Prefix field order: upvars, then the tag, then the promoted locals.
1737 let prefix_layouts = substs
1740 .map(|ty| self.layout_of(ty))
1741 .chain(iter::once(Ok(tag_layout)))
1742 .chain(promoted_layouts)
1743 .collect::<Result<Vec<_>, _>>()?;
1744 let prefix = self.univariant_uninterned(
1747 &ReprOptions::default(),
1748 StructKind::AlwaysSized,
1751 let (prefix_size, prefix_align) = (prefix.size, prefix.align);
1753 // Split the prefix layout into the "outer" fields (upvars and
1754 // discriminant) and the "promoted" fields. Promoted fields will
1755 // get included in each variant that requested them in
1757 debug!("prefix = {:#?}", prefix);
1758 let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
1759 FieldsShape::Arbitrary { mut offsets, memory_index } => {
1760 let mut inverse_memory_index = invert_mapping(&memory_index);
1762 // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
1763 // "outer" and "promoted" fields respectively.
1764 let b_start = (tag_index + 1) as u32;
1765 let offsets_b = offsets.split_off(b_start as usize);
1766 let offsets_a = offsets;
1768 // Disentangle the "a" and "b" components of `inverse_memory_index`
1769 // by preserving the order but keeping only one disjoint "half" each.
1770 // FIXME(eddyb) build a better abstraction for permutations, if possible.
1771 let inverse_memory_index_b: Vec<_> =
1772 inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
1773 inverse_memory_index.retain(|&i| i < b_start);
1774 let inverse_memory_index_a = inverse_memory_index;
1776 // Since `inverse_memory_index_{a,b}` each only refer to their
1777 // respective fields, they can be safely inverted
1778 let memory_index_a = invert_mapping(&inverse_memory_index_a);
1779 let memory_index_b = invert_mapping(&inverse_memory_index_b);
1782 FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
1783 (outer_fields, offsets_b, memory_index_b)
// Running totals; every variant below can only grow them.
1788 let mut size = prefix.size;
1789 let mut align = prefix.align;
// Lay out each variant: only its own overlap-eligible fields, prefixed by
// the shared prefix (so all variants agree on the prefix bytes).
1793 .map(|(index, variant_fields)| {
1794 // Only include overlap-eligible fields when we compute our variant layout.
1795 let variant_only_tys = variant_fields
1797 .filter(|local| match assignments[**local] {
1798 Unassigned => bug!(),
1799 Assigned(v) if v == index => true,
1800 Assigned(_) => bug!("assignment does not match variant"),
1801 Ineligible(_) => false,
1803 .map(|local| subst_field(info.field_tys[*local]));
1805 let mut variant = self.univariant_uninterned(
1808 .map(|ty| self.layout_of(ty))
1809 .collect::<Result<Vec<_>, _>>()?,
1810 &ReprOptions::default(),
1811 StructKind::Prefixed(prefix_size, prefix_align.abi),
1813 variant.variants = Variants::Single { index };
1815 let FieldsShape::Arbitrary { offsets, memory_index } = variant.fields else {
1819 // Now, stitch the promoted and variant-only fields back together in
1820 // the order they are mentioned by our GeneratorLayout.
1821 // Because we only use some subset (that can differ between variants)
1822 // of the promoted fields, we can't just pick those elements of the
1823 // `promoted_memory_index` (as we'd end up with gaps).
1824 // So instead, we build an "inverse memory_index", as if all of the
1825 // promoted fields were being used, but leave the elements not in the
1826 // subset as `INVALID_FIELD_IDX`, which we can filter out later to
1827 // obtain a valid (bijective) mapping.
1828 const INVALID_FIELD_IDX: u32 = !0;
1829 let mut combined_inverse_memory_index =
1830 vec![INVALID_FIELD_IDX; promoted_memory_index.len() + memory_index.len()];
1831 let mut offsets_and_memory_index = iter::zip(offsets, memory_index);
1832 let combined_offsets = variant_fields
// For each declared field: eligible fields consume the next variant-local
// offset (shifted past the promoted slots); ineligible fields reuse their
// fixed promoted offset/index from the prefix.
1836 let (offset, memory_index) = match assignments[*local] {
1837 Unassigned => bug!(),
1839 let (offset, memory_index) =
1840 offsets_and_memory_index.next().unwrap();
1841 (offset, promoted_memory_index.len() as u32 + memory_index)
1843 Ineligible(field_idx) => {
1844 let field_idx = field_idx.unwrap() as usize;
1845 (promoted_offsets[field_idx], promoted_memory_index[field_idx])
1848 combined_inverse_memory_index[memory_index as usize] = i as u32;
1853 // Remove the unused slots and invert the mapping to obtain the
1854 // combined `memory_index` (also see previous comment).
1855 combined_inverse_memory_index.retain(|&i| i != INVALID_FIELD_IDX);
1856 let combined_memory_index = invert_mapping(&combined_inverse_memory_index);
1858 variant.fields = FieldsShape::Arbitrary {
1859 offsets: combined_offsets,
1860 memory_index: combined_memory_index,
1863 size = size.max(variant.size);
1864 align = align.max(variant.align);
1865 Ok(tcx.intern_layout(variant))
1867 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1869 size = size.align_to(align.abi);
// Whole generator is uninhabited iff the prefix or every variant is.
1872 if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi().is_uninhabited()) {
1875 Abi::Aggregate { sized: true }
// Assemble the final multi-variant layout: outer fields (upvars + tag)
// plus the per-variant layouts; the tag is directly encoded.
1878 let layout = tcx.intern_layout(LayoutS {
1879 variants: Variants::Multiple {
1881 tag_encoding: TagEncoding::Direct,
1882 tag_field: tag_index,
1885 fields: outer_fields,
1887 largest_niche: prefix.largest_niche,
1891 debug!("generator layout ({:?}): {:#?}", ty, layout);
1895 /// This is invoked by the `layout_of` query to record the final
1896 /// layout of each type.
1898 fn record_layout_for_printing(&self, layout: TyAndLayout<'tcx>) {
1899 // If we are running with `-Zprint-type-sizes`, maybe record layouts
1900 // for dumping later.
// Cheap gate: the common case is one branch; the outlined helper below
// does all the actual recording work.
1901 if self.tcx.sess.opts.debugging_opts.print_type_sizes {
1902 self.record_layout_for_printing_outlined(layout)
// Records type-size information for `-Zprint-type-sizes` output: builds
// per-variant and per-field size/offset/alignment descriptions and feeds
// them into `sess.code_stats`.
// NOTE(review): some original lines (closures' closing braces, a few
// arguments) are elided in this extraction; numbered lines are byte-for-byte.
1906 fn record_layout_for_printing_outlined(&self, layout: TyAndLayout<'tcx>) {
1907 // Ignore layouts that are done with non-empty environments or
1908 // non-monomorphic layouts, as the user only wants to see the stuff
1909 // resulting from the final codegen session.
1910 if layout.ty.has_param_types_or_consts() || !self.param_env.caller_bounds().is_empty() {
1914 // (delay format until we actually need it)
// Small helper that forwards one record into the session's code stats.
1915 let record = |kind, packed, opt_discr_size, variants| {
1916 let type_desc = format!("{:?}", layout.ty);
1917 self.tcx.sess.code_stats.record_type_size(
// Only ADTs get full per-variant treatment; closures are recorded with no
// field detail, everything else is skipped.
1928 let adt_def = match *layout.ty.kind() {
1929 ty::Adt(ref adt_def, _) => {
1930 debug!("print-type-size t: `{:?}` process adt", layout.ty);
1934 ty::Closure(..) => {
1935 debug!("print-type-size t: `{:?}` record closure", layout.ty);
1936 record(DataTypeKind::Closure, false, None, vec![]);
1941 debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
1946 let adt_kind = adt_def.adt_kind();
1947 let adt_packed = adt_def.repr().pack.is_some();
// Builds a `VariantInfo` for one variant: per-field offset/size/align plus
// the minimum size implied by the furthest field end.
1949 let build_variant_info = |n: Option<Symbol>, flds: &[Symbol], layout: TyAndLayout<'tcx>| {
1950 let mut min_size = Size::ZERO;
1951 let field_info: Vec<_> = flds
1955 let field_layout = layout.field(self, i);
1956 let offset = layout.fields.offset(i);
1957 let field_end = offset + field_layout.size;
1958 if min_size < field_end {
1959 min_size = field_end;
1962 name: name.to_string(),
1963 offset: offset.bytes(),
1964 size: field_layout.size.bytes(),
1965 align: field_layout.align.abi.bytes(),
1971 name: n.map(|n| n.to_string()),
1972 kind: if layout.is_unsized() { SizeKind::Min } else { SizeKind::Exact },
1973 align: layout.align.abi.bytes(),
// min_size == 0 means no fields contributed; fall back to the layout size.
1974 size: if min_size.bytes() == 0 { layout.size.bytes() } else { min_size.bytes() },
// Single-variant layouts record that one variant (or nothing, for empty
// enums); multi-variant layouts record every variant plus the tag size
// when the tag is directly encoded.
1979 match layout.variants {
1980 Variants::Single { index } => {
1981 if !adt_def.variants().is_empty() && layout.fields != FieldsShape::Primitive {
1983 "print-type-size `{:#?}` variant {}",
1985 adt_def.variant(index).name
1987 let variant_def = &adt_def.variant(index);
1988 let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
1993 vec![build_variant_info(Some(variant_def.name), &fields, layout)],
1996 // (This case arises for *empty* enums; so give it
1998 record(adt_kind.into(), adt_packed, None, vec![]);
2002 Variants::Multiple { tag, ref tag_encoding, .. } => {
2004 "print-type-size `{:#?}` adt general variants def {}",
2006 adt_def.variants().len()
2008 let variant_infos: Vec<_> = adt_def
2011 .map(|(i, variant_def)| {
2012 let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
2014 Some(variant_def.name),
2016 layout.for_variant(self, i),
2023 match tag_encoding {
2024 TagEncoding::Direct => Some(tag.size(self)),
2034 /// Type size "skeleton", i.e., the only information determining a type's size.
2035 /// While this is conservative, (aside from constant sizes, only pointers,
2036 /// newtypes thereof and null pointer optimized enums are allowed), it is
2037 /// enough to statically check common use cases of transmute.
2038 #[derive(Copy, Clone, Debug)]
2039 pub enum SizeSkeleton<'tcx> {
2040 /// Any statically computable Layout.
// NOTE(review): the variant declaration lines (`Known(Size)` and the
// `Pointer { non_zero, tail }` struct variant, per the uses in `compute`
// and `same_size` below) are elided in this extraction; only the doc
// comments survive.
2043 /// A potentially-fat pointer.
2045 /// If true, this pointer is never null.
2047 /// The type which determines the unsized metadata, if any,
2048 /// of this pointer. Either a type parameter or a projection
2049 /// depending on one, with regions erased.
2054 impl<'tcx> SizeSkeleton<'tcx> {
// Computes a size skeleton for `ty`: first tries the full layout query,
// and on failure falls back to structural rules for pointers, newtype
// ADTs, nullable-pointer enums, and normalizable projections/opaques.
// NOTE(review): the `pub fn compute(` signature line(s) and several match
// arms / braces are elided in this extraction; numbered lines are
// byte-for-byte.
2058 param_env: ty::ParamEnv<'tcx>,
2059 ) -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
2060 debug_assert!(!ty.has_infer_types_or_consts());
2062 // First try computing a static layout.
2063 let err = match tcx.layout_of(param_env.and(ty)) {
// Successful layout: the exact size is known.
2065 return Ok(SizeSkeleton::Known(layout.size));
// References and raw pointers: thin-vs-fat depends on the unsized tail.
2071 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
2072 let non_zero = !ty.is_unsafe_ptr();
2073 let tail = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
2075 ty::Param(_) | ty::Projection(_) => {
2076 debug_assert!(tail.has_param_types_or_consts());
2077 Ok(SizeSkeleton::Pointer { non_zero, tail: tcx.erase_regions(tail) })
2080 "SizeSkeleton::compute({}): layout errored ({}), yet \
2081 tail `{}` is not a type parameter or a projection",
2089 ty::Adt(def, substs) => {
2090 // Only newtypes and enums w/ nullable pointer optimization.
2091 if def.is_union() || def.variants().is_empty() || def.variants().len() > 2 {
2095 // Get a zero-sized variant or a pointer newtype.
2096 let zero_or_ptr_variant = |i| {
2097 let i = VariantIdx::new(i);
2099 def.variant(i).fields.iter().map(|field| {
2100 SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env)
// Scan the variant's fields: any nonzero-sized non-pointer field (or a
// second pointer) disqualifies it.
2103 for field in fields {
2106 SizeSkeleton::Known(size) => {
2107 if size.bytes() > 0 {
2111 SizeSkeleton::Pointer { .. } => {
2122 let v0 = zero_or_ptr_variant(0)?;
// Newtype (single variant): propagate the pointer skeleton, upgrading
// `non_zero` when a scalar-valid-range attribute excludes zero.
2124 if def.variants().len() == 1 {
2125 if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
2126 return Ok(SizeSkeleton::Pointer {
2128 || match tcx.layout_scalar_valid_range(def.did()) {
2129 (Bound::Included(start), Bound::Unbounded) => start > 0,
2130 (Bound::Included(start), Bound::Included(end)) => {
2131 0 < start && start < end
2142 let v1 = zero_or_ptr_variant(1)?;
2143 // Nullable pointer enum optimization.
// One variant is a non-null pointer, the other is zero-sized: the enum is
// pointer-sized but may now be null (the niche encodes the other variant).
2145 (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None)
2146 | (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
2147 Ok(SizeSkeleton::Pointer { non_zero: false, tail })
// Projections/opaques: retry after normalization, unless normalization
// made no progress (then give up with the original layout error).
2153 ty::Projection(_) | ty::Opaque(..) => {
2154 let normalized = tcx.normalize_erasing_regions(param_env, ty);
2155 if ty == normalized {
2158 SizeSkeleton::compute(normalized, tcx, param_env)
// Two skeletons have the same size iff both sizes are known and equal, or
// both are pointers (tail equality handled in the elided body below).
2166 pub fn same_size(self, other: SizeSkeleton<'_>) -> bool {
2167 match (self, other) {
2168 (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
2169 (SizeSkeleton::Pointer { tail: a, .. }, SizeSkeleton::Pointer { tail: b, .. }) => {
// --- Context plumbing traits ---------------------------------------------
// Small capability traits that let layout code be generic over "anything
// that can hand out a TyCtxt / ParamEnv / data layout / target spec".
// NOTE(review): the one-line bodies and closing braces of several impls are
// elided in this extraction; numbered lines are byte-for-byte.
2177 pub trait HasTyCtxt<'tcx>: HasDataLayout {
2178 fn tcx(&self) -> TyCtxt<'tcx>;
2181 pub trait HasParamEnv<'tcx> {
2182 fn param_env(&self) -> ty::ParamEnv<'tcx>;
// TyCtxt provides all three capabilities directly.
2185 impl<'tcx> HasDataLayout for TyCtxt<'tcx> {
2187 fn data_layout(&self) -> &TargetDataLayout {
2192 impl<'tcx> HasTargetSpec for TyCtxt<'tcx> {
2193 fn target_spec(&self) -> &Target {
2198 impl<'tcx> HasTyCtxt<'tcx> for TyCtxt<'tcx> {
2200 fn tcx(&self) -> TyCtxt<'tcx> {
// TyCtxtAt (a TyCtxt with an attached span) mirrors the TyCtxt impls.
2205 impl<'tcx> HasDataLayout for ty::query::TyCtxtAt<'tcx> {
2207 fn data_layout(&self) -> &TargetDataLayout {
2212 impl<'tcx> HasTargetSpec for ty::query::TyCtxtAt<'tcx> {
2213 fn target_spec(&self) -> &Target {
2218 impl<'tcx> HasTyCtxt<'tcx> for ty::query::TyCtxtAt<'tcx> {
2220 fn tcx(&self) -> TyCtxt<'tcx> {
// LayoutCx delegates each capability to its inner context field.
2225 impl<'tcx, C> HasParamEnv<'tcx> for LayoutCx<'tcx, C> {
2226 fn param_env(&self) -> ty::ParamEnv<'tcx> {
2231 impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
2232 fn data_layout(&self) -> &TargetDataLayout {
2233 self.tcx.data_layout()
2237 impl<'tcx, T: HasTargetSpec> HasTargetSpec for LayoutCx<'tcx, T> {
2238 fn target_spec(&self) -> &Target {
2239 self.tcx.target_spec()
2243 impl<'tcx, T: HasTyCtxt<'tcx>> HasTyCtxt<'tcx> for LayoutCx<'tcx, T> {
2244 fn tcx(&self) -> TyCtxt<'tcx> {
// MaybeResult abstracts over "T" vs "Result<T, E>" return shapes so the
// same layout helpers can serve both infallible and fallible callers.
2249 pub trait MaybeResult<T> {
2252 fn from(x: Result<T, Self::Error>) -> Self;
2253 fn to_result(self) -> Result<T, Self::Error>;
// Infallible case: T itself (Error is uninhabited, so `Ok(x)` is the only
// pattern — hence the irrefutable `from(Ok(x): ...)` signature).
2256 impl<T> MaybeResult<T> for T {
2259 fn from(Ok(x): Result<T, Self::Error>) -> Self {
2262 fn to_result(self) -> Result<T, Self::Error> {
// Fallible case: Result passes through unchanged.
2267 impl<T, E> MaybeResult<T> for Result<T, E> {
2270 fn from(x: Result<T, Self::Error>) -> Self {
2273 fn to_result(self) -> Result<T, Self::Error> {
// Convenience alias: a TyAndLayout specialized to this crate's Ty.
2278 pub type TyAndLayout<'tcx> = rustc_target::abi::TyAndLayout<'tcx, Ty<'tcx>>;
2280 /// Trait for contexts that want to be able to compute layouts of types.
2281 /// This automatically gives access to `LayoutOf`, through a blanket `impl`.
2282 pub trait LayoutOfHelpers<'tcx>: HasDataLayout + HasTyCtxt<'tcx> + HasParamEnv<'tcx> {
2283 /// The `TyAndLayout`-wrapping type (or `TyAndLayout` itself), which will be
2284 /// returned from `layout_of` (see also `handle_layout_err`).
2285 type LayoutOfResult: MaybeResult<TyAndLayout<'tcx>>;
2287 /// `Span` to use for `tcx.at(span)`, from `layout_of`.
2288 // FIXME(eddyb) perhaps make this mandatory to get contexts to track it better?
2290 fn layout_tcx_at_span(&self) -> Span {
2294 /// Helper used for `layout_of`, to adapt `tcx.layout_of(...)` into a
2295 /// `Self::LayoutOfResult` (which does not need to be a `Result<...>`).
2297 /// Most `impl`s, which propagate `LayoutError`s, should simply return `err`,
2298 /// but this hook allows e.g. codegen to return only `TyAndLayout` from its
2299 /// `cx.layout_of(...)`, without any `Result<...>` around it to deal with
2300 /// (and any `LayoutError`s are turned into fatal errors or ICEs).
2301 fn handle_layout_err(
2303 err: LayoutError<'tcx>,
2306 ) -> <Self::LayoutOfResult as MaybeResult<TyAndLayout<'tcx>>>::Error;
2309 /// Blanket extension trait for contexts that can compute layouts of types.
2310 pub trait LayoutOf<'tcx>: LayoutOfHelpers<'tcx> {
2311 /// Computes the layout of a type. Note that this implicitly
2312 /// executes in "reveal all" mode, and will normalize the input type.
2314 fn layout_of(&self, ty: Ty<'tcx>) -> Self::LayoutOfResult {
2315 self.spanned_layout_of(ty, DUMMY_SP)
2318 /// Computes the layout of a type, at `span`. Note that this implicitly
2319 /// executes in "reveal all" mode, and will normalize the input type.
2320 // FIXME(eddyb) avoid passing information like this, and instead add more
2321 // `TyCtxt::at`-like APIs to be able to do e.g. `cx.at(span).layout_of(ty)`.
2323 fn spanned_layout_of(&self, ty: Ty<'tcx>, span: Span) -> Self::LayoutOfResult {
2324 let span = if !span.is_dummy() { span } else { self.layout_tcx_at_span() };
2325 let tcx = self.tcx().at(span);
2328 tcx.layout_of(self.param_env().and(ty))
2329 .map_err(|err| self.handle_layout_err(err, span, ty)),
2334 impl<'tcx, C: LayoutOfHelpers<'tcx>> LayoutOf<'tcx> for C {}
2336 impl<'tcx> LayoutOfHelpers<'tcx> for LayoutCx<'tcx, TyCtxt<'tcx>> {
2337 type LayoutOfResult = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
2340 fn handle_layout_err(&self, err: LayoutError<'tcx>, _: Span, _: Ty<'tcx>) -> LayoutError<'tcx> {
2345 impl<'tcx> LayoutOfHelpers<'tcx> for LayoutCx<'tcx, ty::query::TyCtxtAt<'tcx>> {
2346 type LayoutOfResult = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
2349 fn layout_tcx_at_span(&self) -> Span {
2354 fn handle_layout_err(&self, err: LayoutError<'tcx>, _: Span, _: Ty<'tcx>) -> LayoutError<'tcx> {
2359 impl<'tcx, C> TyAbiInterface<'tcx, C> for Ty<'tcx>
2361 C: HasTyCtxt<'tcx> + HasParamEnv<'tcx>,
2363 fn ty_and_layout_for_variant(
2364 this: TyAndLayout<'tcx>,
2366 variant_index: VariantIdx,
2367 ) -> TyAndLayout<'tcx> {
2368 let layout = match this.variants {
2369 Variants::Single { index }
2370 // If all variants but one are uninhabited, the variant layout is the enum layout.
2371 if index == variant_index &&
2372 // Don't confuse variants of uninhabited enums with the enum itself.
2373 // For more details see https://github.com/rust-lang/rust/issues/69763.
2374 this.fields != FieldsShape::Primitive =>
2379 Variants::Single { index } => {
2381 let param_env = cx.param_env();
2383 // Deny calling for_variant more than once for non-Single enums.
2384 if let Ok(original_layout) = tcx.layout_of(param_env.and(this.ty)) {
2385 assert_eq!(original_layout.variants, Variants::Single { index });
2388 let fields = match this.ty.kind() {
2389 ty::Adt(def, _) if def.variants().is_empty() =>
2390 bug!("for_variant called on zero-variant enum"),
2391 ty::Adt(def, _) => def.variant(variant_index).fields.len(),
2394 tcx.intern_layout(LayoutS {
2395 variants: Variants::Single { index: variant_index },
2396 fields: match NonZeroUsize::new(fields) {
2397 Some(fields) => FieldsShape::Union(fields),
2398 None => FieldsShape::Arbitrary { offsets: vec![], memory_index: vec![] },
2400 abi: Abi::Uninhabited,
2401 largest_niche: None,
2402 align: tcx.data_layout.i8_align,
2407 Variants::Multiple { ref variants, .. } => variants[variant_index],
2410 assert_eq!(*layout.variants(), Variants::Single { index: variant_index });
2412 TyAndLayout { ty: this.ty, layout }
2415 fn ty_and_layout_field(this: TyAndLayout<'tcx>, cx: &C, i: usize) -> TyAndLayout<'tcx> {
2416 enum TyMaybeWithLayout<'tcx> {
2418 TyAndLayout(TyAndLayout<'tcx>),
2421 fn field_ty_or_layout<'tcx>(
2422 this: TyAndLayout<'tcx>,
2423 cx: &(impl HasTyCtxt<'tcx> + HasParamEnv<'tcx>),
2425 ) -> TyMaybeWithLayout<'tcx> {
2427 let tag_layout = |tag: Scalar| -> TyAndLayout<'tcx> {
2429 layout: tcx.intern_layout(LayoutS::scalar(cx, tag)),
2430 ty: tag.primitive().to_ty(tcx),
2434 match *this.ty.kind() {
2443 | ty::GeneratorWitness(..)
2445 | ty::Dynamic(..) => bug!("TyAndLayout::field({:?}): not applicable", this),
2447 // Potentially-fat pointers.
2448 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
2449 assert!(i < this.fields.count());
2451 // Reuse the fat `*T` type as its own thin pointer data field.
2452 // This provides information about, e.g., DST struct pointees
2453 // (which may have no non-DST form), and will work as long
2454 // as the `Abi` or `FieldsShape` is checked by users.
2456 let nil = tcx.mk_unit();
2457 let unit_ptr_ty = if this.ty.is_unsafe_ptr() {
2460 tcx.mk_mut_ref(tcx.lifetimes.re_static, nil)
2463 // NOTE(eddyb) using an empty `ParamEnv`, and `unwrap`-ing
2464 // the `Result` should always work because the type is
2465 // always either `*mut ()` or `&'static mut ()`.
2466 return TyMaybeWithLayout::TyAndLayout(TyAndLayout {
2468 ..tcx.layout_of(ty::ParamEnv::reveal_all().and(unit_ptr_ty)).unwrap()
2472 match tcx.struct_tail_erasing_lifetimes(pointee, cx.param_env()).kind() {
2473 ty::Slice(_) | ty::Str => TyMaybeWithLayout::Ty(tcx.types.usize),
2474 ty::Dynamic(_, _) => {
2475 TyMaybeWithLayout::Ty(tcx.mk_imm_ref(
2476 tcx.lifetimes.re_static,
2477 tcx.mk_array(tcx.types.usize, 3),
2479 /* FIXME: use actual fn pointers
2480 Warning: naively computing the number of entries in the
2481 vtable by counting the methods on the trait + methods on
2482 all parent traits does not work, because some methods can
2483 be not object safe and thus excluded from the vtable.
2484 Increase this counter if you tried to implement this but
2485 failed to do it without duplicating a lot of code from
2486 other places in the compiler: 2
2488 tcx.mk_array(tcx.types.usize, 3),
2489 tcx.mk_array(Option<fn()>),
2493 _ => bug!("TyAndLayout::field({:?}): not applicable", this),
2497 // Arrays and slices.
2498 ty::Array(element, _) | ty::Slice(element) => TyMaybeWithLayout::Ty(element),
2499 ty::Str => TyMaybeWithLayout::Ty(tcx.types.u8),
2501 // Tuples, generators and closures.
2502 ty::Closure(_, ref substs) => field_ty_or_layout(
2503 TyAndLayout { ty: substs.as_closure().tupled_upvars_ty(), ..this },
2508 ty::Generator(def_id, ref substs, _) => match this.variants {
2509 Variants::Single { index } => TyMaybeWithLayout::Ty(
2512 .state_tys(def_id, tcx)
2513 .nth(index.as_usize())
2518 Variants::Multiple { tag, tag_field, .. } => {
2520 return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
2522 TyMaybeWithLayout::Ty(substs.as_generator().prefix_tys().nth(i).unwrap())
2526 ty::Tuple(tys) => TyMaybeWithLayout::Ty(tys[i]),
2529 ty::Adt(def, substs) => {
2530 match this.variants {
2531 Variants::Single { index } => {
2532 TyMaybeWithLayout::Ty(def.variant(index).fields[i].ty(tcx, substs))
2535 // Discriminant field for enums (where applicable).
2536 Variants::Multiple { tag, .. } => {
2538 return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
2545 | ty::Placeholder(..)
2549 | ty::Error(_) => bug!("TyAndLayout::field: unexpected type `{}`", this.ty),
2553 match field_ty_or_layout(this, cx, i) {
2554 TyMaybeWithLayout::Ty(field_ty) => {
2555 cx.tcx().layout_of(cx.param_env().and(field_ty)).unwrap_or_else(|e| {
2557 "failed to get layout for `{}`: {},\n\
2558 despite it being a field (#{}) of an existing layout: {:#?}",
2566 TyMaybeWithLayout::TyAndLayout(field_layout) => field_layout,
2570 fn ty_and_layout_pointee_info_at(
2571 this: TyAndLayout<'tcx>,
2574 ) -> Option<PointeeInfo> {
2576 let param_env = cx.param_env();
2578 let addr_space_of_ty = |ty: Ty<'tcx>| {
2579 if ty.is_fn() { cx.data_layout().instruction_address_space } else { AddressSpace::DATA }
2582 let pointee_info = match *this.ty.kind() {
2583 ty::RawPtr(mt) if offset.bytes() == 0 => {
2584 tcx.layout_of(param_env.and(mt.ty)).ok().map(|layout| PointeeInfo {
2586 align: layout.align.abi,
2588 address_space: addr_space_of_ty(mt.ty),
2591 ty::FnPtr(fn_sig) if offset.bytes() == 0 => {
2592 tcx.layout_of(param_env.and(tcx.mk_fn_ptr(fn_sig))).ok().map(|layout| PointeeInfo {
2594 align: layout.align.abi,
2596 address_space: cx.data_layout().instruction_address_space,
2599 ty::Ref(_, ty, mt) if offset.bytes() == 0 => {
2600 let address_space = addr_space_of_ty(ty);
2601 let kind = if tcx.sess.opts.optimize == OptLevel::No {
2602 // Use conservative pointer kind if not optimizing. This saves us the
2603 // Freeze/Unpin queries, and can save time in the codegen backend (noalias
2604 // attributes in LLVM have compile-time cost even in unoptimized builds).
2608 hir::Mutability::Not => {
2609 if ty.is_freeze(tcx.at(DUMMY_SP), cx.param_env()) {
2615 hir::Mutability::Mut => {
2616 // References to self-referential structures should not be considered
2617 // noalias, as another pointer to the structure can be obtained, that
2618 // is not based-on the original reference. We consider all !Unpin
2619 // types to be potentially self-referential here.
2620 if ty.is_unpin(tcx.at(DUMMY_SP), cx.param_env()) {
2621 PointerKind::UniqueBorrowed
2629 tcx.layout_of(param_env.and(ty)).ok().map(|layout| PointeeInfo {
2631 align: layout.align.abi,
2638 let mut data_variant = match this.variants {
2639 // Within the discriminant field, only the niche itself is
2640 // always initialized, so we only check for a pointer at its
2643 // If the niche is a pointer, it's either valid (according
2644 // to its type), or null (which the niche field's scalar
2645 // validity range encodes). This allows using
2646 // `dereferenceable_or_null` for e.g., `Option<&T>`, and
2647 // this will continue to work as long as we don't start
2648 // using more niches than just null (e.g., the first page of
2649 // the address space, or unaligned pointers).
2650 Variants::Multiple {
2651 tag_encoding: TagEncoding::Niche { dataful_variant, .. },
2654 } if this.fields.offset(tag_field) == offset => {
2655 Some(this.for_variant(cx, dataful_variant))
2660 if let Some(variant) = data_variant {
2661 // We're not interested in any unions.
2662 if let FieldsShape::Union(_) = variant.fields {
2663 data_variant = None;
2667 let mut result = None;
2669 if let Some(variant) = data_variant {
2670 let ptr_end = offset + Pointer.size(cx);
2671 for i in 0..variant.fields.count() {
2672 let field_start = variant.fields.offset(i);
2673 if field_start <= offset {
2674 let field = variant.field(cx, i);
2675 result = field.to_result().ok().and_then(|field| {
2676 if ptr_end <= field_start + field.size {
2677 // We found the right field, look inside it.
2679 field.pointee_info_at(cx, offset - field_start);
2685 if result.is_some() {
2692 // FIXME(eddyb) This should be for `ptr::Unique<T>`, not `Box<T>`.
2693 if let Some(ref mut pointee) = result {
2694 if let ty::Adt(def, _) = this.ty.kind() {
2695 if def.is_box() && offset.bytes() == 0 {
2696 pointee.safe = Some(PointerKind::UniqueOwned);
2706 "pointee_info_at (offset={:?}, type kind: {:?}) => {:?}",
2715 fn is_adt(this: TyAndLayout<'tcx>) -> bool {
2716 matches!(this.ty.kind(), ty::Adt(..))
2719 fn is_never(this: TyAndLayout<'tcx>) -> bool {
2720 this.ty.kind() == &ty::Never
2723 fn is_tuple(this: TyAndLayout<'tcx>) -> bool {
2724 matches!(this.ty.kind(), ty::Tuple(..))
2727 fn is_unit(this: TyAndLayout<'tcx>) -> bool {
2728 matches!(this.ty.kind(), ty::Tuple(list) if list.len() == 0)
2732 impl<'tcx> ty::Instance<'tcx> {
2733 // NOTE(eddyb) this is private to avoid using it from outside of
2734 // `fn_abi_of_instance` - any other uses are either too high-level
2735 // for `Instance` (e.g. typeck would use `Ty::fn_sig` instead),
2736 // or should go through `FnAbi` instead, to avoid losing any
2737 // adjustments `fn_abi_of_instance` might be performing.
2738 fn fn_sig_for_fn_abi(
2741 param_env: ty::ParamEnv<'tcx>,
2742 ) -> ty::PolyFnSig<'tcx> {
2743 let ty = self.ty(tcx, param_env);
2746 // HACK(davidtwco,eddyb): This is a workaround for polymorphization considering
2747 // parameters unused if they show up in the signature, but not in the `mir::Body`
2748 // (i.e. due to being inside a projection that got normalized, see
2749 // `src/test/ui/polymorphization/normalized_sig_types.rs`), and codegen not keeping
2750 // track of a polymorphization `ParamEnv` to allow normalizing later.
2751 let mut sig = match *ty.kind() {
2752 ty::FnDef(def_id, substs) => tcx
2753 .normalize_erasing_regions(tcx.param_env(def_id), tcx.fn_sig(def_id))
2754 .subst(tcx, substs),
2755 _ => unreachable!(),
2758 if let ty::InstanceDef::VtableShim(..) = self.def {
2759 // Modify `fn(self, ...)` to `fn(self: *mut Self, ...)`.
2760 sig = sig.map_bound(|mut sig| {
2761 let mut inputs_and_output = sig.inputs_and_output.to_vec();
2762 inputs_and_output[0] = tcx.mk_mut_ptr(inputs_and_output[0]);
2763 sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);
2769 ty::Closure(def_id, substs) => {
2770 let sig = substs.as_closure().sig();
2772 let bound_vars = tcx.mk_bound_variable_kinds(
2775 .chain(iter::once(ty::BoundVariableKind::Region(ty::BrEnv))),
2777 let br = ty::BoundRegion {
2778 var: ty::BoundVar::from_usize(bound_vars.len() - 1),
2779 kind: ty::BoundRegionKind::BrEnv,
2781 let env_region = ty::ReLateBound(ty::INNERMOST, br);
2782 let env_ty = tcx.closure_env_ty(def_id, substs, env_region).unwrap();
2784 let sig = sig.skip_binder();
2785 ty::Binder::bind_with_vars(
2787 iter::once(env_ty).chain(sig.inputs().iter().cloned()),
2796 ty::Generator(_, substs, _) => {
2797 let sig = substs.as_generator().poly_sig();
2799 let bound_vars = tcx.mk_bound_variable_kinds(
2802 .chain(iter::once(ty::BoundVariableKind::Region(ty::BrEnv))),
2804 let br = ty::BoundRegion {
2805 var: ty::BoundVar::from_usize(bound_vars.len() - 1),
2806 kind: ty::BoundRegionKind::BrEnv,
2808 let env_region = ty::ReLateBound(ty::INNERMOST, br);
2809 let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty);
2811 let pin_did = tcx.require_lang_item(LangItem::Pin, None);
2812 let pin_adt_ref = tcx.adt_def(pin_did);
2813 let pin_substs = tcx.intern_substs(&[env_ty.into()]);
2814 let env_ty = tcx.mk_adt(pin_adt_ref, pin_substs);
2816 let sig = sig.skip_binder();
2817 let state_did = tcx.require_lang_item(LangItem::GeneratorState, None);
2818 let state_adt_ref = tcx.adt_def(state_did);
2819 let state_substs = tcx.intern_substs(&[sig.yield_ty.into(), sig.return_ty.into()]);
2820 let ret_ty = tcx.mk_adt(state_adt_ref, state_substs);
2821 ty::Binder::bind_with_vars(
2823 [env_ty, sig.resume_ty].iter(),
2826 hir::Unsafety::Normal,
2827 rustc_target::spec::abi::Abi::Rust,
2832 _ => bug!("unexpected type {:?} in Instance::fn_sig", ty),
2837 /// Calculates whether a function's ABI can unwind or not.
2839 /// This takes two primary parameters:
2841 /// * `codegen_fn_attr_flags` - these are flags calculated as part of the
2842 /// codegen attrs for a defined function. For function pointers this set of
2843 /// flags is the empty set. This is only applicable for Rust-defined
2844 /// functions, and generally isn't needed except for small optimizations where
2845 /// we try to say a function which otherwise might look like it could unwind
2846 /// doesn't actually unwind (such as for intrinsics and such).
2848 /// * `abi` - this is the ABI that the function is defined with. This is the
2849 /// primary factor for determining whether a function can unwind or not.
2851 /// Note that in this case unwinding is not necessarily panicking in Rust. Rust
2852 /// panics are implemented with unwinds on most platforms (when
2853 /// `-Cpanic=unwind`), but this also accounts for `-Cpanic=abort` build modes.
2854 /// Notably unwinding is disallowed for most non-Rust ABIs unless it's
2855 /// specifically in the name (e.g. `"C-unwind"`). Unwinding within each ABI is
2856 /// defined for each ABI individually, but it always corresponds to some form of
2857 /// stack-based unwinding (the exact mechanism of which varies
2858 /// platform-by-platform).
2860 /// Rust functions are classified whether or not they can unwind based on the
2861 /// active "panic strategy". In other words Rust functions are considered to
2862 /// unwind in `-Cpanic=unwind` mode and cannot unwind in `-Cpanic=abort` mode.
2863 /// Note that Rust supports intermingling panic=abort and panic=unwind code, but
2864 /// only if the final panic mode is panic=abort. In this scenario any code
2865 /// previously compiled assuming that a function can unwind is still correct, it
2866 /// just never happens to actually unwind at runtime.
2868 /// This function's answer to whether or not a function can unwind is quite
2869 /// impactful throughout the compiler. This affects things like:
2871 /// * Calling a function which can't unwind means codegen simply ignores any
2872 /// associated unwinding cleanup.
2873 /// * Calling a function which can unwind from a function which can't unwind
2874 /// causes the `abort_unwinding_calls` MIR pass to insert a landing pad that
2875 /// aborts the process.
2876 /// * This affects whether functions have the LLVM `nounwind` attribute, which
2877 /// affects various optimizations and codegen.
2879 /// FIXME: this is actually buggy with respect to Rust functions. Rust functions
2880 /// compiled with `-Cpanic=unwind` and referenced from another crate compiled
2881 /// with `-Cpanic=abort` will look like they can't unwind when in fact they
2882 /// might (from a foreign exception or similar).
2884 pub fn fn_can_unwind<'tcx>(tcx: TyCtxt<'tcx>, fn_def_id: Option<DefId>, abi: SpecAbi) -> bool {
2885 if let Some(did) = fn_def_id {
2886 // Special attribute for functions which can't unwind.
2887 if tcx.codegen_fn_attrs(did).flags.contains(CodegenFnAttrFlags::NEVER_UNWIND) {
2891 // With -Z panic-in-drop=abort, drop_in_place never unwinds.
2893 // This is not part of `codegen_fn_attrs` as it can differ between crates
2894 // and therefore cannot be computed in core.
2895 if tcx.sess.opts.debugging_opts.panic_in_drop == PanicStrategy::Abort {
2896 if Some(did) == tcx.lang_items().drop_in_place_fn() {
2902 // Otherwise if this isn't special then unwinding is generally determined by
2903 // the ABI of the function itself. ABIs like `C` have variants which also
2904 // specifically allow unwinding (`C-unwind`), but not all platform-specific
2905 // ABIs have such an option. Otherwise the only other thing here is Rust
2906 // itself, and those ABIs are determined by the panic strategy configured
2907 // for this compilation.
2909 // Unfortunately at this time there's also another caveat. Rust [RFC
2910 // 2945][rfc] has been accepted and is in the process of being implemented
2911 // and stabilized. In this interim state we need to deal with historical
2912 // rustc behavior as well as plan for future rustc behavior.
2914 // Historically functions declared with `extern "C"` were marked at the
2915 // codegen layer as `nounwind`. This happened regardless of `panic=unwind`
2916 // or not. This is UB for functions in `panic=unwind` mode that then
2917 // actually panic and unwind. Note that this behavior is true for both
2918 // externally declared functions as well as Rust-defined functions.
2920 // To fix this UB rustc would like to change in the future to catch unwinds
2921 // from function calls that may unwind within a Rust-defined `extern "C"`
2922 // function and forcibly abort the process, thereby respecting the
2923 // `nounwind` attribute emitted for `extern "C"`. This behavior change isn't
2924 // ready to roll out, so determining whether or not the `C` family of ABIs
2925 // unwinds is conditional not only on their definition but also whether the
2926 // `#![feature(c_unwind)]` feature gate is active.
2928 // Note that this means that unlike historical compilers rustc now, by
2929 // default, unconditionally thinks that the `C` ABI may unwind. This will
2930 // prevent some optimization opportunities, however, so we try to scope this
2931 // change and only assume that `C` unwinds with `panic=unwind` (as opposed
2932 // to `panic=abort`).
2934 // Eventually the check against `c_unwind` here will ideally get removed and
2935 // this'll be a little cleaner as it'll be a straightforward check of the
2938 // [rfc]: https://github.com/rust-lang/rfcs/blob/master/text/2945-c-unwind-abi.md
2944 | Stdcall { unwind }
2945 | Fastcall { unwind }
2946 | Vectorcall { unwind }
2947 | Thiscall { unwind }
2950 | SysV64 { unwind } => {
2952 || (!tcx.features().c_unwind && tcx.sess.panic_strategy() == PanicStrategy::Unwind)
2960 | AvrNonBlockingInterrupt
2961 | CCmseNonSecureCall
2965 | Unadjusted => false,
2966 Rust | RustCall => tcx.sess.panic_strategy() == PanicStrategy::Unwind,
2971 pub fn conv_from_spec_abi(tcx: TyCtxt<'_>, abi: SpecAbi) -> Conv {
2972 use rustc_target::spec::abi::Abi::*;
2973 match tcx.sess.target.adjust_abi(abi) {
2974 RustIntrinsic | PlatformIntrinsic | Rust | RustCall => Conv::Rust,
2976 // It's the ABI's job to select this, not ours.
2977 System { .. } => bug!("system abi should be selected elsewhere"),
2978 EfiApi => bug!("eficall abi should be selected elsewhere"),
2980 Stdcall { .. } => Conv::X86Stdcall,
2981 Fastcall { .. } => Conv::X86Fastcall,
2982 Vectorcall { .. } => Conv::X86VectorCall,
2983 Thiscall { .. } => Conv::X86ThisCall,
2984 C { .. } => Conv::C,
2985 Unadjusted => Conv::C,
2986 Win64 { .. } => Conv::X86_64Win64,
2987 SysV64 { .. } => Conv::X86_64SysV,
2988 Aapcs { .. } => Conv::ArmAapcs,
2989 CCmseNonSecureCall => Conv::CCmseNonSecureCall,
2990 PtxKernel => Conv::PtxKernel,
2991 Msp430Interrupt => Conv::Msp430Intr,
2992 X86Interrupt => Conv::X86Intr,
2993 AmdGpuKernel => Conv::AmdGpuKernel,
2994 AvrInterrupt => Conv::AvrInterrupt,
2995 AvrNonBlockingInterrupt => Conv::AvrNonBlockingInterrupt,
2998 // These API constants ought to be more specific...
2999 Cdecl { .. } => Conv::C,
3003 /// Error produced by attempting to compute or adjust a `FnAbi`.
3004 #[derive(Copy, Clone, Debug, HashStable)]
3005 pub enum FnAbiError<'tcx> {
3006 /// Error produced by a `layout_of` call, while computing `FnAbi` initially.
3007 Layout(LayoutError<'tcx>),
3009 /// Error produced by attempting to adjust a `FnAbi`, for a "foreign" ABI.
3010 AdjustForForeignAbi(call::AdjustForForeignAbiError),
3013 impl<'tcx> From<LayoutError<'tcx>> for FnAbiError<'tcx> {
3014 fn from(err: LayoutError<'tcx>) -> Self {
3019 impl From<call::AdjustForForeignAbiError> for FnAbiError<'_> {
3020 fn from(err: call::AdjustForForeignAbiError) -> Self {
3021 Self::AdjustForForeignAbi(err)
3025 impl<'tcx> fmt::Display for FnAbiError<'tcx> {
3026 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
3028 Self::Layout(err) => err.fmt(f),
3029 Self::AdjustForForeignAbi(err) => err.fmt(f),
3034 // FIXME(eddyb) maybe use something like this for an unified `fn_abi_of`, not
3035 // just for error handling.
3037 pub enum FnAbiRequest<'tcx> {
3038 OfFnPtr { sig: ty::PolyFnSig<'tcx>, extra_args: &'tcx ty::List<Ty<'tcx>> },
3039 OfInstance { instance: ty::Instance<'tcx>, extra_args: &'tcx ty::List<Ty<'tcx>> },
3042 /// Trait for contexts that want to be able to compute `FnAbi`s.
3043 /// This automatically gives access to `FnAbiOf`, through a blanket `impl`.
3044 pub trait FnAbiOfHelpers<'tcx>: LayoutOfHelpers<'tcx> {
3045 /// The `&FnAbi`-wrapping type (or `&FnAbi` itself), which will be
3046 /// returned from `fn_abi_of_*` (see also `handle_fn_abi_err`).
3047 type FnAbiOfResult: MaybeResult<&'tcx FnAbi<'tcx, Ty<'tcx>>>;
3049 /// Helper used for `fn_abi_of_*`, to adapt `tcx.fn_abi_of_*(...)` into a
3050 /// `Self::FnAbiOfResult` (which does not need to be a `Result<...>`).
3052 /// Most `impl`s, which propagate `FnAbiError`s, should simply return `err`,
3053 /// but this hook allows e.g. codegen to return only `&FnAbi` from its
3054 /// `cx.fn_abi_of_*(...)`, without any `Result<...>` around it to deal with
3055 /// (and any `FnAbiError`s are turned into fatal errors or ICEs).
3056 fn handle_fn_abi_err(
3058 err: FnAbiError<'tcx>,
3060 fn_abi_request: FnAbiRequest<'tcx>,
3061 ) -> <Self::FnAbiOfResult as MaybeResult<&'tcx FnAbi<'tcx, Ty<'tcx>>>>::Error;
3064 /// Blanket extension trait for contexts that can compute `FnAbi`s.
3065 pub trait FnAbiOf<'tcx>: FnAbiOfHelpers<'tcx> {
3066 /// Compute a `FnAbi` suitable for indirect calls, i.e. to `fn` pointers.
3068 /// NB: this doesn't handle virtual calls - those should use `fn_abi_of_instance`
3069 /// instead, where the instance is an `InstanceDef::Virtual`.
3071 fn fn_abi_of_fn_ptr(
3073 sig: ty::PolyFnSig<'tcx>,
3074 extra_args: &'tcx ty::List<Ty<'tcx>>,
3075 ) -> Self::FnAbiOfResult {
3076 // FIXME(eddyb) get a better `span` here.
3077 let span = self.layout_tcx_at_span();
3078 let tcx = self.tcx().at(span);
3080 MaybeResult::from(tcx.fn_abi_of_fn_ptr(self.param_env().and((sig, extra_args))).map_err(
3081 |err| self.handle_fn_abi_err(err, span, FnAbiRequest::OfFnPtr { sig, extra_args }),
3085 /// Compute a `FnAbi` suitable for declaring/defining an `fn` instance, and for
3086 /// direct calls to an `fn`.
3088 /// NB: that includes virtual calls, which are represented by "direct calls"
3089 /// to an `InstanceDef::Virtual` instance (of `<dyn Trait as Trait>::fn`).
3091 fn fn_abi_of_instance(
3093 instance: ty::Instance<'tcx>,
3094 extra_args: &'tcx ty::List<Ty<'tcx>>,
3095 ) -> Self::FnAbiOfResult {
3096 // FIXME(eddyb) get a better `span` here.
3097 let span = self.layout_tcx_at_span();
3098 let tcx = self.tcx().at(span);
3101 tcx.fn_abi_of_instance(self.param_env().and((instance, extra_args))).map_err(|err| {
3102 // HACK(eddyb) at least for definitions of/calls to `Instance`s,
3103 // we can get some kind of span even if one wasn't provided.
3104 // However, we don't do this early in order to avoid calling
3105 // `def_span` unconditionally (which may have a perf penalty).
3106 let span = if !span.is_dummy() { span } else { tcx.def_span(instance.def_id()) };
3107 self.handle_fn_abi_err(err, span, FnAbiRequest::OfInstance { instance, extra_args })
3113 impl<'tcx, C: FnAbiOfHelpers<'tcx>> FnAbiOf<'tcx> for C {}
3115 fn fn_abi_of_fn_ptr<'tcx>(
3117 query: ty::ParamEnvAnd<'tcx, (ty::PolyFnSig<'tcx>, &'tcx ty::List<Ty<'tcx>>)>,
3118 ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
3119 let (param_env, (sig, extra_args)) = query.into_parts();
3121 LayoutCx { tcx, param_env }.fn_abi_new_uncached(sig, extra_args, None, None, false)
3124 fn fn_abi_of_instance<'tcx>(
3126 query: ty::ParamEnvAnd<'tcx, (ty::Instance<'tcx>, &'tcx ty::List<Ty<'tcx>>)>,
3127 ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
3128 let (param_env, (instance, extra_args)) = query.into_parts();
3130 let sig = instance.fn_sig_for_fn_abi(tcx, param_env);
3132 let caller_location = if instance.def.requires_caller_location(tcx) {
3133 Some(tcx.caller_location_ty())
3138 LayoutCx { tcx, param_env }.fn_abi_new_uncached(
3142 Some(instance.def_id()),
3143 matches!(instance.def, ty::InstanceDef::Virtual(..)),
3147 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
3148 // FIXME(eddyb) perhaps group the signature/type-containing (or all of them?)
3149 // arguments of this method, into a separate `struct`.
3150 fn fn_abi_new_uncached(
3152 sig: ty::PolyFnSig<'tcx>,
3153 extra_args: &[Ty<'tcx>],
3154 caller_location: Option<Ty<'tcx>>,
3155 fn_def_id: Option<DefId>,
3156 // FIXME(eddyb) replace this with something typed, like an `enum`.
3157 force_thin_self_ptr: bool,
3158 ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
3159 debug!("fn_abi_new_uncached({:?}, {:?})", sig, extra_args);
3161 let sig = self.tcx.normalize_erasing_late_bound_regions(self.param_env, sig);
3163 let conv = conv_from_spec_abi(self.tcx(), sig.abi);
3165 let mut inputs = sig.inputs();
3166 let extra_args = if sig.abi == RustCall {
3167 assert!(!sig.c_variadic && extra_args.is_empty());
3169 if let Some(input) = sig.inputs().last() {
3170 if let ty::Tuple(tupled_arguments) = input.kind() {
3171 inputs = &sig.inputs()[0..sig.inputs().len() - 1];
3175 "argument to function with \"rust-call\" ABI \
3181 "argument to function with \"rust-call\" ABI \
3186 assert!(sig.c_variadic || extra_args.is_empty());
3190 let target = &self.tcx.sess.target;
3191 let target_env_gnu_like = matches!(&target.env[..], "gnu" | "musl" | "uclibc");
3192 let win_x64_gnu = target.os == "windows" && target.arch == "x86_64" && target.env == "gnu";
3193 let linux_s390x_gnu_like =
3194 target.os == "linux" && target.arch == "s390x" && target_env_gnu_like;
3195 let linux_sparc64_gnu_like =
3196 target.os == "linux" && target.arch == "sparc64" && target_env_gnu_like;
3197 let linux_powerpc_gnu_like =
3198 target.os == "linux" && target.arch == "powerpc" && target_env_gnu_like;
3200 let rust_abi = matches!(sig.abi, RustIntrinsic | PlatformIntrinsic | Rust | RustCall);
3202 // Handle safe Rust thin and fat pointers.
3203 let adjust_for_rust_scalar = |attrs: &mut ArgAttributes,
3205 layout: TyAndLayout<'tcx>,
3208 // Booleans are always a noundef i1 that needs to be zero-extended.
3209 if scalar.is_bool() {
3210 attrs.ext(ArgExtension::Zext);
3211 attrs.set(ArgAttribute::NoUndef);
3215 // Scalars which have invalid values cannot be undef.
3216 if !scalar.is_always_valid(self) {
3217 attrs.set(ArgAttribute::NoUndef);
3220 // Only pointer types handled below.
3221 let Scalar::Initialized { value: Pointer, valid_range} = scalar else { return };
3223 if !valid_range.contains(0) {
3224 attrs.set(ArgAttribute::NonNull);
3227 if let Some(pointee) = layout.pointee_info_at(self, offset) {
3228 if let Some(kind) = pointee.safe {
3229 attrs.pointee_align = Some(pointee.align);
3231 // `Box` (`UniqueBorrowed`) are not necessarily dereferenceable
3232 // for the entire duration of the function as they can be deallocated
3233 // at any time. Set their valid size to 0.
3234 attrs.pointee_size = match kind {
3235 PointerKind::UniqueOwned => Size::ZERO,
3239 // `Box`, `&T`, and `&mut T` cannot be undef.
3240 // Note that this only applies to the value of the pointer itself;
3241 // this attribute doesn't make it UB for the pointed-to data to be undef.
3242 attrs.set(ArgAttribute::NoUndef);
3244 // `Box` pointer parameters never alias because ownership is transferred
3245 // `&mut` pointer parameters never alias other parameters,
3246 // or mutable global data
3248 // `&T` where `T` contains no `UnsafeCell<U>` is immutable,
3249 // and can be marked as both `readonly` and `noalias`, as
3250 // LLVM's definition of `noalias` is based solely on memory
3251 // dependencies rather than pointer equality
3253 // Due to past miscompiles in LLVM, we apply a separate NoAliasMutRef attribute
3254 // for UniqueBorrowed arguments, so that the codegen backend can decide whether
3255 // or not to actually emit the attribute. It can also be controlled with the
3256 // `-Zmutable-noalias` debugging option.
3257 let no_alias = match kind {
3258 PointerKind::Shared | PointerKind::UniqueBorrowed => false,
3259 PointerKind::UniqueOwned => true,
3260 PointerKind::Frozen => !is_return,
3263 attrs.set(ArgAttribute::NoAlias);
3266 if kind == PointerKind::Frozen && !is_return {
3267 attrs.set(ArgAttribute::ReadOnly);
3270 if kind == PointerKind::UniqueBorrowed && !is_return {
3271 attrs.set(ArgAttribute::NoAliasMutRef);
3277 let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| -> Result<_, FnAbiError<'tcx>> {
3278 let is_return = arg_idx.is_none();
3280 let layout = self.layout_of(ty)?;
3281 let layout = if force_thin_self_ptr && arg_idx == Some(0) {
3282 // Don't pass the vtable, it's not an argument of the virtual fn.
3283 // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
3284 // or `&/&mut dyn Trait` because this is special-cased elsewhere in codegen
3285 make_thin_self_ptr(self, layout)
3290 let mut arg = ArgAbi::new(self, layout, |layout, scalar, offset| {
3291 let mut attrs = ArgAttributes::new();
3292 adjust_for_rust_scalar(&mut attrs, scalar, *layout, offset, is_return);
3296 if arg.layout.is_zst() {
3297 // For some forsaken reason, x86_64-pc-windows-gnu
3298 // doesn't ignore zero-sized struct arguments.
3299 // The same is true for {s390x,sparc64,powerpc}-unknown-linux-{gnu,musl,uclibc}.
3303 && !linux_s390x_gnu_like
3304 && !linux_sparc64_gnu_like
3305 && !linux_powerpc_gnu_like)
3307 arg.mode = PassMode::Ignore;
3314 let mut fn_abi = FnAbi {
3315 ret: arg_of(sig.output(), None)?,
3319 .chain(extra_args.iter().copied())
3320 .chain(caller_location)
3322 .map(|(i, ty)| arg_of(ty, Some(i)))
3323 .collect::<Result<_, _>>()?,
3324 c_variadic: sig.c_variadic,
3325 fixed_count: inputs.len(),
3327 can_unwind: fn_can_unwind(self.tcx(), fn_def_id, sig.abi),
3329 self.fn_abi_adjust_for_abi(&mut fn_abi, sig.abi)?;
3330 debug!("fn_abi_new_uncached = {:?}", fn_abi);
3331 Ok(self.tcx.arena.alloc(fn_abi))
/// Applies ABI-specific fixups to an already-computed `FnAbi`.
///
/// For the Rust-family ABIs (`Rust`, `RustCall`, `RustIntrinsic`,
/// `PlatformIntrinsic`) the local `fixup` closure below is run on the
/// return place and on every argument; `adjust_for_foreign_abi` is
/// invoked near the end — presumably for the non-Rust ABIs, but the
/// branch structure between the visible statements is elided from this
/// listing, so confirm against the full source.
///
/// NOTE(review): the embedded line numbering in this excerpt has gaps,
/// i.e. some statements between the visible lines are missing here.
3334 fn fn_abi_adjust_for_abi(
3336 fn_abi: &mut FnAbi<'tcx, Ty<'tcx>>,
3338 ) -> Result<(), FnAbiError<'tcx>> {
// `Unadjusted` means "leave the computed ABI exactly as-is"; the body of
// this branch (presumably an early `return Ok(())`) is elided here —
// TODO confirm against the full source.
3339 if abi == SpecAbi::Unadjusted {
// Rust-family ABIs: normalize how each value is passed.
3343 if abi == SpecAbi::Rust
3344 || abi == SpecAbi::RustCall
3345 || abi == SpecAbi::RustIntrinsic
3346 || abi == SpecAbi::PlatformIntrinsic
// Per-value fixup: decides whether a single return/argument slot is
// passed as-is, cast to an integer register, or made indirect.
3348 let fixup = |arg: &mut ArgAbi<'tcx, Ty<'tcx>>| {
// Values already marked `PassMode::Ignore` need no further adjustment.
3349 if arg.is_ignore() {
3353 match arg.layout.abi {
// Ordinary aggregates: nothing to do inside the match itself; the
// size-based direct/indirect choice happens further below.
3354 Abi::Aggregate { .. } => {}
3356 // This is a fun case! The gist of what this is doing is
3357 // that we want callers and callees to always agree on the
3358 // ABI of how they pass SIMD arguments. If we were to *not*
3359 // make these arguments indirect then they'd be immediates
3360 // in LLVM, which means that they'd used whatever the
3361 // appropriate ABI is for the callee and the caller. That
3362 // means, for example, if the caller doesn't have AVX
3363 // enabled but the callee does, then passing an AVX argument
3364 // across this boundary would cause corrupt data to show up.
3366 // This problem is fixed by unconditionally passing SIMD
3367 // arguments through memory between callers and callees
3368 // which should get them all to agree on ABI regardless of
3369 // target feature sets. Some more information about this
3370 // issue can be found in #44367.
3372 // Note that the platform intrinsic ABI is exempt here as
3373 // that's how we connect up to LLVM and it's unstable
3374 // anyway, we control all calls to it in libstd.
// Match-arm guard (the arm's pattern — presumably `Abi::Vector` given
// the SIMD discussion above — is elided from this listing): SIMD values
// go through memory when the target requests it, except under the
// platform-intrinsic ABI.
3376 if abi != SpecAbi::PlatformIntrinsic
3377 && self.tcx.sess.target.simd_types_indirect =>
3379 arg.make_indirect();
// Unsized values, and sized values wider than a pointer, are passed
// indirectly (by reference).
3386 let size = arg.layout.size;
3387 if arg.layout.is_unsized() || size > Pointer.size(self) {
3388 arg.make_indirect();
3390 // We want to pass small aggregates as immediates, but using
3391 // a LLVM aggregate type for this leads to bad optimizations,
3392 // so we pick an appropriately sized integer type instead.
3393 arg.cast_to(Reg { kind: RegKind::Integer, size });
// Run the fixup on the return slot and on every argument slot.
3396 fixup(&mut fn_abi.ret);
3397 for arg in &mut fn_abi.args {
// Delegate to the target-specific foreign-ABI adjustment; whether this
// sits in an `else` branch is not visible in this excerpt.
3401 fn_abi.adjust_for_foreign_abi(self, abi)?;
3408 fn make_thin_self_ptr<'tcx>(
3409 cx: &(impl HasTyCtxt<'tcx> + HasParamEnv<'tcx>),
3410 layout: TyAndLayout<'tcx>,
3411 ) -> TyAndLayout<'tcx> {
3413 let fat_pointer_ty = if layout.is_unsized() {
3414 // unsized `self` is passed as a pointer to `self`
3415 // FIXME (mikeyhew) change this to use &own if it is ever added to the language
3416 tcx.mk_mut_ptr(layout.ty)
3419 Abi::ScalarPair(..) => (),
3420 _ => bug!("receiver type has unsupported layout: {:?}", layout),
3423 // In the case of Rc<Self>, we need to explicitly pass a *mut RcBox<Self>
3424 // with a Scalar (not ScalarPair) ABI. This is a hack that is understood
3425 // elsewhere in the compiler as a method on a `dyn Trait`.
3426 // To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes until we
3427 // get a built-in pointer type
3428 let mut fat_pointer_layout = layout;
3429 'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr()
3430 && !fat_pointer_layout.ty.is_region_ptr()
3432 for i in 0..fat_pointer_layout.fields.count() {
3433 let field_layout = fat_pointer_layout.field(cx, i);
3435 if !field_layout.is_zst() {
3436 fat_pointer_layout = field_layout;
3437 continue 'descend_newtypes;
3441 bug!("receiver has no non-zero-sized fields {:?}", fat_pointer_layout);
3444 fat_pointer_layout.ty
3447 // we now have a type like `*mut RcBox<dyn Trait>`
3448 // change its layout to that of `*mut ()`, a thin pointer, but keep the same type
3449 // this is understood as a special case elsewhere in the compiler
3450 let unit_ptr_ty = tcx.mk_mut_ptr(tcx.mk_unit());
3455 // NOTE(eddyb) using an empty `ParamEnv`, and `unwrap`-ing the `Result`
3456 // should always work because the type is always `*mut ()`.
3457 ..tcx.layout_of(ty::ParamEnv::reveal_all().and(unit_ptr_ty)).unwrap()