1 use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
2 use crate::mir::{GeneratorLayout, GeneratorSavedLocal};
3 use crate::ty::normalize_erasing_regions::NormalizationError;
4 use crate::ty::subst::Subst;
5 use crate::ty::{self, subst::SubstsRef, EarlyBinder, ReprOptions, Ty, TyCtxt, TypeVisitable};
7 use rustc_attr as attr;
9 use rustc_hir::def_id::DefId;
10 use rustc_hir::lang_items::LangItem;
11 use rustc_index::bit_set::BitSet;
12 use rustc_index::vec::{Idx, IndexVec};
13 use rustc_session::{config::OptLevel, DataTypeKind, FieldInfo, SizeKind, VariantInfo};
14 use rustc_span::symbol::Symbol;
15 use rustc_span::{Span, DUMMY_SP};
16 use rustc_target::abi::call::{
17 ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, Conv, FnAbi, PassMode, Reg, RegKind,
19 use rustc_target::abi::*;
20 use rustc_target::spec::{abi::Abi as SpecAbi, HasTargetSpec, PanicStrategy, Target};
25 use std::num::NonZeroUsize;
28 use rand::{seq::SliceRandom, SeedableRng};
29 use rand_xoshiro::Xoshiro128StarStar;
// Installs this module's layout/ABI queries (`layout_of`, `fn_abi_of_fn_ptr`,
// `fn_abi_of_instance`) into the global query provider table, keeping every
// other provider unchanged via the `..*providers` struct update.
// NOTE(review): the assignment target (`*providers = ...`) is not visible in
// this excerpt — confirm against the full file.
31 pub fn provide(providers: &mut ty::query::Providers) {
33 ty::query::Providers { layout_of, fn_abi_of_fn_ptr, fn_abi_of_instance, ..*providers };
// Extension trait adding `rustc_middle`-specific conversions to
// `rustc_target::abi::Integer`, which cannot itself depend on `Ty`/`TyCtxt`.
36 pub trait IntegerExt {
// Converts this ABI integer (plus signedness) to the matching primitive `Ty`.
37 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx>;
// Maps an `attr::IntType` coming from a `#[repr(..)]` attribute to an ABI integer.
38 fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
// Maps a type-system signed integer kind to an ABI integer (isize uses the data layout).
39 fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer;
// Maps a type-system unsigned integer kind to an ABI integer (usize uses the data layout).
40 fn from_uint_ty<C: HasDataLayout>(cx: &C, uty: ty::UintTy) -> Integer;
50 impl IntegerExt for Integer {
// Exhaustive map from (abi::Integer, signedness) to the interned primitive type.
52 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx> {
53 match (*self, signed) {
54 (I8, false) => tcx.types.u8,
55 (I16, false) => tcx.types.u16,
56 (I32, false) => tcx.types.u32,
57 (I64, false) => tcx.types.u64,
58 (I128, false) => tcx.types.u128,
59 (I8, true) => tcx.types.i8,
60 (I16, true) => tcx.types.i16,
61 (I32, true) => tcx.types.i32,
62 (I64, true) => tcx.types.i64,
63 (I128, true) => tcx.types.i128,
67 /// Gets the Integer type from an attr::IntType.
68 fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
69 let dl = cx.data_layout();
// Signed/unsigned pairs of the same width collapse to one ABI integer;
// the width is all that matters here, signedness is tracked separately.
72 attr::SignedInt(ast::IntTy::I8) | attr::UnsignedInt(ast::UintTy::U8) => I8,
73 attr::SignedInt(ast::IntTy::I16) | attr::UnsignedInt(ast::UintTy::U16) => I16,
74 attr::SignedInt(ast::IntTy::I32) | attr::UnsignedInt(ast::UintTy::U32) => I32,
75 attr::SignedInt(ast::IntTy::I64) | attr::UnsignedInt(ast::UintTy::U64) => I64,
76 attr::SignedInt(ast::IntTy::I128) | attr::UnsignedInt(ast::UintTy::U128) => I128,
// isize/usize have target-dependent width; ask the data layout.
77 attr::SignedInt(ast::IntTy::Isize) | attr::UnsignedInt(ast::UintTy::Usize) => {
78 dl.ptr_sized_integer()
// NOTE(review): the `match` opener and the `I8` arm for this function are not
// visible in this excerpt (source lines are missing around here).
83 fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer {
86 ty::IntTy::I16 => I16,
87 ty::IntTy::I32 => I32,
88 ty::IntTy::I64 => I64,
89 ty::IntTy::I128 => I128,
90 ty::IntTy::Isize => cx.data_layout().ptr_sized_integer(),
93 fn from_uint_ty<C: HasDataLayout>(cx: &C, ity: ty::UintTy) -> Integer {
96 ty::UintTy::U16 => I16,
97 ty::UintTy::U32 => I32,
98 ty::UintTy::U64 => I64,
99 ty::UintTy::U128 => I128,
100 ty::UintTy::Usize => cx.data_layout().ptr_sized_integer(),
104 /// Finds the appropriate Integer type and signedness for the given
105 /// signed discriminant range and `#[repr]` attribute.
106 /// N.B.: `u128` values above `i128::MAX` will be treated as signed, but
107 /// that shouldn't affect anything, other than maybe debuginfo.
// NOTE(review): the parameter list of this function (presumably tcx, ty,
// repr, min, max) is not visible in this excerpt — confirm in the full file.
114 ) -> (Integer, bool) {
115 // Theoretically, negative values could be larger in unsigned representation
116 // than the unsigned representation of the signed minimum. However, if there
117 // are any negative values, the only valid unsigned representation is u128
118 // which can fit all i128 values, so the result remains unaffected.
119 let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
120 let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));
// An explicit `#[repr(int)]` hint fixes both width and signedness; it is an
// error (bug! in the full source) if the hinted width cannot hold the range.
122 if let Some(ity) = repr.int {
123 let discr = Integer::from_attr(&tcx, ity);
124 let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
127 "Integer::repr_discr: `#[repr]` hint too small for \
128 discriminant range of enum `{}",
132 return (discr, ity.is_signed());
135 let at_least = if repr.c() {
136 // This is usually I32, however it can be different on some platforms,
137 // notably hexagon and arm-none/thumb-none
138 tcx.data_layout().c_enum_min_size
140 // repr(Rust) enums try to be as small as possible
144 // If there are no negative values, we can use the unsigned fit.
146 (cmp::max(unsigned_fit, at_least), false)
148 (cmp::max(signed_fit, at_least), true)
// Extension trait adding `Ty`-producing conversions to `rustc_target::abi::Primitive`.
153 pub trait PrimitiveExt {
// Converts this primitive to the corresponding Rust type (ints, floats, or a pointer type).
154 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
// Converts this primitive to an *integer* type of the same size (pointers become usize).
155 fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
158 impl PrimitiveExt for Primitive {
160 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
162 Int(i, signed) => i.to_ty(tcx, signed),
163 F32 => tcx.types.f32,
164 F64 => tcx.types.f64,
// An abi-level Pointer carries no pointee type; `*mut ()` is used as a stand-in.
165 Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
169 /// Return an *integer* type matching this primitive.
170 /// Useful in particular when dealing with enum discriminants.
172 fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
174 Int(i, signed) => i.to_ty(tcx, signed),
// A pointer-sized discriminant is represented as usize.
175 Pointer => tcx.types.usize,
// Floats are never valid discriminant representations.
176 F32 | F64 => bug!("floats do not have an int type"),
181 /// The first half of a fat pointer.
183 /// - For a trait object, this is the address of the box.
184 /// - For a slice, this is the base address.
185 pub const FAT_PTR_ADDR: usize = 0;
187 /// The second half of a fat pointer.
189 /// - For a trait object, this is the address of the vtable.
190 /// - For a slice, this is the length.
191 pub const FAT_PTR_EXTRA: usize = 1;
193 /// The maximum supported number of lanes in a SIMD vector.
195 /// This value is selected based on backend support:
196 /// * LLVM does not appear to have a vector width limit.
197 /// * Cranelift stores the base-2 log of the lane count in a 4 bit integer.
// `1 << 0xF` = 2^15 = 32768: the largest lane count whose base-2 log (15)
// fits in Cranelift's 4-bit field.
198 pub const MAX_SIMD_LANES: u64 = 1 << 0xF;
// Errors that can occur while computing the layout of a type. The full enum
// also has an `Unknown(Ty)` variant (its line is missing from this excerpt).
200 #[derive(Copy, Clone, Debug, HashStable, TyEncodable, TyDecodable)]
201 pub enum LayoutError<'tcx> {
// The computed size would exceed the target's object-size bound.
203 SizeOverflow(Ty<'tcx>),
// The type could not be normalized, so no layout can be computed for it.
204 NormalizationFailure(Ty<'tcx>, NormalizationError<'tcx>),
// Human-readable rendering of layout errors, used in diagnostics.
207 impl<'tcx> fmt::Display for LayoutError<'tcx> {
208 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
210 LayoutError::Unknown(ty) => write!(f, "the type `{}` has an unknown layout", ty),
211 LayoutError::SizeOverflow(ty) => {
212 write!(f, "values of the type `{}` are too big for the current architecture", ty)
214 LayoutError::NormalizationFailure(t, e) => write!(
216 "unable to determine layout for `{}` because `{}` cannot be normalized",
218 e.get_type_for_failure()
224 /// Enforce some basic invariants on layouts.
// Debug-build consistency checks run after computing each layout: the ABI
// summary (Scalar / ScalarPair / Vector) must agree with the full layout's
// size/alignment, and enum variant layouts must be coherent with the whole.
225 fn sanity_check_layout<'tcx>(
227 param_env: ty::ParamEnv<'tcx>,
228 layout: &TyAndLayout<'tcx>,
230 // Type-level uninhabitedness should always imply ABI uninhabitedness.
231 if tcx.conservative_is_privately_uninhabited(param_env.and(layout.ty)) {
232 assert!(layout.abi.is_uninhabited());
// The remaining checks are debug-assertion only; they are too expensive
// (and too strict about internals) for release builds.
235 if cfg!(debug_assertions) {
236 fn check_layout_abi<'tcx>(tcx: TyCtxt<'tcx>, layout: Layout<'tcx>) {
238 Abi::Scalar(scalar) => {
239 // No padding in scalars.
242 scalar.align(&tcx).abi,
243 "alignment mismatch between ABI and layout in {layout:#?}"
248 "size mismatch between ABI and layout in {layout:#?}"
251 Abi::Vector { count, element } => {
252 // No padding in vectors. Alignment can be strengthened, though.
254 layout.align().abi >= element.align(&tcx).abi,
255 "alignment mismatch between ABI and layout in {layout:#?}"
// Vector size is element size times lane count, rounded up to the
// target's preferred vector alignment.
257 let size = element.size(&tcx) * count;
260 size.align_to(tcx.data_layout().vector_align(size).abi),
261 "size mismatch between ABI and layout in {layout:#?}"
264 Abi::ScalarPair(scalar1, scalar2) => {
265 // Sanity-check scalar pairs. These are a bit more flexible and support
266 // padding, but we can at least ensure both fields actually fit into the layout
267 // and the alignment requirement has not been weakened.
268 let align1 = scalar1.align(&tcx).abi;
269 let align2 = scalar2.align(&tcx).abi;
271 layout.align().abi >= cmp::max(align1, align2),
272 "alignment mismatch between ABI and layout in {layout:#?}",
// The second scalar starts at the first aligned offset after the first.
274 let field2_offset = scalar1.size(&tcx).align_to(align2);
276 layout.size() >= field2_offset + scalar2.size(&tcx),
277 "size mismatch between ABI and layout in {layout:#?}"
280 Abi::Uninhabited | Abi::Aggregate { .. } => {} // Nothing to check.
284 check_layout_abi(tcx, layout.layout);
// For multi-variant (enum/generator) layouts, check each variant too.
286 if let Variants::Multiple { variants, .. } = &layout.variants {
287 for variant in variants {
288 check_layout_abi(tcx, *variant);
289 // No nested "multiple".
290 assert!(matches!(variant.variants(), Variants::Single { .. }));
291 // Skip empty variants.
292 if variant.size() == Size::ZERO
293 || variant.fields().count() == 0
294 || variant.abi().is_uninhabited()
296 // These are never actually accessed anyway, so we can skip them. (Note that
297 // sometimes, variants with fields have size 0, and sometimes, variants without
298 // fields have non-0 size.)
301 // Variants should have the same or a smaller size as the full thing.
302 if variant.size() > layout.size {
304 "Type with size {} bytes has variant with size {} bytes: {layout:#?}",
306 variant.size().bytes(),
309 // The top-level ABI and the ABI of the variants should be coherent.
310 let abi_coherent = match (layout.abi, variant.abi()) {
311 (Abi::Scalar(..), Abi::Scalar(..)) => true,
312 (Abi::ScalarPair(..), Abi::ScalarPair(..)) => true,
313 (Abi::Uninhabited, _) => true,
314 (Abi::Aggregate { .. }, _) => true,
319 "Variant ABI is incompatible with top-level ABI:\nvariant={:#?}\nTop-level: {layout:#?}",
// Query provider: computes the layout of a type under a given param-env.
// Normalizes the type first, guards against unbounded layout recursion via
// the recursion limit, and sanity-checks the result in debug builds.
328 #[instrument(skip(tcx, query), level = "debug")]
331 query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
332 ) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
333 ty::tls::with_related_context(tcx, move |icx| {
334 let (param_env, ty) = query.into_parts();
// Deeply nested types (e.g. via recursive generics) abort with a fatal
// error instead of overflowing the stack.
337 if !tcx.recursion_limit().value_within_limit(icx.layout_depth) {
338 tcx.sess.fatal(&format!("overflow representing the type `{}`", ty));
341 // Update the ImplicitCtxt to increase the layout_depth
342 let icx = ty::tls::ImplicitCtxt { layout_depth: icx.layout_depth + 1, ..icx.clone() };
344 ty::tls::enter_context(&icx, |_| {
345 let param_env = param_env.with_reveal_all_normalized(tcx);
346 let unnormalized_ty = ty;
348 // FIXME: We might want to have two different versions of `layout_of`:
349 // One that can be called after typecheck has completed and can use
350 // `normalize_erasing_regions` here and another one that can be called
351 // before typecheck has completed and uses `try_normalize_erasing_regions`.
352 let ty = match tcx.try_normalize_erasing_regions(param_env, ty) {
354 Err(normalization_error) => {
355 return Err(LayoutError::NormalizationFailure(ty, normalization_error));
359 if ty != unnormalized_ty {
360 // Ensure this layout is also cached for the normalized type.
361 return tcx.layout_of(param_env.and(ty));
364 let cx = LayoutCx { tcx, param_env };
366 let layout = cx.layout_of_uncached(ty)?;
367 let layout = TyAndLayout { ty, layout };
// Record the layout for `-Z print-type-sizes` style reporting.
369 cx.record_layout_for_printing(layout);
371 sanity_check_layout(tcx, param_env, &layout);
// Context bundling everything needed to compute layouts: the (generic) `tcx`
// handle plus the param-env layouts are computed under. NOTE(review): the
// `tcx: C` field line is missing from this excerpt.
378 pub struct LayoutCx<'tcx, C> {
380 pub param_env: ty::ParamEnv<'tcx>,
// Classifies how a univariant layout may be used, which constrains field
// reordering (the `enum StructKind` declaration line is missing from this
// excerpt; these are its variants and docs).
383 #[derive(Copy, Clone, Debug)]
385 /// A tuple, closure, or univariant which cannot be coerced to unsized.
387 /// A univariant, the last field of which may be coerced to unsized.
389 /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
390 Prefixed(Size, Align),
393 // Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
394 // This is used to go between `memory_index` (source field order to memory order)
395 // and `inverse_memory_index` (memory order to source field order).
396 // See also `FieldsShape::Arbitrary::memory_index` for more details.
397 // FIXME(eddyb) build a better abstraction for permutations, if possible.
398 fn invert_mapping(map: &[u32]) -> Vec<u32> {
399 let mut inverse = vec![0; map.len()];
400 for i in 0..map.len() {
// Each position i in the input says where i maps to; store i back at that slot.
401 inverse[map[i] as usize] = i as u32;
// NOTE(review): the closing brace and `inverse` return expression are on
// lines missing from this excerpt.
406 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
// Builds the canonical layout for a pair of scalars: `a` at offset 0, `b` at
// the first offset after `a` that satisfies `b`'s alignment.
407 fn scalar_pair(&self, a: Scalar, b: Scalar) -> LayoutS<'tcx> {
408 let dl = self.data_layout();
409 let b_align = b.align(dl);
410 let align = a.align(dl).max(b_align).max(dl.aggregate_align);
411 let b_offset = a.size(dl).align_to(b_align.abi);
412 let size = (b_offset + b.size(dl)).align_to(align.abi);
414 // HACK(nox): We iter on `b` and then `a` because `max_by_key`
415 // returns the last maximum.
416 let largest_niche = Niche::from_scalar(dl, b_offset, b)
418 .chain(Niche::from_scalar(dl, Size::ZERO, a))
419 .max_by_key(|niche| niche.available(dl));
422 variants: Variants::Single { index: VariantIdx::new(0) },
423 fields: FieldsShape::Arbitrary {
424 offsets: vec![Size::ZERO, b_offset],
425 memory_index: vec![0, 1],
427 abi: Abi::ScalarPair(a, b),
// Core single-variant layout algorithm: optionally reorders fields (for
// size, or randomly under `-Z randomize-layout`), assigns offsets honoring
// packing/alignment, tracks the largest niche, and tries to promote the
// result to a Scalar/ScalarPair ABI.
434 fn univariant_uninterned(
437 fields: &[TyAndLayout<'_>],
440 ) -> Result<LayoutS<'tcx>, LayoutError<'tcx>> {
441 let dl = self.data_layout();
442 let pack = repr.pack;
// `#[repr(packed)]` and `#[repr(align)]` are mutually exclusive; typeck
// should have rejected this, so only delay a bug here.
443 if pack.is_some() && repr.align.is_some() {
444 self.tcx.sess.delay_span_bug(DUMMY_SP, "struct cannot be packed and aligned");
445 return Err(LayoutError::Unknown(ty));
448 let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };
// Start with identity order (memory order == source order).
450 let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();
452 let optimize = !repr.inhibit_struct_field_reordering_opt();
// For MaybeUnsized, the last (possibly-unsized) field must stay last,
// so only the prefix is eligible for reordering.
455 if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
456 let optimizing = &mut inverse_memory_index[..end];
457 let field_align = |f: &TyAndLayout<'_>| {
458 if let Some(pack) = pack { f.align.abi.min(pack) } else { f.align.abi }
461 // If `-Z randomize-layout` was enabled for the type definition we can shuffle
462 // the field ordering to try and catch some code making assumptions about layouts
463 // we don't guarantee
464 if repr.can_randomize_type_layout() {
465 // `ReprOptions.layout_seed` is a deterministic seed that we can use to
466 // randomize field ordering with
467 let mut rng = Xoshiro128StarStar::seed_from_u64(repr.field_shuffle_seed);
469 // Shuffle the ordering of the fields
470 optimizing.shuffle(&mut rng);
472 // Otherwise we just leave things alone and actually optimize the type's fields
475 StructKind::AlwaysSized | StructKind::MaybeUnsized => {
476 optimizing.sort_by_key(|&x| {
477 // Place ZSTs first to avoid "interesting offsets",
478 // especially with only one or two non-ZST fields.
479 let f = &fields[x as usize];
480 (!f.is_zst(), cmp::Reverse(field_align(f)))
484 StructKind::Prefixed(..) => {
485 // Sort in ascending alignment so that the layout stays optimal
486 // regardless of the prefix
487 optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
491 // FIXME(Kixiron): We can always shuffle fields within a given alignment class
492 // regardless of the status of `-Z randomize-layout`
496 // inverse_memory_index holds field indices by increasing memory offset.
497 // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
498 // We now write field offsets to the corresponding offset slot;
499 // field 5 with offset 0 puts 0 in offsets[5].
500 // At the bottom of this function, we invert `inverse_memory_index` to
501 // produce `memory_index` (see `invert_mapping`).
503 let mut sized = true;
504 let mut offsets = vec![Size::ZERO; fields.len()];
505 let mut offset = Size::ZERO;
506 let mut largest_niche = None;
507 let mut largest_niche_available = 0;
// A prefix (e.g. an enum tag) reserves space/alignment before any field.
509 if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
511 if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
512 align = align.max(AbiAndPrefAlign::new(prefix_align));
513 offset = prefix_size.align_to(prefix_align);
// Walk fields in memory order, assigning each its aligned offset.
516 for &i in &inverse_memory_index {
517 let field = fields[i as usize];
519 self.tcx.sess.delay_span_bug(
522 "univariant: field #{} of `{}` comes after unsized field",
// An unsized field makes the whole layout unsized; it must be last.
529 if field.is_unsized() {
533 // Invariant: offset < dl.obj_size_bound() <= 1<<61
534 let field_align = if let Some(pack) = pack {
535 field.align.min(AbiAndPrefAlign::new(pack))
539 offset = offset.align_to(field_align.abi);
540 align = align.max(field_align);
542 debug!("univariant offset: {:?} field: {:#?}", offset, field);
543 offsets[i as usize] = offset;
// Track the single best niche across all fields, translating its offset
// into the struct's coordinate space.
545 if !repr.hide_niche() {
546 if let Some(mut niche) = field.largest_niche {
547 let available = niche.available(dl);
548 if available > largest_niche_available {
549 largest_niche_available = available;
550 niche.offset += offset;
551 largest_niche = Some(niche);
556 offset = offset.checked_add(field.size, dl).ok_or(LayoutError::SizeOverflow(ty))?;
// `#[repr(align(N))]` can only raise the overall alignment.
559 if let Some(repr_align) = repr.align {
560 align = align.max(AbiAndPrefAlign::new(repr_align));
563 debug!("univariant min_size: {:?}", offset);
564 let min_size = offset;
566 // As stated above, inverse_memory_index holds field indices by increasing offset.
567 // This makes it an already-sorted view of the offsets vec.
568 // To invert it, consider:
569 // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
570 // Field 5 would be the first element, so memory_index is i:
571 // Note: if we didn't optimize, it's already right.
574 if optimize { invert_mapping(&inverse_memory_index) } else { inverse_memory_index };
576 let size = min_size.align_to(align.abi);
577 let mut abi = Abi::Aggregate { sized };
579 // Unpack newtype ABIs and find scalar pairs.
580 if sized && size.bytes() > 0 {
581 // All other fields must be ZSTs.
582 let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());
584 match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
585 // We have exactly one non-ZST field.
586 (Some((i, field)), None, None) => {
587 // Field fills the struct and it has a scalar or scalar pair ABI.
588 if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size
591 // For plain scalars, or vectors of them, we can't unpack
592 // newtypes for `#[repr(C)]`, as that affects C ABIs.
593 Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
596 // But scalar pairs are Rust-specific and get
597 // treated as aggregates by C ABIs anyway.
598 Abi::ScalarPair(..) => {
606 // Two non-ZST fields, and they're both scalars.
607 (Some((i, a)), Some((j, b)), None) => {
608 match (a.abi, b.abi) {
609 (Abi::Scalar(a), Abi::Scalar(b)) => {
610 // Order by the memory placement, not source order.
611 let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
// Build the candidate ScalarPair layout and only adopt it when it
// agrees exactly with the offsets/alignment we already computed.
616 let pair = self.scalar_pair(a, b);
617 let pair_offsets = match pair.fields {
618 FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
619 assert_eq!(memory_index, &[0, 1]);
624 if offsets[i] == pair_offsets[0]
625 && offsets[j] == pair_offsets[1]
626 && align == pair.align
629 // We can use `ScalarPair` only when it matches our
630 // already computed layout (including `#[repr(C)]`).
// Any uninhabited field makes the whole aggregate uninhabited.
642 if fields.iter().any(|f| f.abi.is_uninhabited()) {
643 abi = Abi::Uninhabited;
647 variants: Variants::Single { index: VariantIdx::new(0) },
648 fields: FieldsShape::Arbitrary { offsets, memory_index },
// The big per-type-kind dispatch that actually computes a layout: one match
// arm per `ty::TyKind`, covering primitives, pointers, arrays/slices,
// closures/tuples, SIMD, ADTs (structs/unions/enums) and generators.
// (This function continues past the end of this excerpt.)
656 fn layout_of_uncached(&self, ty: Ty<'tcx>) -> Result<Layout<'tcx>, LayoutError<'tcx>> {
658 let param_env = self.param_env;
659 let dl = self.data_layout();
// A scalar with the full valid range for its size (no niche).
660 let scalar_unit = |value: Primitive| {
661 let size = value.size(dl);
662 assert!(size.bits() <= 128);
663 Scalar::Initialized { value, valid_range: WrappingRange::full(size) }
666 |value: Primitive| tcx.intern_layout(LayoutS::scalar(self, scalar_unit(value)));
// Convenience wrapper: compute and intern a single-variant layout.
668 let univariant = |fields: &[TyAndLayout<'_>], repr: &ReprOptions, kind| {
669 Ok(tcx.intern_layout(self.univariant_uninterned(ty, fields, repr, kind)?))
// Layout must only ever be computed for fully-inferred types.
671 debug_assert!(!ty.has_infer_types_or_consts());
673 Ok(match *ty.kind() {
// `bool` is an i8 restricted to 0..=1, giving it a large niche.
675 ty::Bool => tcx.intern_layout(LayoutS::scalar(
677 Scalar::Initialized {
678 value: Int(I8, false),
679 valid_range: WrappingRange { start: 0, end: 1 },
// `char` is an i32 restricted to valid Unicode scalar values' upper bound.
682 ty::Char => tcx.intern_layout(LayoutS::scalar(
684 Scalar::Initialized {
685 value: Int(I32, false),
686 valid_range: WrappingRange { start: 0, end: 0x10FFFF },
689 ty::Int(ity) => scalar(Int(Integer::from_int_ty(dl, ity), true)),
690 ty::Uint(ity) => scalar(Int(Integer::from_uint_ty(dl, ity), false)),
691 ty::Float(fty) => scalar(match fty {
692 ty::FloatTy::F32 => F32,
693 ty::FloatTy::F64 => F64,
// Function pointers are non-null, so start the valid range at 1.
696 let mut ptr = scalar_unit(Pointer);
697 ptr.valid_range_mut().start = 1;
698 tcx.intern_layout(LayoutS::scalar(self, ptr))
// `!` is a zero-sized uninhabited type.
702 ty::Never => tcx.intern_layout(LayoutS {
703 variants: Variants::Single { index: VariantIdx::new(0) },
704 fields: FieldsShape::Primitive,
705 abi: Abi::Uninhabited,
711 // Potentially-wide pointers.
712 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
713 let mut data_ptr = scalar_unit(Pointer);
// References (but not raw pointers) are non-null.
714 if !ty.is_unsafe_ptr() {
715 data_ptr.valid_range_mut().start = 1;
718 let pointee = tcx.normalize_erasing_regions(param_env, pointee);
// Sized pointees produce a thin pointer: a plain scalar.
719 if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
720 return Ok(tcx.intern_layout(LayoutS::scalar(self, data_ptr)));
// Unsized pointees: the metadata depends on the tail's kind.
723 let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
724 let metadata = match unsized_part.kind() {
726 return Ok(tcx.intern_layout(LayoutS::scalar(self, data_ptr)));
// Slices/str carry a usize length as metadata.
728 ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)),
// Trait objects carry a non-null vtable pointer as metadata.
730 let mut vtable = scalar_unit(Pointer);
731 vtable.valid_range_mut().start = 1;
734 _ => return Err(LayoutError::Unknown(unsized_part)),
737 // Effectively a (ptr, meta) tuple.
738 tcx.intern_layout(self.scalar_pair(data_ptr, metadata))
741 // Arrays and slices.
742 ty::Array(element, mut count) => {
// The length const may still contain projections; normalize first.
743 if count.has_projections() {
744 count = tcx.normalize_erasing_regions(param_env, count);
745 if count.has_projections() {
746 return Err(LayoutError::Unknown(ty));
750 let count = count.try_eval_usize(tcx, param_env).ok_or(LayoutError::Unknown(ty))?;
751 let element = self.layout_of(element)?;
753 element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;
// A non-empty array of an uninhabited element is itself uninhabited.
756 if count != 0 && tcx.conservative_is_privately_uninhabited(param_env.and(ty)) {
759 Abi::Aggregate { sized: true }
// Zero-length arrays contribute no niche.
762 let largest_niche = if count != 0 { element.largest_niche } else { None };
764 tcx.intern_layout(LayoutS {
765 variants: Variants::Single { index: VariantIdx::new(0) },
766 fields: FieldsShape::Array { stride: element.size, count },
769 align: element.align,
// Bare `[T]` is unsized: stride is known, count is not (recorded as 0).
773 ty::Slice(element) => {
774 let element = self.layout_of(element)?;
775 tcx.intern_layout(LayoutS {
776 variants: Variants::Single { index: VariantIdx::new(0) },
777 fields: FieldsShape::Array { stride: element.size, count: 0 },
778 abi: Abi::Aggregate { sized: false },
780 align: element.align,
// `str` behaves like `[u8]`: byte stride, unsized.
784 ty::Str => tcx.intern_layout(LayoutS {
785 variants: Variants::Single { index: VariantIdx::new(0) },
786 fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
787 abi: Abi::Aggregate { sized: false },
// `fn` items are zero-sized values.
794 ty::FnDef(..) => univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?,
// Dynamically-sized/foreign types: an empty struct forced to be unsized.
795 ty::Dynamic(..) | ty::Foreign(..) => {
796 let mut unit = self.univariant_uninterned(
799 &ReprOptions::default(),
800 StructKind::AlwaysSized,
803 Abi::Aggregate { ref mut sized } => *sized = false,
806 tcx.intern_layout(unit)
// Generators have their own dedicated layout algorithm.
809 ty::Generator(def_id, substs, _) => self.generator_layout(ty, def_id, substs)?,
// Closures lay out their captured upvars like a struct.
811 ty::Closure(_, ref substs) => {
812 let tys = substs.as_closure().upvar_tys();
814 &tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
815 &ReprOptions::default(),
816 StructKind::AlwaysSized,
// Tuples: the last element may be unsized (via unsizing coercions).
822 if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };
825 &tys.iter().map(|k| self.layout_of(k)).collect::<Result<Vec<_>, _>>()?,
826 &ReprOptions::default(),
831 // SIMD vector types.
832 ty::Adt(def, substs) if def.repr().simd() => {
833 if !def.is_struct() {
834 // Should have yielded E0517 by now.
835 tcx.sess.delay_span_bug(
837 "#[repr(simd)] was applied to an ADT that is not a struct",
839 return Err(LayoutError::Unknown(ty));
842 // Supported SIMD vectors are homogeneous ADTs with at least one field:
844 // * #[repr(simd)] struct S(T, T, T, T);
845 // * #[repr(simd)] struct S { x: T, y: T, z: T, w: T }
846 // * #[repr(simd)] struct S([T; 4])
848 // where T is a primitive scalar (integer/float/pointer).
850 // SIMD vectors with zero fields are not supported.
851 // (should be caught by typeck)
852 if def.non_enum_variant().fields.is_empty() {
853 tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
856 // Type of the first ADT field:
857 let f0_ty = def.non_enum_variant().fields[0].ty(tcx, substs);
859 // Heterogeneous SIMD vectors are not supported:
860 // (should be caught by typeck)
861 for fi in &def.non_enum_variant().fields {
862 if fi.ty(tcx, substs) != f0_ty {
863 tcx.sess.fatal(&format!("monomorphising heterogeneous SIMD type `{}`", ty));
867 // The element type and number of elements of the SIMD vector
868 // are obtained from:
870 // * the element type and length of the single array field, if
871 // the first field is of array type, or
873 // * the homogenous field type and the number of fields.
874 let (e_ty, e_len, is_array) = if let ty::Array(e_ty, _) = f0_ty.kind() {
875 // First ADT field is an array:
877 // SIMD vectors with multiple array fields are not supported:
878 // (should be caught by typeck)
879 if def.non_enum_variant().fields.len() != 1 {
880 tcx.sess.fatal(&format!(
881 "monomorphising SIMD type `{}` with more than one array field",
886 // Extract the number of elements from the layout of the array field:
887 let FieldsShape::Array { count, .. } = self.layout_of(f0_ty)?.layout.fields() else {
888 return Err(LayoutError::Unknown(ty));
891 (*e_ty, *count, true)
893 // First ADT field is not an array:
894 (f0_ty, def.non_enum_variant().fields.len() as _, false)
897 // SIMD vectors of zero length are not supported.
898 // Additionally, lengths are capped at 2^16 as a fixed maximum backends must
901 // Can't be caught in typeck if the array length is generic.
903 tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
904 } else if e_len > MAX_SIMD_LANES {
905 tcx.sess.fatal(&format!(
906 "monomorphising SIMD type `{}` of length greater than {}",
911 // Compute the ABI of the element type:
912 let e_ly = self.layout_of(e_ty)?;
913 let Abi::Scalar(e_abi) = e_ly.abi else {
914 // This error isn't caught in typeck, e.g., if
915 // the element type of the vector is generic.
916 tcx.sess.fatal(&format!(
917 "monomorphising SIMD type `{}` with a non-primitive-scalar \
918 (integer/float/pointer) element type `{}`",
923 // Compute the size and alignment of the vector:
924 let size = e_ly.size.checked_mul(e_len, dl).ok_or(LayoutError::SizeOverflow(ty))?;
925 let align = dl.vector_align(size);
926 let size = size.align_to(align.abi);
928 // Compute the placement of the vector fields:
929 let fields = if is_array {
930 FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] }
932 FieldsShape::Array { stride: e_ly.size, count: e_len }
935 tcx.intern_layout(LayoutS {
936 variants: Variants::Single { index: VariantIdx::new(0) },
938 abi: Abi::Vector { element: e_abi, count: e_len },
939 largest_niche: e_ly.largest_niche,
// All other ADTs: structs, unions and enums.
946 ty::Adt(def, substs) => {
947 // Cache the field layouts.
954 .map(|field| self.layout_of(field.ty(tcx, substs)))
955 .collect::<Result<Vec<_>, _>>()
957 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
// Union handling: packed+aligned is rejected (delayed bug), then all
// fields are overlaid at offset 0.
960 if def.repr().pack.is_some() && def.repr().align.is_some() {
961 self.tcx.sess.delay_span_bug(
962 tcx.def_span(def.did()),
963 "union cannot be packed and aligned",
965 return Err(LayoutError::Unknown(ty));
969 if def.repr().pack.is_some() { dl.i8_align } else { dl.aggregate_align };
971 if let Some(repr_align) = def.repr().align {
972 align = align.max(AbiAndPrefAlign::new(repr_align));
975 let optimize = !def.repr().inhibit_union_abi_opt();
976 let mut size = Size::ZERO;
977 let mut abi = Abi::Aggregate { sized: true };
978 let index = VariantIdx::new(0);
979 for field in &variants[index] {
980 assert!(!field.is_unsized());
981 align = align.max(field.align);
983 // If all non-ZST fields have the same ABI, forward this ABI
984 if optimize && !field.is_zst() {
985 // Discard valid range information and allow undef
986 let field_abi = match field.abi {
987 Abi::Scalar(x) => Abi::Scalar(x.to_union()),
988 Abi::ScalarPair(x, y) => {
989 Abi::ScalarPair(x.to_union(), y.to_union())
991 Abi::Vector { element: x, count } => {
992 Abi::Vector { element: x.to_union(), count }
994 Abi::Uninhabited | Abi::Aggregate { .. } => {
995 Abi::Aggregate { sized: true }
999 if size == Size::ZERO {
1000 // first non ZST: initialize 'abi'
1002 } else if abi != field_abi {
1003 // different fields have different ABI: reset to Aggregate
1004 abi = Abi::Aggregate { sized: true };
1008 size = cmp::max(size, field.size);
// Packing caps the union's alignment after field alignments were merged.
1011 if let Some(pack) = def.repr().pack {
1012 align = align.min(AbiAndPrefAlign::new(pack));
1015 return Ok(tcx.intern_layout(LayoutS {
1016 variants: Variants::Single { index },
1017 fields: FieldsShape::Union(
1018 NonZeroUsize::new(variants[index].len())
1019 .ok_or(LayoutError::Unknown(ty))?,
1022 largest_niche: None,
1024 size: size.align_to(align.abi),
1028 // A variant is absent if it's uninhabited and only has ZST fields.
1029 // Present uninhabited variants only require space for their fields,
1030 // but *not* an encoding of the discriminant (e.g., a tag value).
1031 // See issue #49298 for more details on the need to leave space
1032 // for non-ZST uninhabited data (mostly partial initialization).
1033 let absent = |fields: &[TyAndLayout<'_>]| {
1034 let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
1035 let is_zst = fields.iter().all(|f| f.is_zst());
1036 uninhabited && is_zst
1038 let (present_first, present_second) = {
1039 let mut present_variants = variants
1041 .filter_map(|(i, v)| if absent(v) { None } else { Some(i) });
1042 (present_variants.next(), present_variants.next())
1044 let present_first = match present_first {
1045 Some(present_first) => present_first,
1046 // Uninhabited because it has no variants, or only absent ones.
1047 None if def.is_enum() => {
1048 return Ok(tcx.layout_of(param_env.and(tcx.types.never))?.layout);
1050 // If it's a struct, still compute a layout so that we can still compute the
1052 None => VariantIdx::new(0),
1055 let is_struct = !def.is_enum() ||
1056 // Only one variant is present.
1057 (present_second.is_none() &&
1058 // Representation optimizations are allowed.
1059 !def.repr().inhibit_enum_layout_opt());
1061 // Struct, or univariant enum equivalent to a struct.
1062 // (Typechecking will reject discriminant-sizing attrs.)
1064 let v = present_first;
1065 let kind = if def.is_enum() || variants[v].is_empty() {
1066 StructKind::AlwaysSized
// For structs, the last declared field decides whether the layout
// may become unsized (via unsizing coercions).
1068 let param_env = tcx.param_env(def.did());
1069 let last_field = def.variant(v).fields.last().unwrap();
1071 tcx.type_of(last_field.did).is_sized(tcx.at(DUMMY_SP), param_env);
1073 StructKind::MaybeUnsized
1075 StructKind::AlwaysSized
1079 let mut st = self.univariant_uninterned(ty, &variants[v], &def.repr(), kind)?;
1080 st.variants = Variants::Single { index: v };
// Apply `#[rustc_layout_scalar_valid_range_*]` restrictions (used by
// types like NonNull/NonZero) to the scalar's valid range.
1081 let (start, end) = self.tcx.layout_scalar_valid_range(def.did());
1083 Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
1084 // the asserts ensure that we are not using the
1085 // `#[rustc_layout_scalar_valid_range(n)]`
1086 // attribute to widen the range of anything as that would probably
1087 // result in UB somewhere
1088 // FIXME(eddyb) the asserts are probably not needed,
1089 // as larger validity ranges would result in missed
1090 // optimizations, *not* wrongly assuming the inner
1091 // value is valid. e.g. unions enlarge validity ranges,
1092 // because the values may be uninitialized.
1093 if let Bound::Included(start) = start {
1094 // FIXME(eddyb) this might be incorrect - it doesn't
1095 // account for wrap-around (end < start) ranges.
1096 let valid_range = scalar.valid_range_mut();
1097 assert!(valid_range.start <= start);
1098 valid_range.start = start;
1100 if let Bound::Included(end) = end {
1101 // FIXME(eddyb) this might be incorrect - it doesn't
1102 // account for wrap-around (end < start) ranges.
1103 let valid_range = scalar.valid_range_mut();
1104 assert!(valid_range.end >= end);
1105 valid_range.end = end;
1108 // Update `largest_niche` if we have introduced a larger niche.
1109 let niche = if def.repr().hide_niche() {
1112 Niche::from_scalar(dl, Size::ZERO, *scalar)
1114 if let Some(niche) = niche {
1115 match st.largest_niche {
1116 Some(largest_niche) => {
1117 // Replace the existing niche even if they're equal,
1118 // because this one is at a lower offset.
1119 if largest_niche.available(dl) <= niche.available(dl) {
1120 st.largest_niche = Some(niche);
1123 None => st.largest_niche = Some(niche),
// Non-scalar ABIs cannot carry a scalar valid-range attribute.
1128 start == Bound::Unbounded && end == Bound::Unbounded,
1129 "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
1135 return Ok(tcx.intern_layout(st));
1138 // At this point, we have handled all unions and
1139 // structs. (We have also handled univariant enums
1140 // that allow representation optimization.)
1141 assert!(def.is_enum());
1143 // The current code for niche-filling relies on variant indices
1144 // instead of actual discriminants, so dataful enums with
1145 // explicit discriminants (RFC #2363) would misbehave.
1146 let no_explicit_discriminants = def
1149 .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i.as_u32()));
1151 let mut niche_filling_layout = None;
1153 // Niche-filling enum optimization.
1154 if !def.repr().inhibit_enum_layout_opt() && no_explicit_discriminants {
1155 let mut dataful_variant = None;
// Start with an empty (reversed) range; it is extended as variants are found.
1156 let mut niche_variants = VariantIdx::MAX..=VariantIdx::new(0);
1158 // Find one non-ZST variant.
1159 'variants: for (v, fields) in variants.iter_enumerated() {
// At most one variant may carry data for niche-filling; a second
// non-ZST variant cancels the candidate.
1165 if dataful_variant.is_none() {
1166 dataful_variant = Some(v);
1169 dataful_variant = None;
1174 niche_variants = *niche_variants.start().min(&v)..=v;
1177 if niche_variants.start() > niche_variants.end() {
1178 dataful_variant = None;
1181 if let Some(i) = dataful_variant {
1182 let count = (niche_variants.end().as_u32()
1183 - niche_variants.start().as_u32()
1186 // Find the field with the largest niche
1187 let niche_candidate = variants[i]
1190 .filter_map(|(j, field)| Some((j, field.largest_niche?)))
1191 .max_by_key(|(_, niche)| niche.available(dl));
1193 if let Some((field_index, niche, (niche_start, niche_scalar))) =
1194 niche_candidate.and_then(|(field_index, niche)| {
1195 Some((field_index, niche, niche.reserve(self, count)?))
1198 let mut align = dl.aggregate_align;
1202 let mut st = self.univariant_uninterned(
1206 StructKind::AlwaysSized,
1208 st.variants = Variants::Single { index: j };
1210 align = align.max(st.align);
1212 Ok(tcx.intern_layout(st))
1214 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1216 let offset = st[i].fields().offset(field_index) + niche.offset;
1217 let size = st[i].size();
1219 let abi = if st.iter().all(|v| v.abi().is_uninhabited()) {
1223 Abi::Scalar(_) => Abi::Scalar(niche_scalar),
1224 Abi::ScalarPair(first, second) => {
1225 // Only the niche is guaranteed to be initialised,
1226 // so use union layout for the other primitive.
1227 if offset.bytes() == 0 {
1228 Abi::ScalarPair(niche_scalar, second.to_union())
1230 Abi::ScalarPair(first.to_union(), niche_scalar)
1233 _ => Abi::Aggregate { sized: true },
1237 let largest_niche = Niche::from_scalar(dl, offset, niche_scalar);
1239 niche_filling_layout = Some(LayoutS {
1240 variants: Variants::Multiple {
1242 tag_encoding: TagEncoding::Niche {
1250 fields: FieldsShape::Arbitrary {
1251 offsets: vec![offset],
1252 memory_index: vec![0],
1263 let (mut min, mut max) = (i128::MAX, i128::MIN);
1264 let discr_type = def.repr().discr_type();
1265 let bits = Integer::from_attr(self, discr_type).size().bits();
1266 for (i, discr) in def.discriminants(tcx) {
1267 if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
1270 let mut x = discr.val as i128;
1271 if discr_type.is_signed() {
1272 // sign extend the raw representation to be an i128
1273 x = (x << (128 - bits)) >> (128 - bits);
1282 // We might have no inhabited variants, so pretend there's at least one.
1283 if (min, max) == (i128::MAX, i128::MIN) {
1287 assert!(min <= max, "discriminant range is {}...{}", min, max);
1288 let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr(), min, max);
1290 let mut align = dl.aggregate_align;
1291 let mut size = Size::ZERO;
1293 // We're interested in the smallest alignment, so start large.
1294 let mut start_align = Align::from_bytes(256).unwrap();
1295 assert_eq!(Integer::for_align(dl, start_align), None);
1297 // repr(C) on an enum tells us to make a (tag, union) layout,
1298 // so we need to grow the prefix alignment to be at least
1299 // the alignment of the union. (This value is used both for
1300 // determining the alignment of the overall enum, and the
1301 // determining the alignment of the payload after the tag.)
1302 let mut prefix_align = min_ity.align(dl).abi;
1304 for fields in &variants {
1305 for field in fields {
1306 prefix_align = prefix_align.max(field.align.abi);
1311 // Create the set of structs that represent each variant.
1312 let mut layout_variants = variants
1314 .map(|(i, field_layouts)| {
1315 let mut st = self.univariant_uninterned(
1319 StructKind::Prefixed(min_ity.size(), prefix_align),
1321 st.variants = Variants::Single { index: i };
1322 // Find the first field we can't move later
1323 // to make room for a larger discriminant.
1325 st.fields.index_by_increasing_offset().map(|j| field_layouts[j])
1327 if !field.is_zst() || field.align.abi.bytes() != 1 {
1328 start_align = start_align.min(field.align.abi);
1332 size = cmp::max(size, st.size);
1333 align = align.max(st.align);
1336 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1338 // Align the maximum variant size to the largest alignment.
1339 size = size.align_to(align.abi);
1341 if size.bytes() >= dl.obj_size_bound() {
1342 return Err(LayoutError::SizeOverflow(ty));
1345 let typeck_ity = Integer::from_attr(dl, def.repr().discr_type());
1346 if typeck_ity < min_ity {
1347 // It is a bug if Layout decided on a greater discriminant size than typeck for
1348 // some reason at this point (based on values discriminant can take on). Mostly
1349 // because this discriminant will be loaded, and then stored into variable of
1350 // type calculated by typeck. Consider such case (a bug): typeck decided on
1351 // byte-sized discriminant, but layout thinks we need a 16-bit to store all
1352 // discriminant values. That would be a bug, because then, in codegen, in order
1353 // to store this 16-bit discriminant into 8-bit sized temporary some of the
1354 // space necessary to represent would have to be discarded (or layout is wrong
1355 // on thinking it needs 16 bits)
1357 "layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
1361 // However, it is fine to make discr type however large (as an optimisation)
1362 // after this point – we’ll just truncate the value we load in codegen.
1365 // Check to see if we should use a different type for the
1366 // discriminant. We can safely use a type with the same size
1367 // as the alignment of the first field of each variant.
1368 // We increase the size of the discriminant to avoid LLVM copying
1369 // padding when it doesn't need to. This normally causes unaligned
1370 // load/stores and excessive memcpy/memset operations. By using a
1371 // bigger integer size, LLVM can be sure about its contents and
1372 // won't be so conservative.
1374 // Use the initial field alignment
1375 let mut ity = if def.repr().c() || def.repr().int.is_some() {
1378 Integer::for_align(dl, start_align).unwrap_or(min_ity)
1381 // If the alignment is not larger than the chosen discriminant size,
1382 // don't use the alignment as the final size.
1386 // Patch up the variants' first few fields.
1387 let old_ity_size = min_ity.size();
1388 let new_ity_size = ity.size();
1389 for variant in &mut layout_variants {
1390 match variant.fields {
1391 FieldsShape::Arbitrary { ref mut offsets, .. } => {
1393 if *i <= old_ity_size {
1394 assert_eq!(*i, old_ity_size);
1398 // We might be making the struct larger.
1399 if variant.size <= old_ity_size {
1400 variant.size = new_ity_size;
1408 let tag_mask = ity.size().unsigned_int_max();
1409 let tag = Scalar::Initialized {
1410 value: Int(ity, signed),
1411 valid_range: WrappingRange {
1412 start: (min as u128 & tag_mask),
1413 end: (max as u128 & tag_mask),
1416 let mut abi = Abi::Aggregate { sized: true };
1418 if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
1419 abi = Abi::Uninhabited;
1420 } else if tag.size(dl) == size {
1421 // Make sure we only use scalar layout when the enum is entirely its
1422 // own tag (i.e. it has no padding nor any non-ZST variant fields).
1423 abi = Abi::Scalar(tag);
1425 // Try to use a ScalarPair for all tagged enums.
1426 let mut common_prim = None;
1427 let mut common_prim_initialized_in_all_variants = true;
1428 for (field_layouts, layout_variant) in iter::zip(&variants, &layout_variants) {
1429 let FieldsShape::Arbitrary { ref offsets, .. } = layout_variant.fields else {
1433 iter::zip(field_layouts, offsets).filter(|p| !p.0.is_zst());
1434 let (field, offset) = match (fields.next(), fields.next()) {
1436 common_prim_initialized_in_all_variants = false;
1439 (Some(pair), None) => pair,
1445 let prim = match field.abi {
1446 Abi::Scalar(scalar) => {
1447 common_prim_initialized_in_all_variants &=
1448 matches!(scalar, Scalar::Initialized { .. });
1456 if let Some(pair) = common_prim {
1457 // This is pretty conservative. We could go fancier
1458 // by conflating things like i32 and u32, or even
1459 // realising that (u8, u8) could just cohabit with
1461 if pair != (prim, offset) {
1466 common_prim = Some((prim, offset));
1469 if let Some((prim, offset)) = common_prim {
1470 let prim_scalar = if common_prim_initialized_in_all_variants {
1473 // Common prim might be uninit.
1474 Scalar::Union { value: prim }
1476 let pair = self.scalar_pair(tag, prim_scalar);
1477 let pair_offsets = match pair.fields {
1478 FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
1479 assert_eq!(memory_index, &[0, 1]);
1484 if pair_offsets[0] == Size::ZERO
1485 && pair_offsets[1] == *offset
1486 && align == pair.align
1487 && size == pair.size
1489 // We can use `ScalarPair` only when it matches our
1490 // already computed layout (including `#[repr(C)]`).
1496 // If we pick a "clever" (by-value) ABI, we might have to adjust the ABI of the
1497 // variants to ensure they are consistent. This is because a downcast is
1498 // semantically a NOP, and thus should not affect layout.
1499 if matches!(abi, Abi::Scalar(..) | Abi::ScalarPair(..)) {
1500 for variant in &mut layout_variants {
1501 // We only do this for variants with fields; the others are not accessed anyway.
1502 // Also do not overwrite any already existing "clever" ABIs.
1503 if variant.fields.count() > 0
1504 && matches!(variant.abi, Abi::Aggregate { .. })
1507 // Also need to bump up the size and alignment, so that the entire value fits in here.
1508 variant.size = cmp::max(variant.size, size);
1509 variant.align.abi = cmp::max(variant.align.abi, align.abi);
1514 let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag);
1516 let layout_variants =
1517 layout_variants.into_iter().map(|v| tcx.intern_layout(v)).collect();
1519 let tagged_layout = LayoutS {
1520 variants: Variants::Multiple {
1522 tag_encoding: TagEncoding::Direct,
1524 variants: layout_variants,
1526 fields: FieldsShape::Arbitrary {
1527 offsets: vec![Size::ZERO],
1528 memory_index: vec![0],
1536 let best_layout = match (tagged_layout, niche_filling_layout) {
1537 (tagged_layout, Some(niche_filling_layout)) => {
1538 // Pick the smaller layout; otherwise,
1539 // pick the layout with the larger niche; otherwise,
1540 // pick tagged as it has simpler codegen.
1541 cmp::min_by_key(tagged_layout, niche_filling_layout, |layout| {
1542 let niche_size = layout.largest_niche.map_or(0, |n| n.available(dl));
1543 (layout.size, cmp::Reverse(niche_size))
1546 (tagged_layout, None) => tagged_layout,
1549 tcx.intern_layout(best_layout)
1552 // Types with no meaningful known layout.
1553 ty::Projection(_) | ty::Opaque(..) => {
1554 // NOTE(eddyb) `layout_of` query should've normalized these away,
1555 // if that was possible, so there's no reason to try again here.
1556 return Err(LayoutError::Unknown(ty));
1559 ty::Placeholder(..) | ty::GeneratorWitness(..) | ty::Infer(_) => {
1560 bug!("Layout::compute: unexpected type `{}`", ty)
1563 ty::Bound(..) | ty::Param(_) | ty::Error(_) => {
1564 return Err(LayoutError::Unknown(ty));
1570 /// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
1571 #[derive(Clone, Debug, PartialEq)]
1572 enum SavedLocalEligibility {
// NOTE(review): this excerpt elides original line 1573; the `Unassigned`
// variant matched elsewhere in this file (`Unassigned => bug!()`) is
// presumably declared there — confirm against the full file.
// Overlap-eligible: the saved local is a field of exactly this one variant.
1574 Assigned(VariantIdx),
1575 // FIXME: Use newtype_index so we aren't wasting bytes
// Overlap-ineligible: the local is promoted to the generator's prefix.
// Once prefix slots are assigned, `Some(idx)` is the local's position
// within that prefix (see `generator_saved_local_eligibility`).
1576 Ineligible(Option<u32>),
1579 // When laying out generators, we divide our saved local fields into two
1580 // categories: overlap-eligible and overlap-ineligible.
1582 // Those fields which are ineligible for overlap go in a "prefix" at the
1583 // beginning of the layout, and always have space reserved for them.
1585 // Overlap-eligible fields are only assigned to one variant, so we lay
1586 // those fields out for each variant and put them right after the
1589 // Finally, in the layout details, we point to the fields from the
1590 // variants they are assigned to. It is possible for some fields to be
1591 // included in multiple variants. No field ever "moves around" in the
1592 // layout; its offset is always the same.
1594 // Also included in the layout are the upvars and the discriminant.
1595 // These are included as fields on the "outer" layout; they are not part
1597 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
// NOTE(review): excerpt with elided lines (gaps in the embedded numbering);
// the comments below describe only the code visible here.
1598 /// Compute the eligibility and assignment of each local.
// Returns `(ineligible_locals, assignments)`: locals in the BitSet are
// promoted to the generator prefix; the IndexVec records, per saved local,
// either the single variant it is assigned to or its `Ineligible` prefix
// slot index.
1599 fn generator_saved_local_eligibility(
1601 info: &GeneratorLayout<'tcx>,
1602 ) -> (BitSet<GeneratorSavedLocal>, IndexVec<GeneratorSavedLocal, SavedLocalEligibility>) {
1603 use SavedLocalEligibility::*;
1605 let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> =
1606 IndexVec::from_elem_n(Unassigned, info.field_tys.len());
1608 // The saved locals not eligible for overlap. These will get
1609 // "promoted" to the prefix of our generator.
1610 let mut ineligible_locals = BitSet::new_empty(info.field_tys.len());
1612 // Figure out which of our saved locals are fields in only
1613 // one variant. The rest are deemed ineligible for overlap.
1614 for (variant_index, fields) in info.variant_fields.iter_enumerated() {
1615 for local in fields {
1616 match assignments[*local] {
// First sighting of this local: tentatively assign it to this variant.
1618 assignments[*local] = Assigned(variant_index);
1621 // We've already seen this local at another suspension
1622 // point, so it is no longer a candidate.
1624 "removing local {:?} in >1 variant ({:?}, {:?})",
1629 ineligible_locals.insert(*local);
1630 assignments[*local] = Ineligible(None);
1637 // Next, check every pair of eligible locals to see if they
// (rest of this comment elided) — pairs that are storage-live at the same
// time conflict, and one of each conflicting pair is demoted below.
1639 for local_a in info.storage_conflicts.rows() {
1640 let conflicts_a = info.storage_conflicts.count(local_a);
// Already-ineligible locals live in the prefix; no need to re-check them.
1641 if ineligible_locals.contains(local_a) {
1645 for local_b in info.storage_conflicts.iter(local_a) {
1646 // local_a and local_b are storage live at the same time, therefore they
1647 // cannot overlap in the generator layout. The only way to guarantee
1648 // this is if they are in the same variant, or one is ineligible
1649 // (which means it is stored in every variant).
1650 if ineligible_locals.contains(local_b)
1651 || assignments[local_a] == assignments[local_b]
1656 // If they conflict, we will choose one to make ineligible.
1657 // This is not always optimal; it's just a greedy heuristic that
1658 // seems to produce good results most of the time.
1659 let conflicts_b = info.storage_conflicts.count(local_b);
// Demote whichever of the pair has more conflicts overall.
1660 let (remove, other) =
1661 if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
1662 ineligible_locals.insert(remove);
1663 assignments[remove] = Ineligible(None);
1664 trace!("removing local {:?} due to conflict with {:?}", remove, other);
1668 // Count the number of variants in use. If only one of them, then it is
1669 // impossible to overlap any locals in our layout. In this case it's
1670 // always better to make the remaining locals ineligible, so we can
1671 // lay them out with the other locals in the prefix and eliminate
1672 // unnecessary padding bytes.
1674 let mut used_variants = BitSet::new_empty(info.variant_fields.len());
1675 for assignment in &assignments {
1676 if let Assigned(idx) = assignment {
1677 used_variants.insert(*idx);
// Fewer than two variants in use: overlap buys nothing, demote everything.
1680 if used_variants.count() < 2 {
1681 for assignment in assignments.iter_mut() {
1682 *assignment = Ineligible(None);
1684 ineligible_locals.insert_all();
1688 // Write down the order of our locals that will be promoted to the prefix.
1690 for (idx, local) in ineligible_locals.iter().enumerate() {
1691 assignments[local] = Ineligible(Some(idx as u32));
1694 debug!("generator saved local assignments: {:?}", assignments);
1696 (ineligible_locals, assignments)
1699 /// Compute the full generator layout.
// NOTE(review): excerpt with elided lines (gaps in the embedded numbering);
// comments describe only the code visible here.
// Builds a prefix (upvars + tag + promoted ineligible locals), then one
// per-variant layout on top of that prefix, and assembles the result as a
// `Variants::Multiple` layout with a directly-encoded tag. Returns an
// interned layout, or `LayoutError::Unknown` if MIR generator layout info
// is unavailable.
1700 fn generator_layout(
1703 def_id: hir::def_id::DefId,
1704 substs: SubstsRef<'tcx>,
1705 ) -> Result<Layout<'tcx>, LayoutError<'tcx>> {
1706 use SavedLocalEligibility::*;
1708 let subst_field = |ty: Ty<'tcx>| EarlyBinder(ty).subst(tcx, substs);
1710 let Some(info) = tcx.generator_layout(def_id) else {
1711 return Err(LayoutError::Unknown(ty));
1713 let (ineligible_locals, assignments) = self.generator_saved_local_eligibility(&info);
1715 // Build a prefix layout, including "promoting" all ineligible
1716 // locals as part of the prefix. We compute the layout of all of
1717 // these fields at once to get optimal packing.
// The tag is chained in right after the upvar (prefix) types below, so its
// field index equals the number of prefix types.
1718 let tag_index = substs.as_generator().prefix_tys().count();
1720 // `info.variant_fields` already accounts for the reserved variants, so no need to add them.
1721 let max_discr = (info.variant_fields.len() - 1) as u128;
1722 let discr_int = Integer::fit_unsigned(max_discr);
1723 let discr_int_ty = discr_int.to_ty(tcx, false);
1724 let tag = Scalar::Initialized {
1725 value: Primitive::Int(discr_int, false),
1726 valid_range: WrappingRange { start: 0, end: max_discr },
1728 let tag_layout = self.tcx.intern_layout(LayoutS::scalar(self, tag));
1729 let tag_layout = TyAndLayout { ty: discr_int_ty, layout: tag_layout };
// Promoted locals are wrapped in `MaybeUninit` since they may be dead in
// some variants.
1731 let promoted_layouts = ineligible_locals
1733 .map(|local| subst_field(info.field_tys[local]))
1734 .map(|ty| tcx.mk_maybe_uninit(ty))
1735 .map(|ty| self.layout_of(ty));
// Prefix field order: upvar types, then the tag, then promoted locals.
1736 let prefix_layouts = substs
1739 .map(|ty| self.layout_of(ty))
1740 .chain(iter::once(Ok(tag_layout)))
1741 .chain(promoted_layouts)
1742 .collect::<Result<Vec<_>, _>>()?;
1743 let prefix = self.univariant_uninterned(
1746 &ReprOptions::default(),
1747 StructKind::AlwaysSized,
1750 let (prefix_size, prefix_align) = (prefix.size, prefix.align);
1752 // Split the prefix layout into the "outer" fields (upvars and
1753 // discriminant) and the "promoted" fields. Promoted fields will
1754 // get included in each variant that requested them in
1756 debug!("prefix = {:#?}", prefix);
1757 let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
1758 FieldsShape::Arbitrary { mut offsets, memory_index } => {
1759 let mut inverse_memory_index = invert_mapping(&memory_index);
1761 // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
1762 // "outer" and "promoted" fields respectively.
1763 let b_start = (tag_index + 1) as u32;
1764 let offsets_b = offsets.split_off(b_start as usize);
1765 let offsets_a = offsets;
1767 // Disentangle the "a" and "b" components of `inverse_memory_index`
1768 // by preserving the order but keeping only one disjoint "half" each.
1769 // FIXME(eddyb) build a better abstraction for permutations, if possible.
1770 let inverse_memory_index_b: Vec<_> =
1771 inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
1772 inverse_memory_index.retain(|&i| i < b_start);
1773 let inverse_memory_index_a = inverse_memory_index;
1775 // Since `inverse_memory_index_{a,b}` each only refer to their
1776 // respective fields, they can be safely inverted
1777 let memory_index_a = invert_mapping(&inverse_memory_index_a);
1778 let memory_index_b = invert_mapping(&inverse_memory_index_b);
1781 FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
1782 (outer_fields, offsets_b, memory_index_b)
1787 let mut size = prefix.size;
1788 let mut align = prefix.align;
// Lay out each variant on top of the shared prefix.
1792 .map(|(index, variant_fields)| {
1793 // Only include overlap-eligible fields when we compute our variant layout.
1794 let variant_only_tys = variant_fields
1796 .filter(|local| match assignments[**local] {
1797 Unassigned => bug!(),
1798 Assigned(v) if v == index => true,
1799 Assigned(_) => bug!("assignment does not match variant"),
1800 Ineligible(_) => false,
1802 .map(|local| subst_field(info.field_tys[*local]));
1804 let mut variant = self.univariant_uninterned(
1807 .map(|ty| self.layout_of(ty))
1808 .collect::<Result<Vec<_>, _>>()?,
1809 &ReprOptions::default(),
1810 StructKind::Prefixed(prefix_size, prefix_align.abi),
1812 variant.variants = Variants::Single { index };
1814 let FieldsShape::Arbitrary { offsets, memory_index } = variant.fields else {
1818 // Now, stitch the promoted and variant-only fields back together in
1819 // the order they are mentioned by our GeneratorLayout.
1820 // Because we only use some subset (that can differ between variants)
1821 // of the promoted fields, we can't just pick those elements of the
1822 // `promoted_memory_index` (as we'd end up with gaps).
1823 // So instead, we build an "inverse memory_index", as if all of the
1824 // promoted fields were being used, but leave the elements not in the
1825 // subset as `INVALID_FIELD_IDX`, which we can filter out later to
1826 // obtain a valid (bijective) mapping.
1827 const INVALID_FIELD_IDX: u32 = !0;
1828 let mut combined_inverse_memory_index =
1829 vec![INVALID_FIELD_IDX; promoted_memory_index.len() + memory_index.len()];
1830 let mut offsets_and_memory_index = iter::zip(offsets, memory_index);
1831 let combined_offsets = variant_fields
1835 let (offset, memory_index) = match assignments[*local] {
1836 Unassigned => bug!(),
// Assigned: take the next variant-only field; its memory index is
// shifted past all promoted fields.
1838 let (offset, memory_index) =
1839 offsets_and_memory_index.next().unwrap();
1840 (offset, promoted_memory_index.len() as u32 + memory_index)
1842 Ineligible(field_idx) => {
1843 let field_idx = field_idx.unwrap() as usize;
1844 (promoted_offsets[field_idx], promoted_memory_index[field_idx])
1847 combined_inverse_memory_index[memory_index as usize] = i as u32;
1852 // Remove the unused slots and invert the mapping to obtain the
1853 // combined `memory_index` (also see previous comment).
1854 combined_inverse_memory_index.retain(|&i| i != INVALID_FIELD_IDX);
1855 let combined_memory_index = invert_mapping(&combined_inverse_memory_index);
1857 variant.fields = FieldsShape::Arbitrary {
1858 offsets: combined_offsets,
1859 memory_index: combined_memory_index,
// Overall size/align is the max over the prefix and all variants.
1862 size = size.max(variant.size);
1863 align = align.max(variant.align);
1864 Ok(tcx.intern_layout(variant))
1866 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1868 size = size.align_to(align.abi);
1871 if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi().is_uninhabited()) {
1874 Abi::Aggregate { sized: true }
1877 let layout = tcx.intern_layout(LayoutS {
1878 variants: Variants::Multiple {
1880 tag_encoding: TagEncoding::Direct,
1881 tag_field: tag_index,
1884 fields: outer_fields,
1886 largest_niche: prefix.largest_niche,
1890 debug!("generator layout ({:?}): {:#?}", ty, layout);
1894 /// This is invoked by the `layout_of` query to record the final
1895 /// layout of each type.
// Cheap guard: only does work under `-Zprint-type-sizes`; the actual
// recording is outlined into `record_layout_for_printing_outlined`.
1897 fn record_layout_for_printing(&self, layout: TyAndLayout<'tcx>) {
1898 // If we are running with `-Zprint-type-sizes`, maybe record layouts
1899 // for dumping later.
1900 if self.tcx.sess.opts.debugging_opts.print_type_sizes {
1901 self.record_layout_for_printing_outlined(layout)
// Records per-type size/field/variant statistics into
// `sess.code_stats` for the `-Zprint-type-sizes` report. Only ADTs and
// closures are recorded; other types are skipped.
// NOTE(review): excerpt with elided lines (gaps in the embedded numbering);
// comments describe only the code visible here.
1905 fn record_layout_for_printing_outlined(&self, layout: TyAndLayout<'tcx>) {
1906 // Ignore layouts that are done with non-empty environments or
1907 // non-monomorphic layouts, as the user only wants to see the stuff
1908 // resulting from the final codegen session.
1909 if layout.ty.has_param_types_or_consts() || !self.param_env.caller_bounds().is_empty() {
1913 // (delay format until we actually need it)
1914 let record = |kind, packed, opt_discr_size, variants| {
1915 let type_desc = format!("{:?}", layout.ty);
1916 self.tcx.sess.code_stats.record_type_size(
// Dispatch on the type kind: ADTs fall through to the detailed variant
// recording below; closures are recorded without field detail; everything
// else is skipped.
1927 let adt_def = match *layout.ty.kind() {
1928 ty::Adt(ref adt_def, _) => {
1929 debug!("print-type-size t: `{:?}` process adt", layout.ty);
1933 ty::Closure(..) => {
1934 debug!("print-type-size t: `{:?}` record closure", layout.ty);
1935 record(DataTypeKind::Closure, false, None, vec![]);
1940 debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
1945 let adt_kind = adt_def.adt_kind();
1946 let adt_packed = adt_def.repr().pack.is_some();
// Builds a `VariantInfo` for one variant: per-field name/offset/size/align,
// plus the minimal span covered by the fields (`min_size`).
1948 let build_variant_info = |n: Option<Symbol>, flds: &[Symbol], layout: TyAndLayout<'tcx>| {
1949 let mut min_size = Size::ZERO;
1950 let field_info: Vec<_> = flds
1954 let field_layout = layout.field(self, i);
1955 let offset = layout.fields.offset(i);
1956 let field_end = offset + field_layout.size;
1957 if min_size < field_end {
1958 min_size = field_end;
1961 name: name.to_string(),
1962 offset: offset.bytes(),
1963 size: field_layout.size.bytes(),
1964 align: field_layout.align.abi.bytes(),
1970 name: n.map(|n| n.to_string()),
1971 kind: if layout.is_unsized() { SizeKind::Min } else { SizeKind::Exact },
1972 align: layout.align.abi.bytes(),
// Fall back to the full layout size when no field extends past offset 0.
1973 size: if min_size.bytes() == 0 { layout.size.bytes() } else { min_size.bytes() },
1978 match layout.variants {
1979 Variants::Single { index } => {
1980 if !adt_def.variants().is_empty() && layout.fields != FieldsShape::Primitive {
1982 "print-type-size `{:#?}` variant {}",
1984 adt_def.variant(index).name
1986 let variant_def = &adt_def.variant(index);
1987 let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
1992 vec![build_variant_info(Some(variant_def.name), &fields, layout)],
1995 // (This case arises for *empty* enums; so give it
1997 record(adt_kind.into(), adt_packed, None, vec![]);
2001 Variants::Multiple { tag, ref tag_encoding, .. } => {
2003 "print-type-size `{:#?}` adt general variants def {}",
2005 adt_def.variants().len()
2007 let variant_infos: Vec<_> = adt_def
2010 .map(|(i, variant_def)| {
2011 let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
2013 Some(variant_def.name),
2015 layout.for_variant(self, i),
// Only a directly-encoded tag contributes a discriminant size to the
// report; niche encodings presumably report `None` (arm elided).
2022 match tag_encoding {
2023 TagEncoding::Direct => Some(tag.size(self)),
2033 /// Type size "skeleton", i.e., the only information determining a type's size.
2034 /// While this is conservative, (aside from constant sizes, only pointers,
2035 /// newtypes thereof and null pointer optimized enums are allowed), it is
2036 /// enough to statically check common use cases of transmute.
2037 #[derive(Copy, Clone, Debug)]
2038 pub enum SizeSkeleton<'tcx> {
2039 /// Any statically computable Layout.
// NOTE(review): the variant declarations themselves (`Known(Size)` and the
// `Pointer { non_zero, tail }` struct variant, judging by the uses in
// `compute`/`same_size` below) are on elided lines — confirm against the
// full file.
2042 /// A potentially-fat pointer.
2044 /// If true, this pointer is never null.
2046 /// The type which determines the unsized metadata, if any,
2047 /// of this pointer. Either a type parameter or a projection
2048 /// depending on one, with regions erased.
1145 impl<'tcx> SizeSkeleton<'tcx> {
// NOTE(review): excerpt with elided lines (gaps in the embedded numbering);
// the line above is original line 2053, and `compute`'s signature opening
// is elided — comments describe only the code visible here.
//
// `compute`: returns `Known(size)` when a static layout succeeds, and
// otherwise falls back to a pointer-skeleton analysis for references/raw
// pointers, newtype-like ADTs, nullable-pointer enums, and normalizable
// projections/opaques.
2057 param_env: ty::ParamEnv<'tcx>,
2058 ) -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
2059 debug_assert!(!ty.has_infer_types_or_consts());
2061 // First try computing a static layout.
2062 let err = match tcx.layout_of(param_env.and(ty)) {
2064 return Ok(SizeSkeleton::Known(layout.size));
2070 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
// References are non-null; only raw pointers may be null.
2071 let non_zero = !ty.is_unsafe_ptr();
2072 let tail = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
2074 ty::Param(_) | ty::Projection(_) => {
2075 debug_assert!(tail.has_param_types_or_consts());
2076 Ok(SizeSkeleton::Pointer { non_zero, tail: tcx.erase_regions(tail) })
2079 "SizeSkeleton::compute({}): layout errored ({}), yet \
2080 tail `{}` is not a type parameter or a projection",
2088 ty::Adt(def, substs) => {
2089 // Only newtypes and enums w/ nullable pointer optimization.
2090 if def.is_union() || def.variants().is_empty() || def.variants().len() > 2 {
2094 // Get a zero-sized variant or a pointer newtype.
2095 let zero_or_ptr_variant = |i| {
2096 let i = VariantIdx::new(i);
2098 def.variant(i).fields.iter().map(|field| {
2099 SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env)
2102 for field in fields {
2105 SizeSkeleton::Known(size) => {
2106 if size.bytes() > 0 {
2110 SizeSkeleton::Pointer { .. } => {
2121 let v0 = zero_or_ptr_variant(0)?;
2123 if def.variants().len() == 1 {
// Newtype: a single variant wrapping exactly one pointer skeleton.
2124 if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
2125 return Ok(SizeSkeleton::Pointer {
// Non-null holds if a `rustc_layout_scalar_valid_range` attribute
// excludes zero from the valid range.
2127 || match tcx.layout_scalar_valid_range(def.did()) {
2128 (Bound::Included(start), Bound::Unbounded) => start > 0,
2129 (Bound::Included(start), Bound::Included(end)) => {
2130 0 < start && start < end
2141 let v1 = zero_or_ptr_variant(1)?;
2142 // Nullable pointer enum optimization.
// One variant is a non-null pointer, the other is empty: the whole enum
// has the size of a (nullable) pointer.
2144 (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None)
2145 | (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
2146 Ok(SizeSkeleton::Pointer { non_zero: false, tail })
2152 ty::Projection(_) | ty::Opaque(..) => {
2153 let normalized = tcx.normalize_erasing_regions(param_env, ty);
// Normalization made no progress: give up rather than recurse forever.
2154 if ty == normalized {
2157 SizeSkeleton::compute(normalized, tcx, param_env)
// Two skeletons guarantee the same size iff both are the same known size,
// or both are pointers with the same tail type.
2165 pub fn same_size(self, other: SizeSkeleton<'tcx>) -> bool {
2166 match (self, other) {
2167 (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
2168 (SizeSkeleton::Pointer { tail: a, .. }, SizeSkeleton::Pointer { tail: b, .. }) => {
// Context capability traits: access to the type context and the param-env.
2176 pub trait HasTyCtxt<'tcx>: HasDataLayout {
2177 fn tcx(&self) -> TyCtxt<'tcx>;
2180 pub trait HasParamEnv<'tcx> {
2181 fn param_env(&self) -> ty::ParamEnv<'tcx>;
// Forwarding impls making `TyCtxt` and `ty::query::TyCtxtAt` usable as
// layout contexts. NOTE(review): the method bodies are on elided lines in
// this excerpt; only the signatures are visible.
2184 impl<'tcx> HasDataLayout for TyCtxt<'tcx> {
2186 fn data_layout(&self) -> &TargetDataLayout {
2191 impl<'tcx> HasTargetSpec for TyCtxt<'tcx> {
2192 fn target_spec(&self) -> &Target {
2197 impl<'tcx> HasTyCtxt<'tcx> for TyCtxt<'tcx> {
2199 fn tcx(&self) -> TyCtxt<'tcx> {
2204 impl<'tcx> HasDataLayout for ty::query::TyCtxtAt<'tcx> {
2206 fn data_layout(&self) -> &TargetDataLayout {
2211 impl<'tcx> HasTargetSpec for ty::query::TyCtxtAt<'tcx> {
2212 fn target_spec(&self) -> &Target {
2217 impl<'tcx> HasTyCtxt<'tcx> for ty::query::TyCtxtAt<'tcx> {
2219 fn tcx(&self) -> TyCtxt<'tcx> {
// `LayoutCx` forwards all context capabilities to its inner `tcx` field
// (visible for `data_layout`/`target_spec`; the other bodies are on elided
// lines in this excerpt).
2224 impl<'tcx, C> HasParamEnv<'tcx> for LayoutCx<'tcx, C> {
2225 fn param_env(&self) -> ty::ParamEnv<'tcx> {
2230 impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
2231 fn data_layout(&self) -> &TargetDataLayout {
2232 self.tcx.data_layout()
2236 impl<'tcx, T: HasTargetSpec> HasTargetSpec for LayoutCx<'tcx, T> {
2237 fn target_spec(&self) -> &Target {
2238 self.tcx.target_spec()
2242 impl<'tcx, T: HasTyCtxt<'tcx>> HasTyCtxt<'tcx> for LayoutCx<'tcx, T> {
2243 fn tcx(&self) -> TyCtxt<'tcx> {
// `MaybeResult<T>` abstracts over returning a bare `T` versus a
// `Result<T, E>`, so `layout_of`-style APIs can pick either shape.
2248 pub trait MaybeResult<T> {
2251 fn from(x: Result<T, Self::Error>) -> Self;
2252 fn to_result(self) -> Result<T, Self::Error>;
2255 impl<T> MaybeResult<T> for T {
// The irrefutable `Ok(x)` pattern implies `Self::Error` is uninhabited for
// this impl (the associated-type line is elided in this excerpt — confirm).
2258 fn from(Ok(x): Result<T, Self::Error>) -> Self {
2261 fn to_result(self) -> Result<T, Self::Error> {
2266 impl<T, E> MaybeResult<T> for Result<T, E> {
2269 fn from(x: Result<T, Self::Error>) -> Self {
2272 fn to_result(self) -> Result<T, Self::Error> {
// Convenience alias pairing a `Ty` with its computed layout.
2277 pub type TyAndLayout<'tcx> = rustc_target::abi::TyAndLayout<'tcx, Ty<'tcx>>;
2279 /// Trait for contexts that want to be able to compute layouts of types.
2280 /// This automatically gives access to `LayoutOf`, through a blanket `impl`.
2281 pub trait LayoutOfHelpers<'tcx>: HasDataLayout + HasTyCtxt<'tcx> + HasParamEnv<'tcx> {
2282 /// The `TyAndLayout`-wrapping type (or `TyAndLayout` itself), which will be
2283 /// returned from `layout_of` (see also `handle_layout_err`).
2284 type LayoutOfResult: MaybeResult<TyAndLayout<'tcx>>;
2286 /// `Span` to use for `tcx.at(span)`, from `layout_of`.
2287 // FIXME(eddyb) perhaps make this mandatory to get contexts to track it better?
2289 fn layout_tcx_at_span(&self) -> Span {
2293 /// Helper used for `layout_of`, to adapt `tcx.layout_of(...)` into a
2294 /// `Self::LayoutOfResult` (which does not need to be a `Result<...>`).
2296 /// Most `impl`s, which propagate `LayoutError`s, should simply return `err`,
2297 /// but this hook allows e.g. codegen to return only `TyAndLayout` from its
2298 /// `cx.layout_of(...)`, without any `Result<...>` around it to deal with
2299 /// (and any `LayoutError`s are turned into fatal errors or ICEs).
2300 fn handle_layout_err(
2302 err: LayoutError<'tcx>,
2305 ) -> <Self::LayoutOfResult as MaybeResult<TyAndLayout<'tcx>>>::Error;
2308 /// Blanket extension trait for contexts that can compute layouts of types.
2309 pub trait LayoutOf<'tcx>: LayoutOfHelpers<'tcx> {
2310 /// Computes the layout of a type. Note that this implicitly
2311 /// executes in "reveal all" mode, and will normalize the input type.
2313 fn layout_of(&self, ty: Ty<'tcx>) -> Self::LayoutOfResult {
2314 self.spanned_layout_of(ty, DUMMY_SP)
2317 /// Computes the layout of a type, at `span`. Note that this implicitly
2318 /// executes in "reveal all" mode, and will normalize the input type.
2319 // FIXME(eddyb) avoid passing information like this, and instead add more
2320 // `TyCtxt::at`-like APIs to be able to do e.g. `cx.at(span).layout_of(ty)`.
2322 fn spanned_layout_of(&self, ty: Ty<'tcx>, span: Span) -> Self::LayoutOfResult {
// A dummy span falls back to whatever span the context tracks.
2323 let span = if !span.is_dummy() { span } else { self.layout_tcx_at_span() };
2324 let tcx = self.tcx().at(span);
// Errors are routed through the context's `handle_layout_err` hook.
2327 tcx.layout_of(self.param_env().and(ty))
2328 .map_err(|err| self.handle_layout_err(err, span, ty)),
// Blanket impl: any `LayoutOfHelpers` context gets `LayoutOf` for free.
2333 impl<'tcx, C: LayoutOfHelpers<'tcx>> LayoutOf<'tcx> for C {}
// `LayoutCx` propagates layout errors verbatim (its result type is a plain
// `Result`), both over `TyCtxt` and over `TyCtxtAt`; the latter also
// supplies the tracked span via `layout_tcx_at_span`.
2335 impl<'tcx> LayoutOfHelpers<'tcx> for LayoutCx<'tcx, TyCtxt<'tcx>> {
2336 type LayoutOfResult = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
2339 fn handle_layout_err(&self, err: LayoutError<'tcx>, _: Span, _: Ty<'tcx>) -> LayoutError<'tcx> {
2344 impl<'tcx> LayoutOfHelpers<'tcx> for LayoutCx<'tcx, ty::query::TyCtxtAt<'tcx>> {
2345 type LayoutOfResult = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
2348 fn layout_tcx_at_span(&self) -> Span {
2353 fn handle_layout_err(&self, err: LayoutError<'tcx>, _: Span, _: Ty<'tcx>) -> LayoutError<'tcx> {
2358 impl<'tcx, C> TyAbiInterface<'tcx, C> for Ty<'tcx>
2360 C: HasTyCtxt<'tcx> + HasParamEnv<'tcx>,
// Projects the layout of one variant out of `this` (an enum/generator
// layout). Several lines are elided in this view — in particular the arm
// bodies between the match guards below.
2362 fn ty_and_layout_for_variant(
2363 this: TyAndLayout<'tcx>,
2365 variant_index: VariantIdx,
2366 ) -> TyAndLayout<'tcx> {
2367 let layout = match this.variants {
2368 Variants::Single { index }
2369 // If all variants but one are uninhabited, the variant layout is the enum layout.
2370 if index == variant_index &&
2371 // Don't confuse variants of uninhabited enums with the enum itself.
2372 // For more details see https://github.com/rust-lang/rust/issues/69763.
2373 this.fields != FieldsShape::Primitive =>
// `Single { index }` with a *different* requested variant: synthesize an
// uninhabited layout for the absent variant.
2378 Variants::Single { index } => {
2380 let param_env = cx.param_env();
2382 // Deny calling for_variant more than once for non-Single enums.
2383 if let Ok(original_layout) = tcx.layout_of(param_env.and(this.ty)) {
2384 assert_eq!(original_layout.variants, Variants::Single { index });
2387 let fields = match this.ty.kind() {
2388 ty::Adt(def, _) if def.variants().is_empty() =>
2389 bug!("for_variant called on zero-variant enum"),
2390 ty::Adt(def, _) => def.variant(variant_index).fields.len(),
2393 tcx.intern_layout(LayoutS {
2394 variants: Variants::Single { index: variant_index },
// Zero fields degenerates to an empty `Arbitrary` shape; otherwise a
// union-like shape carrying only the field count.
2395 fields: match NonZeroUsize::new(fields) {
2396 Some(fields) => FieldsShape::Union(fields),
2397 None => FieldsShape::Arbitrary { offsets: vec![], memory_index: vec![] },
2399 abi: Abi::Uninhabited,
2400 largest_niche: None,
2401 align: tcx.data_layout.i8_align,
2406 Variants::Multiple { ref variants, .. } => variants[variant_index],
2409 assert_eq!(*layout.variants(), Variants::Single { index: variant_index });
2411 TyAndLayout { ty: this.ty, layout }
// Computes the type-and-layout of field `i` of `this`. The inner helper
// returns either a bare type (to be laid out afterwards) or a finished
// `TyAndLayout` for the cases where the layout is synthesized directly.
// Note: many lines (closing braces, some arms) are elided in this view.
2414 fn ty_and_layout_field(this: TyAndLayout<'tcx>, cx: &C, i: usize) -> TyAndLayout<'tcx> {
2415 enum TyMaybeWithLayout<'tcx> {
2417 TyAndLayout(TyAndLayout<'tcx>),
2420 fn field_ty_or_layout<'tcx>(
2421 this: TyAndLayout<'tcx>,
2422 cx: &(impl HasTyCtxt<'tcx> + HasParamEnv<'tcx>),
2424 ) -> TyMaybeWithLayout<'tcx> {
// Builds the layout of an enum/generator discriminant ("tag") scalar.
2426 let tag_layout = |tag: Scalar| -> TyAndLayout<'tcx> {
2428 layout: tcx.intern_layout(LayoutS::scalar(cx, tag)),
2429 ty: tag.primitive().to_ty(tcx),
2433 match *this.ty.kind() {
2442 | ty::GeneratorWitness(..)
2444 | ty::Dynamic(..) => bug!("TyAndLayout::field({:?}): not applicable", this),
2446 // Potentially-fat pointers.
2447 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
2448 assert!(i < this.fields.count());
2450 // Reuse the fat `*T` type as its own thin pointer data field.
2451 // This provides information about, e.g., DST struct pointees
2452 // (which may have no non-DST form), and will work as long
2453 // as the `Abi` or `FieldsShape` is checked by users.
2455 let nil = tcx.mk_unit();
2456 let unit_ptr_ty = if this.ty.is_unsafe_ptr() {
2459 tcx.mk_mut_ref(tcx.lifetimes.re_static, nil)
2462 // NOTE(eddyb) using an empty `ParamEnv`, and `unwrap`-ing
2463 // the `Result` should always work because the type is
2464 // always either `*mut ()` or `&'static mut ()`.
2465 return TyMaybeWithLayout::TyAndLayout(TyAndLayout {
2467 ..tcx.layout_of(ty::ParamEnv::reveal_all().and(unit_ptr_ty)).unwrap()
// The metadata field of a fat pointer depends on the unsized tail.
2471 match tcx.struct_tail_erasing_lifetimes(pointee, cx.param_env()).kind() {
2472 ty::Slice(_) | ty::Str => TyMaybeWithLayout::Ty(tcx.types.usize),
2473 ty::Dynamic(_, _) => {
// Vtable pointer is modeled as `&'static [usize; 3]` for now.
2474 TyMaybeWithLayout::Ty(tcx.mk_imm_ref(
2475 tcx.lifetimes.re_static,
2476 tcx.mk_array(tcx.types.usize, 3),
2478 /* FIXME: use actual fn pointers
2479 Warning: naively computing the number of entries in the
2480 vtable by counting the methods on the trait + methods on
2481 all parent traits does not work, because some methods can
2482 be not object safe and thus excluded from the vtable.
2483 Increase this counter if you tried to implement this but
2484 failed to do it without duplicating a lot of code from
2485 other places in the compiler: 2
2487 tcx.mk_array(tcx.types.usize, 3),
2488 tcx.mk_array(Option<fn()>),
2492 _ => bug!("TyAndLayout::field({:?}): not applicable", this),
2496 // Arrays and slices.
2497 ty::Array(element, _) | ty::Slice(element) => TyMaybeWithLayout::Ty(element),
2498 ty::Str => TyMaybeWithLayout::Ty(tcx.types.u8),
2500 // Tuples, generators and closures.
// Closures defer to the layout of their tupled upvars.
2501 ty::Closure(_, ref substs) => field_ty_or_layout(
2502 TyAndLayout { ty: substs.as_closure().tupled_upvars_ty(), ..this },
2507 ty::Generator(def_id, ref substs, _) => match this.variants {
2508 Variants::Single { index } => TyMaybeWithLayout::Ty(
2511 .state_tys(def_id, tcx)
2512 .nth(index.as_usize())
2517 Variants::Multiple { tag, tag_field, .. } => {
// Field `i == tag_field` is the discriminant itself.
2519 return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
2521 TyMaybeWithLayout::Ty(substs.as_generator().prefix_tys().nth(i).unwrap())
2525 ty::Tuple(tys) => TyMaybeWithLayout::Ty(tys[i]),
2528 ty::Adt(def, substs) => {
2529 match this.variants {
2530 Variants::Single { index } => {
2531 TyMaybeWithLayout::Ty(def.variant(index).fields[i].ty(tcx, substs))
2534 // Discriminant field for enums (where applicable).
2535 Variants::Multiple { tag, .. } => {
2537 return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
2544 | ty::Placeholder(..)
2548 | ty::Error(_) => bug!("TyAndLayout::field: unexpected type `{}`", this.ty),
// Outer wrapper: lay out a bare field type, or pass a finished layout through.
2552 match field_ty_or_layout(this, cx, i) {
2553 TyMaybeWithLayout::Ty(field_ty) => {
2554 cx.tcx().layout_of(cx.param_env().and(field_ty)).unwrap_or_else(|e| {
2556 "failed to get layout for `{}`: {},\n\
2557 despite it being a field (#{}) of an existing layout: {:#?}",
2565 TyMaybeWithLayout::TyAndLayout(field_layout) => field_layout,
// Describes the pointer (if any) found at `offset` inside `this`: its size,
// alignment, address space, and a `PointerKind` classification used for
// LLVM attributes (noalias/readonly/etc.). Recurses into fields and niche
// variants. Several lines are elided in this view.
2569 fn ty_and_layout_pointee_info_at(
2570 this: TyAndLayout<'tcx>,
2573 ) -> Option<PointeeInfo> {
2575 let param_env = cx.param_env();
// Function pointers live in the instruction address space on some targets.
2577 let addr_space_of_ty = |ty: Ty<'tcx>| {
2578 if ty.is_fn() { cx.data_layout().instruction_address_space } else { AddressSpace::DATA }
2581 let pointee_info = match *this.ty.kind() {
2582 ty::RawPtr(mt) if offset.bytes() == 0 => {
2583 tcx.layout_of(param_env.and(mt.ty)).ok().map(|layout| PointeeInfo {
2585 align: layout.align.abi,
2587 address_space: addr_space_of_ty(mt.ty),
2590 ty::FnPtr(fn_sig) if offset.bytes() == 0 => {
2591 tcx.layout_of(param_env.and(tcx.mk_fn_ptr(fn_sig))).ok().map(|layout| PointeeInfo {
2593 align: layout.align.abi,
2595 address_space: cx.data_layout().instruction_address_space,
2598 ty::Ref(_, ty, mt) if offset.bytes() == 0 => {
2599 let address_space = addr_space_of_ty(ty);
2600 let kind = if tcx.sess.opts.optimize == OptLevel::No {
2601 // Use conservative pointer kind if not optimizing. This saves us the
2602 // Freeze/Unpin queries, and can save time in the codegen backend (noalias
2603 // attributes in LLVM have compile-time cost even in unoptimized builds).
2607 hir::Mutability::Not => {
// Shared refs to `Freeze` data can be treated as frozen (readonly).
2608 if ty.is_freeze(tcx.at(DUMMY_SP), cx.param_env()) {
2614 hir::Mutability::Mut => {
2615 // References to self-referential structures should not be considered
2616 // noalias, as another pointer to the structure can be obtained, that
2617 // is not based-on the original reference. We consider all !Unpin
2618 // types to be potentially self-referential here.
2619 if ty.is_unpin(tcx.at(DUMMY_SP), cx.param_env()) {
2620 PointerKind::UniqueBorrowed
2628 tcx.layout_of(param_env.and(ty)).ok().map(|layout| PointeeInfo {
2630 align: layout.align.abi,
// Non-pointer at offset 0 (or nonzero offset): search the fields.
2637 let mut data_variant = match this.variants {
2638 // Within the discriminant field, only the niche itself is
2639 // always initialized, so we only check for a pointer at its
2642 // If the niche is a pointer, it's either valid (according
2643 // to its type), or null (which the niche field's scalar
2644 // validity range encodes). This allows using
2645 // `dereferenceable_or_null` for e.g., `Option<&T>`, and
2646 // this will continue to work as long as we don't start
2647 // using more niches than just null (e.g., the first page of
2648 // the address space, or unaligned pointers).
2649 Variants::Multiple {
2650 tag_encoding: TagEncoding::Niche { dataful_variant, .. },
2653 } if this.fields.offset(tag_field) == offset => {
2654 Some(this.for_variant(cx, dataful_variant))
2659 if let Some(variant) = data_variant {
2660 // We're not interested in any unions.
2661 if let FieldsShape::Union(_) = variant.fields {
2662 data_variant = None;
2666 let mut result = None;
2668 if let Some(variant) = data_variant {
2669 let ptr_end = offset + Pointer.size(cx);
// Find the field whose extent covers [offset, ptr_end) and recurse into it.
2670 for i in 0..variant.fields.count() {
2671 let field_start = variant.fields.offset(i);
2672 if field_start <= offset {
2673 let field = variant.field(cx, i);
2674 result = field.to_result().ok().and_then(|field| {
2675 if ptr_end <= field_start + field.size {
2676 // We found the right field, look inside it.
2678 field.pointee_info_at(cx, offset - field_start);
2684 if result.is_some() {
2691 // FIXME(eddyb) This should be for `ptr::Unique<T>`, not `Box<T>`.
2692 if let Some(ref mut pointee) = result {
2693 if let ty::Adt(def, _) = this.ty.kind() {
2694 if def.is_box() && offset.bytes() == 0 {
2695 pointee.safe = Some(PointerKind::UniqueOwned);
2705 "pointee_info_at (offset={:?}, type kind: {:?}) => {:?}",
// Simple type-kind predicates used by target-agnostic ABI code.
/// Whether the underlying type is an ADT (struct/enum/union).
2714 fn is_adt(this: TyAndLayout<'tcx>) -> bool {
2715 matches!(this.ty.kind(), ty::Adt(..))
/// Whether the underlying type is the never type `!`.
2718 fn is_never(this: TyAndLayout<'tcx>) -> bool {
2719 this.ty.kind() == &ty::Never
/// Whether the underlying type is a tuple (of any arity).
2722 fn is_tuple(this: TyAndLayout<'tcx>) -> bool {
2723 matches!(this.ty.kind(), ty::Tuple(..))
/// Whether the underlying type is the unit type `()` (the 0-tuple).
2726 fn is_unit(this: TyAndLayout<'tcx>) -> bool {
2727 matches!(this.ty.kind(), ty::Tuple(list) if list.len() == 0)
2731 impl<'tcx> ty::Instance<'tcx> {
2732 // NOTE(eddyb) this is private to avoid using it from outside of
2733 // `fn_abi_of_instance` - any other uses are either too high-level
2734 // for `Instance` (e.g. typeck would use `Ty::fn_sig` instead),
2735 // or should go through `FnAbi` instead, to avoid losing any
2736 // adjustments `fn_abi_of_instance` might be performing.
// Builds the signature used for ABI computation: `FnDef`s are normalized,
// closures/generators get their synthetic env/resume parameters, and
// vtable shims get `self` rewritten to `*mut Self`. Some lines are elided
// in this view.
2737 fn fn_sig_for_fn_abi(
2740 param_env: ty::ParamEnv<'tcx>,
2741 ) -> ty::PolyFnSig<'tcx> {
2742 let ty = self.ty(tcx, param_env);
2745 // HACK(davidtwco,eddyb): This is a workaround for polymorphization considering
2746 // parameters unused if they show up in the signature, but not in the `mir::Body`
2747 // (i.e. due to being inside a projection that got normalized, see
2748 // `src/test/ui/polymorphization/normalized_sig_types.rs`), and codegen not keeping
2749 // track of a polymorphization `ParamEnv` to allow normalizing later.
2750 let mut sig = match *ty.kind() {
2751 ty::FnDef(def_id, substs) => tcx
2752 .normalize_erasing_regions(tcx.param_env(def_id), tcx.bound_fn_sig(def_id))
2753 .subst(tcx, substs),
2754 _ => unreachable!(),
2757 if let ty::InstanceDef::VtableShim(..) = self.def {
2758 // Modify `fn(self, ...)` to `fn(self: *mut Self, ...)`.
2759 sig = sig.map_bound(|mut sig| {
2760 let mut inputs_and_output = sig.inputs_and_output.to_vec();
2761 inputs_and_output[0] = tcx.mk_mut_ptr(inputs_and_output[0]);
2762 sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);
2768 ty::Closure(def_id, substs) => {
2769 let sig = substs.as_closure().sig();
// Append a fresh late-bound region for the closure environment.
2771 let bound_vars = tcx.mk_bound_variable_kinds(
2774 .chain(iter::once(ty::BoundVariableKind::Region(ty::BrEnv))),
2776 let br = ty::BoundRegion {
2777 var: ty::BoundVar::from_usize(bound_vars.len() - 1),
2778 kind: ty::BoundRegionKind::BrEnv,
2780 let env_region = ty::ReLateBound(ty::INNERMOST, br);
2781 let env_ty = tcx.closure_env_ty(def_id, substs, env_region).unwrap();
// Prepend the env parameter to the closure's own inputs.
2783 let sig = sig.skip_binder();
2784 ty::Binder::bind_with_vars(
2786 iter::once(env_ty).chain(sig.inputs().iter().cloned()),
2795 ty::Generator(_, substs, _) => {
2796 let sig = substs.as_generator().poly_sig();
2798 let bound_vars = tcx.mk_bound_variable_kinds(
2801 .chain(iter::once(ty::BoundVariableKind::Region(ty::BrEnv))),
2803 let br = ty::BoundRegion {
2804 var: ty::BoundVar::from_usize(bound_vars.len() - 1),
2805 kind: ty::BoundRegionKind::BrEnv,
2807 let env_region = ty::ReLateBound(ty::INNERMOST, br);
2808 let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty);
// Generators take `Pin<&mut Self>` and return `GeneratorState<Y, R>`.
2810 let pin_did = tcx.require_lang_item(LangItem::Pin, None);
2811 let pin_adt_ref = tcx.adt_def(pin_did);
2812 let pin_substs = tcx.intern_substs(&[env_ty.into()]);
2813 let env_ty = tcx.mk_adt(pin_adt_ref, pin_substs);
2815 let sig = sig.skip_binder();
2816 let state_did = tcx.require_lang_item(LangItem::GeneratorState, None);
2817 let state_adt_ref = tcx.adt_def(state_did);
2818 let state_substs = tcx.intern_substs(&[sig.yield_ty.into(), sig.return_ty.into()]);
2819 let ret_ty = tcx.mk_adt(state_adt_ref, state_substs);
2820 ty::Binder::bind_with_vars(
2822 [env_ty, sig.resume_ty].iter(),
2825 hir::Unsafety::Normal,
2826 rustc_target::spec::abi::Abi::Rust,
2831 _ => bug!("unexpected type {:?} in Instance::fn_sig", ty),
2836 /// Calculates whether a function's ABI can unwind or not.
2838 /// This takes two primary parameters:
2840 /// * `codegen_fn_attr_flags` - these are flags calculated as part of the
2841 /// codegen attrs for a defined function. For function pointers this set of
2842 /// flags is the empty set. This is only applicable for Rust-defined
2843 /// functions, and generally isn't needed except for small optimizations where
2844 /// we try to say a function which otherwise might look like it could unwind
2845 /// doesn't actually unwind (such as for intrinsics and such).
2847 /// * `abi` - this is the ABI that the function is defined with. This is the
2848 /// primary factor for determining whether a function can unwind or not.
2850 /// Note that in this case unwinding is not necessarily panicking in Rust. Rust
2851 /// panics are implemented with unwinds on most platforms (when
2852 /// `-Cpanic=unwind`), but this also accounts for `-Cpanic=abort` build modes.
2853 /// Notably unwinding is disallowed for most non-Rust ABIs unless it's
2854 /// specifically in the name (e.g. `"C-unwind"`). Unwinding within each ABI is
2855 /// defined for each ABI individually, but it always corresponds to some form of
2856 /// stack-based unwinding (the exact mechanism of which varies
2857 /// platform-by-platform).
2859 /// Rust functions are classified whether or not they can unwind based on the
2860 /// active "panic strategy". In other words Rust functions are considered to
2861 /// unwind in `-Cpanic=unwind` mode and cannot unwind in `-Cpanic=abort` mode.
2862 /// Note that Rust supports intermingling panic=abort and panic=unwind code, but
2863 /// only if the final panic mode is panic=abort. In this scenario any code
2864 /// previously compiled assuming that a function can unwind is still correct, it
2865 /// just never happens to actually unwind at runtime.
2867 /// This function's answer to whether or not a function can unwind is quite
2868 /// impactful throughout the compiler. This affects things like:
2870 /// * Calling a function which can't unwind means codegen simply ignores any
2871 /// associated unwinding cleanup.
2872 /// * Calling a function which can unwind from a function which can't unwind
2873 /// causes the `abort_unwinding_calls` MIR pass to insert a landing pad that
2874 /// aborts the process.
2875 /// * This affects whether functions have the LLVM `nounwind` attribute, which
2876 /// affects various optimizations and codegen.
2878 /// FIXME: this is actually buggy with respect to Rust functions. Rust functions
2879 /// compiled with `-Cpanic=unwind` and referenced from another crate compiled
2880 /// with `-Cpanic=abort` will look like they can't unwind when in fact they
2881 /// might (from a foreign exception or similar).
// See the long doc comment above for the full rationale. Some early-return
// lines inside the `if` bodies are elided in this view.
2883 pub fn fn_can_unwind<'tcx>(tcx: TyCtxt<'tcx>, fn_def_id: Option<DefId>, abi: SpecAbi) -> bool {
2884 if let Some(did) = fn_def_id {
2885 // Special attribute for functions which can't unwind.
2886 if tcx.codegen_fn_attrs(did).flags.contains(CodegenFnAttrFlags::NEVER_UNWIND) {
2890 // With `-C panic=abort`, all non-FFI functions are required to not unwind.
2892 // Note that this is true regardless of the ABI specified on the function -- an `extern "C-unwind"`
2893 // function defined in Rust is also required to abort.
2894 if tcx.sess.panic_strategy() == PanicStrategy::Abort && !tcx.is_foreign_item(did) {
2898 // With -Z panic-in-drop=abort, drop_in_place never unwinds.
2900 // This is not part of `codegen_fn_attrs` as it can differ between crates
2901 // and therefore cannot be computed in core.
2902 if tcx.sess.opts.debugging_opts.panic_in_drop == PanicStrategy::Abort {
2903 if Some(did) == tcx.lang_items().drop_in_place_fn() {
2909 // Otherwise if this isn't special then unwinding is generally determined by
2910 // the ABI of the function itself. ABIs like `C` have variants which also
2911 // specifically allow unwinding (`C-unwind`), but not all platform-specific
2912 // ABIs have such an option. Otherwise the only other thing here is Rust
2913 // itself, and those ABIs are determined by the panic strategy configured
2914 // for this compilation.
2916 // Unfortunately at this time there's also another caveat. Rust [RFC
2917 // 2945][rfc] has been accepted and is in the process of being implemented
2918 // and stabilized. In this interim state we need to deal with historical
2919 // rustc behavior as well as plan for future rustc behavior.
2921 // Historically functions declared with `extern "C"` were marked at the
2922 // codegen layer as `nounwind`. This happened regardless of `panic=unwind`
2923 // or not. This is UB for functions in `panic=unwind` mode that then
2924 // actually panic and unwind. Note that this behavior is true for both
2925 // externally declared functions as well as Rust-defined function.
2927 // To fix this UB rustc would like to change in the future to catch unwinds
2928 // from function calls that may unwind within a Rust-defined `extern "C"`
2929 // function and forcibly abort the process, thereby respecting the
2930 // `nounwind` attribute emitted for `extern "C"`. This behavior change isn't
2931 // ready to roll out, so determining whether or not the `C` family of ABIs
2932 // unwinds is conditional not only on their definition but also whether the
2933 // `#![feature(c_unwind)]` feature gate is active.
2935 // Note that this means that unlike historical compilers rustc now, by
2936 // default, unconditionally thinks that the `C` ABI may unwind. This will
2937 // prevent some optimization opportunities, however, so we try to scope this
2938 // change and only assume that `C` unwinds with `panic=unwind` (as opposed
2939 // to `panic=abort`).
2941 // Eventually the check against `c_unwind` here will ideally get removed and
2942 // this'll be a little cleaner as it'll be a straightforward check of the
2945 // [rfc]: https://github.com/rust-lang/rfcs/blob/master/text/2945-c-unwind-abi.md
2951 | Stdcall { unwind }
2952 | Fastcall { unwind }
2953 | Vectorcall { unwind }
2954 | Thiscall { unwind }
2957 | SysV64 { unwind } => {
2959 || (!tcx.features().c_unwind && tcx.sess.panic_strategy() == PanicStrategy::Unwind)
2967 | AvrNonBlockingInterrupt
2968 | CCmseNonSecureCall
2972 | Unadjusted => false,
2973 Rust | RustCall | RustCold => tcx.sess.panic_strategy() == PanicStrategy::Unwind,
/// Maps a (target-adjusted) source-level ABI to the calling convention used
/// by ABI computation. `System`/`EfiApi` must have been resolved to a
/// concrete ABI by `adjust_abi` before reaching this point.
2978 pub fn conv_from_spec_abi(tcx: TyCtxt<'_>, abi: SpecAbi) -> Conv {
2979 use rustc_target::spec::abi::Abi::*;
2980 match tcx.sess.target.adjust_abi(abi) {
2981 RustIntrinsic | PlatformIntrinsic | Rust | RustCall => Conv::Rust,
2982 RustCold => Conv::RustCold,
2984 // It's the ABI's job to select this, not ours.
2985 System { .. } => bug!("system abi should be selected elsewhere"),
2986 EfiApi => bug!("eficall abi should be selected elsewhere"),
2988 Stdcall { .. } => Conv::X86Stdcall,
2989 Fastcall { .. } => Conv::X86Fastcall,
2990 Vectorcall { .. } => Conv::X86VectorCall,
2991 Thiscall { .. } => Conv::X86ThisCall,
2992 C { .. } => Conv::C,
2993 Unadjusted => Conv::C,
2994 Win64 { .. } => Conv::X86_64Win64,
2995 SysV64 { .. } => Conv::X86_64SysV,
2996 Aapcs { .. } => Conv::ArmAapcs,
2997 CCmseNonSecureCall => Conv::CCmseNonSecureCall,
2998 PtxKernel => Conv::PtxKernel,
2999 Msp430Interrupt => Conv::Msp430Intr,
3000 X86Interrupt => Conv::X86Intr,
3001 AmdGpuKernel => Conv::AmdGpuKernel,
3002 AvrInterrupt => Conv::AvrInterrupt,
3003 AvrNonBlockingInterrupt => Conv::AvrNonBlockingInterrupt,
3006 // These API constants ought to be more specific...
3007 Cdecl { .. } => Conv::C,
3011 /// Error produced by attempting to compute or adjust a `FnAbi`.
3012 #[derive(Copy, Clone, Debug, HashStable)]
3013 pub enum FnAbiError<'tcx> {
3014 /// Error produced by a `layout_of` call, while computing `FnAbi` initially.
3015 Layout(LayoutError<'tcx>),
3017 /// Error produced by attempting to adjust a `FnAbi`, for a "foreign" ABI.
3018 AdjustForForeignAbi(call::AdjustForForeignAbiError),
// `From` conversions let `?` lift both underlying error kinds into
// `FnAbiError`; `Display` delegates to whichever variant is held.
3021 impl<'tcx> From<LayoutError<'tcx>> for FnAbiError<'tcx> {
3022 fn from(err: LayoutError<'tcx>) -> Self {
3027 impl From<call::AdjustForForeignAbiError> for FnAbiError<'_> {
3028 fn from(err: call::AdjustForForeignAbiError) -> Self {
3029 Self::AdjustForForeignAbi(err)
3033 impl<'tcx> fmt::Display for FnAbiError<'tcx> {
3034 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
3036 Self::Layout(err) => err.fmt(f),
3037 Self::AdjustForForeignAbi(err) => err.fmt(f),
3042 // FIXME(eddyb) maybe use something like this for an unified `fn_abi_of`, not
3043 // just for error handling.
/// Describes which `fn_abi_of_*` query failed, for error reporting.
3045 pub enum FnAbiRequest<'tcx> {
3046 OfFnPtr { sig: ty::PolyFnSig<'tcx>, extra_args: &'tcx ty::List<Ty<'tcx>> },
3047 OfInstance { instance: ty::Instance<'tcx>, extra_args: &'tcx ty::List<Ty<'tcx>> },
3050 /// Trait for contexts that want to be able to compute `FnAbi`s.
3051 /// This automatically gives access to `FnAbiOf`, through a blanket `impl`.
3052 pub trait FnAbiOfHelpers<'tcx>: LayoutOfHelpers<'tcx> {
3053 /// The `&FnAbi`-wrapping type (or `&FnAbi` itself), which will be
3054 /// returned from `fn_abi_of_*` (see also `handle_fn_abi_err`).
3055 type FnAbiOfResult: MaybeResult<&'tcx FnAbi<'tcx, Ty<'tcx>>>;
3057 /// Helper used for `fn_abi_of_*`, to adapt `tcx.fn_abi_of_*(...)` into a
3058 /// `Self::FnAbiOfResult` (which does not need to be a `Result<...>`).
3060 /// Most `impl`s, which propagate `FnAbiError`s, should simply return `err`,
3061 /// but this hook allows e.g. codegen to return only `&FnAbi` from its
3062 /// `cx.fn_abi_of_*(...)`, without any `Result<...>` around it to deal with
3063 /// (and any `FnAbiError`s are turned into fatal errors or ICEs).
3064 fn handle_fn_abi_err(
3066 err: FnAbiError<'tcx>,
3068 fn_abi_request: FnAbiRequest<'tcx>,
3069 ) -> <Self::FnAbiOfResult as MaybeResult<&'tcx FnAbi<'tcx, Ty<'tcx>>>>::Error;
3072 /// Blanket extension trait for contexts that can compute `FnAbi`s.
3073 pub trait FnAbiOf<'tcx>: FnAbiOfHelpers<'tcx> {
3074 /// Compute a `FnAbi` suitable for indirect calls, i.e. to `fn` pointers.
3076 /// NB: this doesn't handle virtual calls - those should use `fn_abi_of_instance`
3077 /// instead, where the instance is an `InstanceDef::Virtual`.
3079 fn fn_abi_of_fn_ptr(
3081 sig: ty::PolyFnSig<'tcx>,
3082 extra_args: &'tcx ty::List<Ty<'tcx>>,
3083 ) -> Self::FnAbiOfResult {
3084 // FIXME(eddyb) get a better `span` here.
3085 let span = self.layout_tcx_at_span();
3086 let tcx = self.tcx().at(span);
// Errors go through the context's `handle_fn_abi_err` adapter hook.
3088 MaybeResult::from(tcx.fn_abi_of_fn_ptr(self.param_env().and((sig, extra_args))).map_err(
3089 |err| self.handle_fn_abi_err(err, span, FnAbiRequest::OfFnPtr { sig, extra_args }),
3093 /// Compute a `FnAbi` suitable for declaring/defining an `fn` instance, and for
3094 /// direct calls to an `fn`.
3096 /// NB: that includes virtual calls, which are represented by "direct calls"
3097 /// to an `InstanceDef::Virtual` instance (of `<dyn Trait as Trait>::fn`).
3099 fn fn_abi_of_instance(
3101 instance: ty::Instance<'tcx>,
3102 extra_args: &'tcx ty::List<Ty<'tcx>>,
3103 ) -> Self::FnAbiOfResult {
3104 // FIXME(eddyb) get a better `span` here.
3105 let span = self.layout_tcx_at_span();
3106 let tcx = self.tcx().at(span);
3109 tcx.fn_abi_of_instance(self.param_env().and((instance, extra_args))).map_err(|err| {
3110 // HACK(eddyb) at least for definitions of/calls to `Instance`s,
3111 // we can get some kind of span even if one wasn't provided.
3112 // However, we don't do this early in order to avoid calling
3113 // `def_span` unconditionally (which may have a perf penalty).
3114 let span = if !span.is_dummy() { span } else { tcx.def_span(instance.def_id()) };
3115 self.handle_fn_abi_err(err, span, FnAbiRequest::OfInstance { instance, extra_args })
// Blanket impl: any `FnAbiOfHelpers` context gets `FnAbiOf` for free.
3121 impl<'tcx, C: FnAbiOfHelpers<'tcx>> FnAbiOf<'tcx> for C {}
/// Query provider backing `tcx.fn_abi_of_fn_ptr`: computes the ABI of an
/// indirect (`fn`-pointer) call. No caller-location argument, no def-id,
/// and never a virtual call (hence the trailing `None, None, false`).
3123 fn fn_abi_of_fn_ptr<'tcx>(
3125 query: ty::ParamEnvAnd<'tcx, (ty::PolyFnSig<'tcx>, &'tcx ty::List<Ty<'tcx>>)>,
3126 ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
3127 let (param_env, (sig, extra_args)) = query.into_parts();
3129 LayoutCx { tcx, param_env }.fn_abi_new_uncached(sig, extra_args, None, None, false)
/// Query provider backing `tcx.fn_abi_of_instance`: computes the ABI for a
/// concrete `Instance`, appending the implicit caller-location argument when
/// the instance requires it, and flagging virtual-call instances so the
/// `self` pointer is thinned.
3132 fn fn_abi_of_instance<'tcx>(
3134 query: ty::ParamEnvAnd<'tcx, (ty::Instance<'tcx>, &'tcx ty::List<Ty<'tcx>>)>,
3135 ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
3136 let (param_env, (instance, extra_args)) = query.into_parts();
3138 let sig = instance.fn_sig_for_fn_abi(tcx, param_env);
3140 let caller_location = if instance.def.requires_caller_location(tcx) {
3141 Some(tcx.caller_location_ty())
3146 LayoutCx { tcx, param_env }.fn_abi_new_uncached(
3150 Some(instance.def_id()),
3151 matches!(instance.def, ty::InstanceDef::Virtual(..)),
3155 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
3156 // FIXME(eddyb) perhaps group the signature/type-containing (or all of them?)
3157 // arguments of this method, into a separate `struct`.
// Core (uncached) `FnAbi` construction: normalizes the signature, picks the
// calling convention, computes per-argument layouts/attributes, then runs
// the target-specific adjustment pass and arena-allocates the result.
// Several lines (mostly closing braces and error-message continuations)
// are elided in this view.
3158 fn fn_abi_new_uncached(
3160 sig: ty::PolyFnSig<'tcx>,
3161 extra_args: &[Ty<'tcx>],
3162 caller_location: Option<Ty<'tcx>>,
3163 fn_def_id: Option<DefId>,
3164 // FIXME(eddyb) replace this with something typed, like an `enum`.
3165 force_thin_self_ptr: bool,
3166 ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
3167 debug!("fn_abi_new_uncached({:?}, {:?})", sig, extra_args);
3169 let sig = self.tcx.normalize_erasing_late_bound_regions(self.param_env, sig);
3171 let conv = conv_from_spec_abi(self.tcx(), sig.abi);
// For "rust-call" functions the last (tuple) parameter is untupled into
// the extra-args list; otherwise extra args only come from C variadics.
3173 let mut inputs = sig.inputs();
3174 let extra_args = if sig.abi == RustCall {
3175 assert!(!sig.c_variadic && extra_args.is_empty());
3177 if let Some(input) = sig.inputs().last() {
3178 if let ty::Tuple(tupled_arguments) = input.kind() {
3179 inputs = &sig.inputs()[0..sig.inputs().len() - 1];
3183 "argument to function with \"rust-call\" ABI \
3189 "argument to function with \"rust-call\" ABI \
3194 assert!(sig.c_variadic || extra_args.is_empty());
// Target quirks: these platforms do NOT ignore zero-sized arguments
// (see the ZST handling in `arg_of` below).
3198 let target = &self.tcx.sess.target;
3199 let target_env_gnu_like = matches!(&target.env[..], "gnu" | "musl" | "uclibc");
3200 let win_x64_gnu = target.os == "windows" && target.arch == "x86_64" && target.env == "gnu";
3201 let linux_s390x_gnu_like =
3202 target.os == "linux" && target.arch == "s390x" && target_env_gnu_like;
3203 let linux_sparc64_gnu_like =
3204 target.os == "linux" && target.arch == "sparc64" && target_env_gnu_like;
3205 let linux_powerpc_gnu_like =
3206 target.os == "linux" && target.arch == "powerpc" && target_env_gnu_like;
3208 let rust_abi = matches!(sig.abi, RustIntrinsic | PlatformIntrinsic | Rust | RustCall);
3210 // Handle safe Rust thin and fat pointers.
3211 let adjust_for_rust_scalar = |attrs: &mut ArgAttributes,
3213 layout: TyAndLayout<'tcx>,
3216 // Booleans are always a noundef i1 that needs to be zero-extended.
3217 if scalar.is_bool() {
3218 attrs.ext(ArgExtension::Zext);
3219 attrs.set(ArgAttribute::NoUndef);
3223 // Scalars which have invalid values cannot be undef.
3224 if !scalar.is_always_valid(self) {
3225 attrs.set(ArgAttribute::NoUndef);
3228 // Only pointer types handled below.
3229 let Scalar::Initialized { value: Pointer, valid_range} = scalar else { return };
3231 if !valid_range.contains(0) {
3232 attrs.set(ArgAttribute::NonNull);
3235 if let Some(pointee) = layout.pointee_info_at(self, offset) {
3236 if let Some(kind) = pointee.safe {
3237 attrs.pointee_align = Some(pointee.align);
3239 // `Box` (`UniqueBorrowed`) are not necessarily dereferenceable
3240 // for the entire duration of the function as they can be deallocated
3241 // at any time. Set their valid size to 0.
3242 attrs.pointee_size = match kind {
3243 PointerKind::UniqueOwned => Size::ZERO,
3247 // `Box`, `&T`, and `&mut T` cannot be undef.
3248 // Note that this only applies to the value of the pointer itself;
3249 // this attribute doesn't make it UB for the pointed-to data to be undef.
3250 attrs.set(ArgAttribute::NoUndef);
3252 // `Box` pointer parameters never alias because ownership is transferred
3253 // `&mut` pointer parameters never alias other parameters,
3254 // or mutable global data
3256 // `&T` where `T` contains no `UnsafeCell<U>` is immutable,
3257 // and can be marked as both `readonly` and `noalias`, as
3258 // LLVM's definition of `noalias` is based solely on memory
3259 // dependencies rather than pointer equality
3261 // Due to past miscompiles in LLVM, we apply a separate NoAliasMutRef attribute
3262 // for UniqueBorrowed arguments, so that the codegen backend can decide whether
3263 // or not to actually emit the attribute. It can also be controlled with the
3264 // `-Zmutable-noalias` debugging option.
3265 let no_alias = match kind {
3266 PointerKind::Shared | PointerKind::UniqueBorrowed => false,
3267 PointerKind::UniqueOwned => true,
3268 PointerKind::Frozen => !is_return,
3271 attrs.set(ArgAttribute::NoAlias);
3274 if kind == PointerKind::Frozen && !is_return {
3275 attrs.set(ArgAttribute::ReadOnly);
3278 if kind == PointerKind::UniqueBorrowed && !is_return {
3279 attrs.set(ArgAttribute::NoAliasMutRef);
// Computes the `ArgAbi` for one argument (or, with `arg_idx == None`,
// the return value).
3285 let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| -> Result<_, FnAbiError<'tcx>> {
3286 let is_return = arg_idx.is_none();
3288 let layout = self.layout_of(ty)?;
3289 let layout = if force_thin_self_ptr && arg_idx == Some(0) {
3290 // Don't pass the vtable, it's not an argument of the virtual fn.
3291 // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
3292 // or `&/&mut dyn Trait` because this is special-cased elsewhere in codegen
3293 make_thin_self_ptr(self, layout)
3298 let mut arg = ArgAbi::new(self, layout, |layout, scalar, offset| {
3299 let mut attrs = ArgAttributes::new();
3300 adjust_for_rust_scalar(&mut attrs, scalar, *layout, offset, is_return);
3304 if arg.layout.is_zst() {
3305 // For some forsaken reason, x86_64-pc-windows-gnu
3306 // doesn't ignore zero-sized struct arguments.
3307 // The same is true for {s390x,sparc64,powerpc}-unknown-linux-{gnu,musl,uclibc}.
3311 && !linux_s390x_gnu_like
3312 && !linux_sparc64_gnu_like
3313 && !linux_powerpc_gnu_like)
3315 arg.mode = PassMode::Ignore;
// Assemble the full `FnAbi`: return slot, then declared inputs followed
// by untupled extra args and the optional caller-location argument.
3322 let mut fn_abi = FnAbi {
3323 ret: arg_of(sig.output(), None)?,
3327 .chain(extra_args.iter().copied())
3328 .chain(caller_location)
3330 .map(|(i, ty)| arg_of(ty, Some(i)))
3331 .collect::<Result<_, _>>()?,
3332 c_variadic: sig.c_variadic,
3333 fixed_count: inputs.len(),
3335 can_unwind: fn_can_unwind(self.tcx(), fn_def_id, sig.abi),
3337 self.fn_abi_adjust_for_abi(&mut fn_abi, sig.abi)?;
3338 debug!("fn_abi_new_uncached = {:?}", fn_abi);
3339 Ok(self.tcx.arena.alloc(fn_abi))
// Post-processes an already-computed `FnAbi` in place for the calling
// convention `abi`: for the Rust-family ABIs it rewrites how the return
// value and each argument are passed (indirect for unsized/oversized or
// SIMD values, an integer-register cast for small aggregates); other ABIs
// reach the target-specific `adjust_for_foreign_abi` call at the bottom.
// NOTE(review): this excerpt elides some original lines (the `&self`/`abi`
// parameters, several match arms, and closing braces); comments below
// describe only what the visible code shows.
3342 fn fn_abi_adjust_for_abi(
3344 fn_abi: &mut FnAbi<'tcx, Ty<'tcx>>,
3346 ) -> Result<(), FnAbiError<'tcx>> {
// `Unadjusted` is special-cased up front. Its arm body is elided in this
// view — presumably an early return leaving `fn_abi` untouched; TODO confirm.
3347 if abi == SpecAbi::Unadjusted {
// Only the Rust-family ABIs get the per-argument fixups defined below.
3351 if abi == SpecAbi::Rust
3352 || abi == SpecAbi::RustCall
3353 || abi == SpecAbi::RustIntrinsic
3354 || abi == SpecAbi::PlatformIntrinsic
// `fixup` mutates one `ArgAbi` in place; it is applied to the return
// slot and (in the loop below) to every argument.
3356 let fixup = |arg: &mut ArgAbi<'tcx, Ty<'tcx>>| {
// Arguments already marked ignored need no adjustment.
3357 if arg.is_ignore() {
// Dispatch on the argument's layout ABI.
3361 match arg.layout.abi {
// Aggregates fall through to the size-based handling further down.
3362 Abi::Aggregate { .. } => {}
3364 // This is a fun case! The gist of what this is doing is
3365 // that we want callers and callees to always agree on the
3366 // ABI of how they pass SIMD arguments. If we were to *not*
3367 // make these arguments indirect then they'd be immediates
3368 // in LLVM, which means that they'd used whatever the
3369 // appropriate ABI is for the callee and the caller. That
3370 // means, for example, if the caller doesn't have AVX
3371 // enabled but the callee does, then passing an AVX argument
3372 // across this boundary would cause corrupt data to show up.
3374 // This problem is fixed by unconditionally passing SIMD
3375 // arguments through memory between callers and callees
3376 // which should get them all to agree on ABI regardless of
3377 // target feature sets. Some more information about this
3378 // issue can be found in #44367.
3380 // Note that the platform intrinsic ABI is exempt here as
3381 // that's how we connect up to LLVM and it's unstable
3382 // anyway, we control all calls to it in libstd.
// (The arm's pattern is elided in this view; per the comment above this
// guard applies to SIMD/vector arguments.) The guard keeps values in
// registers for the platform-intrinsic ABI or when the target opts out
// of indirect SIMD passing via `simd_types_indirect`.
3384 if abi != SpecAbi::PlatformIntrinsic
3385 && self.tcx.sess.target.simd_types_indirect =>
3387 arg.make_indirect();
// Size-based handling: unsized values, and values wider than a target
// pointer, are passed indirectly (by reference).
3394 let size = arg.layout.size;
3395 if arg.layout.is_unsized() || size > Pointer.size(self) {
3396 arg.make_indirect();
3398 // We want to pass small aggregates as immediates, but using
3399 // a LLVM aggregate type for this leads to bad optimizations,
3400 // so we pick an appropriately sized integer type instead.
3401 arg.cast_to(Reg { kind: RegKind::Integer, size });
// Apply the fixup to the return place first, then to every argument.
3404 fixup(&mut fn_abi.ret);
3405 for arg in &mut fn_abi.args {
// Delegate to the target-/foreign-ABI adjustment (C, system, etc.).
// NOTE(review): the lines between the loop and this call are elided, so
// whether the Rust-family branch returns before reaching here is not
// visible — confirm against the full file.
3409 fn_abi.adjust_for_foreign_abi(self, abi)?;
3416 fn make_thin_self_ptr<'tcx>(
3417 cx: &(impl HasTyCtxt<'tcx> + HasParamEnv<'tcx>),
3418 layout: TyAndLayout<'tcx>,
3419 ) -> TyAndLayout<'tcx> {
3421 let fat_pointer_ty = if layout.is_unsized() {
3422 // unsized `self` is passed as a pointer to `self`
3423 // FIXME (mikeyhew) change this to use &own if it is ever added to the language
3424 tcx.mk_mut_ptr(layout.ty)
3427 Abi::ScalarPair(..) => (),
3428 _ => bug!("receiver type has unsupported layout: {:?}", layout),
3431 // In the case of Rc<Self>, we need to explicitly pass a *mut RcBox<Self>
3432 // with a Scalar (not ScalarPair) ABI. This is a hack that is understood
3433 // elsewhere in the compiler as a method on a `dyn Trait`.
3434 // To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes until we
3435 // get a built-in pointer type
3436 let mut fat_pointer_layout = layout;
3437 'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr()
3438 && !fat_pointer_layout.ty.is_region_ptr()
3440 for i in 0..fat_pointer_layout.fields.count() {
3441 let field_layout = fat_pointer_layout.field(cx, i);
3443 if !field_layout.is_zst() {
3444 fat_pointer_layout = field_layout;
3445 continue 'descend_newtypes;
3449 bug!("receiver has no non-zero-sized fields {:?}", fat_pointer_layout);
3452 fat_pointer_layout.ty
3455 // we now have a type like `*mut RcBox<dyn Trait>`
3456 // change its layout to that of `*mut ()`, a thin pointer, but keep the same type
3457 // this is understood as a special case elsewhere in the compiler
3458 let unit_ptr_ty = tcx.mk_mut_ptr(tcx.mk_unit());
3463 // NOTE(eddyb) using an empty `ParamEnv`, and `unwrap`-ing the `Result`
3464 // should always work because the type is always `*mut ()`.
3465 ..tcx.layout_of(ty::ParamEnv::reveal_all().and(unit_ptr_ty)).unwrap()