1 use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
2 use crate::mir::{GeneratorLayout, GeneratorSavedLocal};
3 use crate::ty::normalize_erasing_regions::NormalizationError;
4 use crate::ty::subst::Subst;
6 self, layout_sanity_check::sanity_check_layout, subst::SubstsRef, EarlyBinder, ReprOptions, Ty,
10 use rustc_attr as attr;
12 use rustc_hir::def_id::DefId;
13 use rustc_hir::lang_items::LangItem;
14 use rustc_index::bit_set::BitSet;
15 use rustc_index::vec::{Idx, IndexVec};
16 use rustc_session::{config::OptLevel, DataTypeKind, FieldInfo, SizeKind, VariantInfo};
17 use rustc_span::symbol::Symbol;
18 use rustc_span::{Span, DUMMY_SP};
19 use rustc_target::abi::call::{
20 ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, Conv, FnAbi, PassMode, Reg, RegKind,
22 use rustc_target::abi::*;
23 use rustc_target::spec::{abi::Abi as SpecAbi, HasTargetSpec, PanicStrategy, Target};
25 use std::cmp::{self, Ordering};
28 use std::num::NonZeroUsize;
31 use rand::{seq::SliceRandom, SeedableRng};
32 use rand_xoshiro::Xoshiro128StarStar;
// Registers this module's layout-related query providers
// (`layout_of`, `fn_abi_of_fn_ptr`, `fn_abi_of_instance`) into the global
// provider table, keeping every other provider unchanged via the
// struct-update (`..*providers`) syntax.
34 pub fn provide(providers: &mut ty::query::Providers) {
36 ty::query::Providers { layout_of, fn_abi_of_fn_ptr, fn_abi_of_instance, ..*providers };
// Extension trait adding rustc-specific conversions to `rustc_target`'s
// `Integer` (which only models abstract integer widths).
39 pub trait IntegerExt {
// Converts this integer width plus a signedness flag back into a `Ty`.
40 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx>;
// Maps an attribute-level `#[repr(..)]` integer type to an `Integer`.
41 fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
// Maps the HIR-level signed integer type to an `Integer` width.
42 fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer;
// Maps the HIR-level unsigned integer type to an `Integer` width.
43 fn from_uint_ty<C: HasDataLayout>(cx: &C, uty: ty::UintTy) -> Integer;
53 impl IntegerExt for Integer {
// Maps (width, signedness) to the corresponding interned primitive type,
// e.g. `(I8, false)` -> `u8` and `(I128, true)` -> `i128`.
55 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx> {
56 match (*self, signed) {
// Unsigned widths first...
57 (I8, false) => tcx.types.u8,
58 (I16, false) => tcx.types.u16,
59 (I32, false) => tcx.types.u32,
60 (I64, false) => tcx.types.u64,
61 (I128, false) => tcx.types.u128,
// ...then the signed widths.
62 (I8, true) => tcx.types.i8,
63 (I16, true) => tcx.types.i16,
64 (I32, true) => tcx.types.i32,
65 (I64, true) => tcx.types.i64,
66 (I128, true) => tcx.types.i128,
70 /// Gets the Integer type from an attr::IntType.
71 fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
72 let dl = cx.data_layout();
// Signedness is irrelevant for the width, so signed/unsigned pairs share arms.
75 attr::SignedInt(ast::IntTy::I8) | attr::UnsignedInt(ast::UintTy::U8) => I8,
76 attr::SignedInt(ast::IntTy::I16) | attr::UnsignedInt(ast::UintTy::U16) => I16,
77 attr::SignedInt(ast::IntTy::I32) | attr::UnsignedInt(ast::UintTy::U32) => I32,
78 attr::SignedInt(ast::IntTy::I64) | attr::UnsignedInt(ast::UintTy::U64) => I64,
79 attr::SignedInt(ast::IntTy::I128) | attr::UnsignedInt(ast::UintTy::U128) => I128,
// `isize`/`usize` resolve to the target's pointer-sized integer, read
// from the data layout.
80 attr::SignedInt(ast::IntTy::Isize) | attr::UnsignedInt(ast::UintTy::Usize) => {
81 dl.ptr_sized_integer()
// Maps a `ty::IntTy` to its `Integer` width; `isize` is target-dependent
// and resolved through the data layout.
86 fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer {
89 ty::IntTy::I16 => I16,
90 ty::IntTy::I32 => I32,
91 ty::IntTy::I64 => I64,
92 ty::IntTy::I128 => I128,
93 ty::IntTy::Isize => cx.data_layout().ptr_sized_integer(),
// Unsigned counterpart of `from_int_ty`: maps a `ty::UintTy` to its
// `Integer` width, with `usize` resolved through the data layout.
96 fn from_uint_ty<C: HasDataLayout>(cx: &C, ity: ty::UintTy) -> Integer {
99 ty::UintTy::U16 => I16,
100 ty::UintTy::U32 => I32,
101 ty::UintTy::U64 => I64,
102 ty::UintTy::U128 => I128,
103 ty::UintTy::Usize => cx.data_layout().ptr_sized_integer(),
107 /// Finds the appropriate Integer type and signedness for the given
108 /// signed discriminant range and `#[repr]` attribute.
109 /// N.B.: `u128` values above `i128::MAX` will be treated as signed, but
110 /// that shouldn't affect anything, other than maybe debuginfo.
117 ) -> (Integer, bool) {
118 // Theoretically, negative values could be larger in unsigned representation
119 // than the unsigned representation of the signed minimum. However, if there
120 // are any negative values, the only valid unsigned representation is u128
121 // which can fit all i128 values, so the result remains unaffected.
122 let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
123 let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));
// An explicit `#[repr(<int>)]` hint wins: use exactly that width (after
// checking it can actually hold the discriminant range) and return early.
125 if let Some(ity) = repr.int {
126 let discr = Integer::from_attr(&tcx, ity);
127 let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
// Diagnostic message emitted when the `#[repr]` hint is narrower than
// the computed fit.
130 "Integer::repr_discr: `#[repr]` hint too small for \
131 discriminant range of enum `{}",
135 return (discr, ity.is_signed());
// No explicit hint: pick a minimum width depending on the repr.
138 let at_least = if repr.c() {
139 // This is usually I32, however it can be different on some platforms,
140 // notably hexagon and arm-none/thumb-none
141 tcx.data_layout().c_enum_min_size
143 // repr(Rust) enums try to be as small as possible
147 // If there are no negative values, we can use the unsigned fit.
149 (cmp::max(unsigned_fit, at_least), false)
// Otherwise a signed representation is required.
151 (cmp::max(signed_fit, at_least), true)
// Extension trait adding `Ty` conversions to `rustc_target`'s `Primitive`.
156 pub trait PrimitiveExt {
// Converts this primitive to the `Ty` it most directly corresponds to.
157 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
// Converts this primitive to an integer `Ty` (see the impl for details).
158 fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
161 impl PrimitiveExt for Primitive {
// Maps a layout-level primitive to a concrete `Ty`. `Pointer` has no
// unique source-level type, so `*mut ()` is used as a stand-in.
163 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
165 Int(i, signed) => i.to_ty(tcx, signed),
166 F32 => tcx.types.f32,
167 F64 => tcx.types.f64,
168 Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
172 /// Return an *integer* type matching this primitive.
173 /// Useful in particular when dealing with enum discriminants.
175 fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
177 Int(i, signed) => i.to_ty(tcx, signed),
// Pointers are reinterpreted as the pointer-sized unsigned integer.
178 Pointer => tcx.types.usize,
// Floats have no meaningful integer counterpart here; reaching this
// arm is a compiler bug.
179 F32 | F64 => bug!("floats do not have an int type"),
184 /// The first half of a fat pointer.
186 /// - For a trait object, this is the address of the box.
187 /// - For a slice, this is the base address.
// Field index 0 of the (data, metadata) pair layout.
188 pub const FAT_PTR_ADDR: usize = 0;
190 /// The second half of a fat pointer.
192 /// - For a trait object, this is the address of the vtable.
193 /// - For a slice, this is the length.
// Field index 1 of the (data, metadata) pair layout.
194 pub const FAT_PTR_EXTRA: usize = 1;
196 /// The maximum supported number of lanes in a SIMD vector.
198 /// This value is selected based on backend support:
199 /// * LLVM does not appear to have a vector width limit.
200 /// * Cranelift stores the base-2 log of the lane count in a 4 bit integer.
// 1 << 0xF == 2^15 == 32768: the largest log2 lane count representable in
// 4 bits is 15, so 2^15 lanes is the cap.
201 pub const MAX_SIMD_LANES: u64 = 1 << 0xF;
// Errors that layout computation can produce. (An `Unknown(Ty)` variant
// also exists — it is matched in the `Display` impl below.)
203 #[derive(Copy, Clone, Debug, HashStable, TyEncodable, TyDecodable)]
204 pub enum LayoutError<'tcx> {
// The type's size exceeds what the target architecture can represent.
206 SizeOverflow(Ty<'tcx>),
// The type could not be normalized, so no layout can be computed for it.
207 NormalizationFailure(Ty<'tcx>, NormalizationError<'tcx>),
// Human-readable rendering of layout errors, used in diagnostics.
210 impl<'tcx> fmt::Display for LayoutError<'tcx> {
211 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
213 LayoutError::Unknown(ty) => write!(f, "the type `{}` has an unknown layout", ty),
214 LayoutError::SizeOverflow(ty) => {
215 write!(f, "values of the type `{}` are too big for the current architecture", ty)
// Reports both the original type and the specific sub-type whose
// normalization failed.
217 LayoutError::NormalizationFailure(t, e) => write!(
219 "unable to determine layout for `{}` because `{}` cannot be normalized",
221 e.get_type_for_failure()
// Provider for the `layout_of` query: computes the layout of a type in a
// given `ParamEnv`, normalizing first so results are cached per normalized
// type, then sanity-checking the result.
227 #[instrument(skip(tcx, query), level = "debug")]
230 query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
231 ) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
232 let (param_env, ty) = query.into_parts();
// Layout is a post-typeck property: compute it with all opaque types revealed.
235 let param_env = param_env.with_reveal_all_normalized(tcx);
236 let unnormalized_ty = ty;
238 // FIXME: We might want to have two different versions of `layout_of`:
239 // One that can be called after typecheck has completed and can use
240 // `normalize_erasing_regions` here and another one that can be called
241 // before typecheck has completed and uses `try_normalize_erasing_regions`.
242 let ty = match tcx.try_normalize_erasing_regions(param_env, ty) {
// Normalization failure is surfaced as a dedicated layout error rather
// than an ICE.
244 Err(normalization_error) => {
245 return Err(LayoutError::NormalizationFailure(ty, normalization_error));
249 if ty != unnormalized_ty {
250 // Ensure this layout is also cached for the normalized type.
251 return tcx.layout_of(param_env.and(ty));
254 let cx = LayoutCx { tcx, param_env };
256 let layout = cx.layout_of_uncached(ty)?;
257 let layout = TyAndLayout { ty, layout };
// Side effect only: records layout statistics for `-Z print-type-sizes`-style
// output; does not alter the returned layout.
259 cx.record_layout_for_printing(layout);
// Debug-time validation of invariants on the computed layout.
261 sanity_check_layout(&cx, &layout);
// Context for layout computations: pairs a compiler context `C`
// (instantiated with `TyCtxt` in the impls below) with the `ParamEnv`
// under which layouts are computed.
266 #[derive(Clone, Copy)]
267 pub struct LayoutCx<'tcx, C> {
// The typing environment all layout queries in this context use.
269 pub param_env: ty::ParamEnv<'tcx>,
// `StructKind`: tells `univariant_uninterned` how a single-variant layout
// may be used, which constrains field reordering (see the `kind` matches
// in that function).
272 #[derive(Copy, Clone, Debug)]
274 /// A tuple, closure, or univariant which cannot be coerced to unsized.
276 /// A univariant, the last field of which may be coerced to unsized.
278 /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
279 Prefixed(Size, Align),
282 // Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
283 // This is used to go between `memory_index` (source field order to memory order)
284 // and `inverse_memory_index` (memory order to source field order).
285 // See also `FieldsShape::Arbitrary::memory_index` for more details.
286 // FIXME(eddyb) build a better abstraction for permutations, if possible.
// Precondition (per "bijective" above): `map` is a permutation of
// `0..map.len()`, so every slot of `inverse` is written exactly once.
287 fn invert_mapping(map: &[u32]) -> Vec<u32> {
288 let mut inverse = vec![0; map.len()];
289 for i in 0..map.len() {
290 inverse[map[i] as usize] = i as u32;
295 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
// Builds the layout of an immediate pair `(a, b)`: `a` at offset 0, `b` at
// the first offset after `a` that satisfies `b`'s alignment; total size is
// rounded up to the pair's combined alignment.
296 fn scalar_pair(&self, a: Scalar, b: Scalar) -> LayoutS<'tcx> {
297 let dl = self.data_layout();
298 let b_align = b.align(dl);
// Alignment is the max of both scalars' alignments and the target's
// minimum aggregate alignment.
299 let align = a.align(dl).max(b_align).max(dl.aggregate_align);
300 let b_offset = a.size(dl).align_to(b_align.abi);
301 let size = (b_offset + b.size(dl)).align_to(align.abi);
303 // HACK(nox): We iter on `b` and then `a` because `max_by_key`
304 // returns the last maximum.
305 let largest_niche = Niche::from_scalar(dl, b_offset, b)
307 .chain(Niche::from_scalar(dl, Size::ZERO, a))
308 .max_by_key(|niche| niche.available(dl));
// Single-variant layout with both fields in source order.
311 variants: Variants::Single { index: VariantIdx::new(0) },
312 fields: FieldsShape::Arbitrary {
313 offsets: vec![Size::ZERO, b_offset],
314 memory_index: vec![0, 1],
316 abi: Abi::ScalarPair(a, b),
// Computes an (uninterned) layout for a single-variant type from its field
// layouts, its `ReprOptions`, and a `StructKind`. Performs optional field
// reordering (or `-Z randomize-layout` shuffling), assigns offsets, tracks
// alignment/niches, and finally tries to upgrade the ABI from `Aggregate`
// to `Scalar`/`ScalarPair` where the layout permits.
323 fn univariant_uninterned(
326 fields: &[TyAndLayout<'_>],
329 ) -> Result<LayoutS<'tcx>, LayoutError<'tcx>> {
330 let dl = self.data_layout();
331 let pack = repr.pack;
// `#[repr(packed)]` and `#[repr(align)]` are mutually exclusive; typeck
// should have rejected this already, hence `delay_span_bug`.
332 if pack.is_some() && repr.align.is_some() {
333 self.tcx.sess.delay_span_bug(DUMMY_SP, "struct cannot be packed and aligned");
334 return Err(LayoutError::Unknown(ty));
337 let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };
// Start with the identity permutation (memory order == source order).
339 let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();
341 let optimize = !repr.inhibit_struct_field_reordering_opt();
// For `MaybeUnsized`, the last field must stay last (it may be the
// unsized tail), so it is excluded from reordering.
344 if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
345 let optimizing = &mut inverse_memory_index[..end];
346 let field_align = |f: &TyAndLayout<'_>| {
347 if let Some(pack) = pack { f.align.abi.min(pack) } else { f.align.abi }
350 // If `-Z randomize-layout` was enabled for the type definition we can shuffle
351 // the field ordering to try and catch some code making assumptions about layouts
352 // we don't guarantee
353 if repr.can_randomize_type_layout() {
354 // `ReprOptions.layout_seed` is a deterministic seed that we can use to
355 // randomize field ordering with
356 let mut rng = Xoshiro128StarStar::seed_from_u64(repr.field_shuffle_seed);
358 // Shuffle the ordering of the fields
359 optimizing.shuffle(&mut rng);
361 // Otherwise we just leave things alone and actually optimize the type's fields
364 StructKind::AlwaysSized | StructKind::MaybeUnsized => {
365 optimizing.sort_by_key(|&x| {
366 // Place ZSTs first to avoid "interesting offsets",
367 // especially with only one or two non-ZST fields.
368 let f = &fields[x as usize];
369 (!f.is_zst(), cmp::Reverse(field_align(f)))
373 StructKind::Prefixed(..) => {
374 // Sort in ascending alignment so that the layout stays optimal
375 // regardless of the prefix
376 optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
380 // FIXME(Kixiron): We can always shuffle fields within a given alignment class
381 // regardless of the status of `-Z randomize-layout`
385 // inverse_memory_index holds field indices by increasing memory offset.
386 // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
387 // We now write field offsets to the corresponding offset slot;
388 // field 5 with offset 0 puts 0 in offsets[5].
389 // At the bottom of this function, we invert `inverse_memory_index` to
390 // produce `memory_index` (see `invert_mapping`).
392 let mut sized = true;
393 let mut offsets = vec![Size::ZERO; fields.len()];
394 let mut offset = Size::ZERO;
395 let mut largest_niche = None;
396 let mut largest_niche_available = 0;
// A prefix (e.g. an enum tag) bumps the starting offset and alignment
// before any field is placed.
398 if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
400 if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
401 align = align.max(AbiAndPrefAlign::new(prefix_align));
402 offset = prefix_size.align_to(prefix_align);
// Place each field in memory order, accumulating offset and alignment.
405 for &i in &inverse_memory_index {
406 let field = fields[i as usize];
// An unsized field anywhere but last is a compiler invariant violation.
408 self.tcx.sess.delay_span_bug(
411 "univariant: field #{} of `{}` comes after unsized field",
418 if field.is_unsized() {
422 // Invariant: offset < dl.obj_size_bound() <= 1<<61
423 let field_align = if let Some(pack) = pack {
424 field.align.min(AbiAndPrefAlign::new(pack))
428 offset = offset.align_to(field_align.abi);
429 align = align.max(field_align);
431 debug!("univariant offset: {:?} field: {:#?}", offset, field);
432 offsets[i as usize] = offset;
// Track the single largest niche across all fields, translating its
// offset into struct-relative coordinates.
434 if let Some(mut niche) = field.largest_niche {
435 let available = niche.available(dl);
436 if available > largest_niche_available {
437 largest_niche_available = available;
438 niche.offset += offset;
439 largest_niche = Some(niche);
// Overflowing the target's object size bound is a layout error, not an ICE.
443 offset = offset.checked_add(field.size, dl).ok_or(LayoutError::SizeOverflow(ty))?;
446 if let Some(repr_align) = repr.align {
447 align = align.max(AbiAndPrefAlign::new(repr_align));
450 debug!("univariant min_size: {:?}", offset);
451 let min_size = offset;
453 // As stated above, inverse_memory_index holds field indices by increasing offset.
454 // This makes it an already-sorted view of the offsets vec.
455 // To invert it, consider:
456 // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
457 // Field 5 would be the first element, so memory_index is i:
458 // Note: if we didn't optimize, it's already right.
461 if optimize { invert_mapping(&inverse_memory_index) } else { inverse_memory_index };
463 let size = min_size.align_to(align.abi);
464 let mut abi = Abi::Aggregate { sized };
466 // Unpack newtype ABIs and find scalar pairs.
467 if sized && size.bytes() > 0 {
468 // All other fields must be ZSTs.
469 let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());
471 match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
472 // We have exactly one non-ZST field.
473 (Some((i, field)), None, None) => {
474 // Field fills the struct and it has a scalar or scalar pair ABI.
475 if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size
478 // For plain scalars, or vectors of them, we can't unpack
479 // newtypes for `#[repr(C)]`, as that affects C ABIs.
480 Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
483 // But scalar pairs are Rust-specific and get
484 // treated as aggregates by C ABIs anyway.
485 Abi::ScalarPair(..) => {
493 // Two non-ZST fields, and they're both scalars.
494 (Some((i, a)), Some((j, b)), None) => {
495 match (a.abi, b.abi) {
496 (Abi::Scalar(a), Abi::Scalar(b)) => {
497 // Order by the memory placement, not source order.
498 let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
// Build the canonical scalar-pair layout and only adopt the
// `ScalarPair` ABI if our computed offsets/alignment match it.
503 let pair = self.scalar_pair(a, b);
504 let pair_offsets = match pair.fields {
505 FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
506 assert_eq!(memory_index, &[0, 1]);
511 if offsets[i] == pair_offsets[0]
512 && offsets[j] == pair_offsets[1]
513 && align == pair.align
516 // We can use `ScalarPair` only when it matches our
517 // already computed layout (including `#[repr(C)]`).
// Any uninhabited field makes the whole struct uninhabited.
529 if fields.iter().any(|f| f.abi.is_uninhabited()) {
530 abi = Abi::Uninhabited;
534 variants: Variants::Single { index: VariantIdx::new(0) },
535 fields: FieldsShape::Arbitrary { offsets, memory_index },
543 fn layout_of_uncached(&self, ty: Ty<'tcx>) -> Result<Layout<'tcx>, LayoutError<'tcx>> {
545 let param_env = self.param_env;
546 let dl = self.data_layout();
547 let scalar_unit = |value: Primitive| {
548 let size = value.size(dl);
549 assert!(size.bits() <= 128);
550 Scalar::Initialized { value, valid_range: WrappingRange::full(size) }
553 |value: Primitive| tcx.intern_layout(LayoutS::scalar(self, scalar_unit(value)));
555 let univariant = |fields: &[TyAndLayout<'_>], repr: &ReprOptions, kind| {
556 Ok(tcx.intern_layout(self.univariant_uninterned(ty, fields, repr, kind)?))
558 debug_assert!(!ty.has_infer_types_or_consts());
560 Ok(match *ty.kind() {
562 ty::Bool => tcx.intern_layout(LayoutS::scalar(
564 Scalar::Initialized {
565 value: Int(I8, false),
566 valid_range: WrappingRange { start: 0, end: 1 },
569 ty::Char => tcx.intern_layout(LayoutS::scalar(
571 Scalar::Initialized {
572 value: Int(I32, false),
573 valid_range: WrappingRange { start: 0, end: 0x10FFFF },
576 ty::Int(ity) => scalar(Int(Integer::from_int_ty(dl, ity), true)),
577 ty::Uint(ity) => scalar(Int(Integer::from_uint_ty(dl, ity), false)),
578 ty::Float(fty) => scalar(match fty {
579 ty::FloatTy::F32 => F32,
580 ty::FloatTy::F64 => F64,
583 let mut ptr = scalar_unit(Pointer);
584 ptr.valid_range_mut().start = 1;
585 tcx.intern_layout(LayoutS::scalar(self, ptr))
589 ty::Never => tcx.intern_layout(LayoutS {
590 variants: Variants::Single { index: VariantIdx::new(0) },
591 fields: FieldsShape::Primitive,
592 abi: Abi::Uninhabited,
598 // Potentially-wide pointers.
599 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
600 let mut data_ptr = scalar_unit(Pointer);
601 if !ty.is_unsafe_ptr() {
602 data_ptr.valid_range_mut().start = 1;
605 let pointee = tcx.normalize_erasing_regions(param_env, pointee);
606 if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
607 return Ok(tcx.intern_layout(LayoutS::scalar(self, data_ptr)));
610 let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
611 let metadata = match unsized_part.kind() {
613 return Ok(tcx.intern_layout(LayoutS::scalar(self, data_ptr)));
615 ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)),
617 let mut vtable = scalar_unit(Pointer);
618 vtable.valid_range_mut().start = 1;
621 _ => return Err(LayoutError::Unknown(unsized_part)),
624 // Effectively a (ptr, meta) tuple.
625 tcx.intern_layout(self.scalar_pair(data_ptr, metadata))
628 ty::Dynamic(_, _, ty::DynStar) => {
629 let mut data = scalar_unit(Int(dl.ptr_sized_integer(), false));
630 data.valid_range_mut().start = 0;
631 let mut vtable = scalar_unit(Pointer);
632 vtable.valid_range_mut().start = 1;
633 tcx.intern_layout(self.scalar_pair(data, vtable))
636 // Arrays and slices.
637 ty::Array(element, mut count) => {
638 if count.has_projections() {
639 count = tcx.normalize_erasing_regions(param_env, count);
640 if count.has_projections() {
641 return Err(LayoutError::Unknown(ty));
645 let count = count.try_eval_usize(tcx, param_env).ok_or(LayoutError::Unknown(ty))?;
646 let element = self.layout_of(element)?;
648 element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;
651 if count != 0 && tcx.conservative_is_privately_uninhabited(param_env.and(ty)) {
654 Abi::Aggregate { sized: true }
657 let largest_niche = if count != 0 { element.largest_niche } else { None };
659 tcx.intern_layout(LayoutS {
660 variants: Variants::Single { index: VariantIdx::new(0) },
661 fields: FieldsShape::Array { stride: element.size, count },
664 align: element.align,
668 ty::Slice(element) => {
669 let element = self.layout_of(element)?;
670 tcx.intern_layout(LayoutS {
671 variants: Variants::Single { index: VariantIdx::new(0) },
672 fields: FieldsShape::Array { stride: element.size, count: 0 },
673 abi: Abi::Aggregate { sized: false },
675 align: element.align,
679 ty::Str => tcx.intern_layout(LayoutS {
680 variants: Variants::Single { index: VariantIdx::new(0) },
681 fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
682 abi: Abi::Aggregate { sized: false },
689 ty::FnDef(..) => univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?,
690 ty::Dynamic(_, _, ty::Dyn) | ty::Foreign(..) => {
691 let mut unit = self.univariant_uninterned(
694 &ReprOptions::default(),
695 StructKind::AlwaysSized,
698 Abi::Aggregate { ref mut sized } => *sized = false,
701 tcx.intern_layout(unit)
704 ty::Generator(def_id, substs, _) => self.generator_layout(ty, def_id, substs)?,
706 ty::Closure(_, ref substs) => {
707 let tys = substs.as_closure().upvar_tys();
709 &tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
710 &ReprOptions::default(),
711 StructKind::AlwaysSized,
717 if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };
720 &tys.iter().map(|k| self.layout_of(k)).collect::<Result<Vec<_>, _>>()?,
721 &ReprOptions::default(),
726 // SIMD vector types.
727 ty::Adt(def, substs) if def.repr().simd() => {
728 if !def.is_struct() {
729 // Should have yielded E0517 by now.
730 tcx.sess.delay_span_bug(
732 "#[repr(simd)] was applied to an ADT that is not a struct",
734 return Err(LayoutError::Unknown(ty));
737 // Supported SIMD vectors are homogeneous ADTs with at least one field:
739 // * #[repr(simd)] struct S(T, T, T, T);
740 // * #[repr(simd)] struct S { x: T, y: T, z: T, w: T }
741 // * #[repr(simd)] struct S([T; 4])
743 // where T is a primitive scalar (integer/float/pointer).
745 // SIMD vectors with zero fields are not supported.
746 // (should be caught by typeck)
747 if def.non_enum_variant().fields.is_empty() {
748 tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
751 // Type of the first ADT field:
752 let f0_ty = def.non_enum_variant().fields[0].ty(tcx, substs);
754 // Heterogeneous SIMD vectors are not supported:
755 // (should be caught by typeck)
756 for fi in &def.non_enum_variant().fields {
757 if fi.ty(tcx, substs) != f0_ty {
758 tcx.sess.fatal(&format!("monomorphising heterogeneous SIMD type `{}`", ty));
762 // The element type and number of elements of the SIMD vector
763 // are obtained from:
765 // * the element type and length of the single array field, if
766 // the first field is of array type, or
768 // * the homogeneous field type and the number of fields.
769 let (e_ty, e_len, is_array) = if let ty::Array(e_ty, _) = f0_ty.kind() {
770 // First ADT field is an array:
772 // SIMD vectors with multiple array fields are not supported:
773 // (should be caught by typeck)
774 if def.non_enum_variant().fields.len() != 1 {
775 tcx.sess.fatal(&format!(
776 "monomorphising SIMD type `{}` with more than one array field",
781 // Extract the number of elements from the layout of the array field:
782 let FieldsShape::Array { count, .. } = self.layout_of(f0_ty)?.layout.fields() else {
783 return Err(LayoutError::Unknown(ty));
786 (*e_ty, *count, true)
788 // First ADT field is not an array:
789 (f0_ty, def.non_enum_variant().fields.len() as _, false)
792 // SIMD vectors of zero length are not supported.
793 // Additionally, lengths are capped at 2^16 as a fixed maximum backends must
796 // Can't be caught in typeck if the array length is generic.
798 tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
799 } else if e_len > MAX_SIMD_LANES {
800 tcx.sess.fatal(&format!(
801 "monomorphising SIMD type `{}` of length greater than {}",
806 // Compute the ABI of the element type:
807 let e_ly = self.layout_of(e_ty)?;
808 let Abi::Scalar(e_abi) = e_ly.abi else {
809 // This error isn't caught in typeck, e.g., if
810 // the element type of the vector is generic.
811 tcx.sess.fatal(&format!(
812 "monomorphising SIMD type `{}` with a non-primitive-scalar \
813 (integer/float/pointer) element type `{}`",
818 // Compute the size and alignment of the vector:
819 let size = e_ly.size.checked_mul(e_len, dl).ok_or(LayoutError::SizeOverflow(ty))?;
820 let align = dl.vector_align(size);
821 let size = size.align_to(align.abi);
823 // Compute the placement of the vector fields:
824 let fields = if is_array {
825 FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] }
827 FieldsShape::Array { stride: e_ly.size, count: e_len }
830 tcx.intern_layout(LayoutS {
831 variants: Variants::Single { index: VariantIdx::new(0) },
833 abi: Abi::Vector { element: e_abi, count: e_len },
834 largest_niche: e_ly.largest_niche,
841 ty::Adt(def, substs) => {
842 // Cache the field layouts.
849 .map(|field| self.layout_of(field.ty(tcx, substs)))
850 .collect::<Result<Vec<_>, _>>()
852 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
855 if def.repr().pack.is_some() && def.repr().align.is_some() {
856 self.tcx.sess.delay_span_bug(
857 tcx.def_span(def.did()),
858 "union cannot be packed and aligned",
860 return Err(LayoutError::Unknown(ty));
864 if def.repr().pack.is_some() { dl.i8_align } else { dl.aggregate_align };
866 if let Some(repr_align) = def.repr().align {
867 align = align.max(AbiAndPrefAlign::new(repr_align));
870 let optimize = !def.repr().inhibit_union_abi_opt();
871 let mut size = Size::ZERO;
872 let mut abi = Abi::Aggregate { sized: true };
873 let index = VariantIdx::new(0);
874 for field in &variants[index] {
875 assert!(!field.is_unsized());
876 align = align.max(field.align);
878 // If all non-ZST fields have the same ABI, forward this ABI
879 if optimize && !field.is_zst() {
880 // Discard valid range information and allow undef
881 let field_abi = match field.abi {
882 Abi::Scalar(x) => Abi::Scalar(x.to_union()),
883 Abi::ScalarPair(x, y) => {
884 Abi::ScalarPair(x.to_union(), y.to_union())
886 Abi::Vector { element: x, count } => {
887 Abi::Vector { element: x.to_union(), count }
889 Abi::Uninhabited | Abi::Aggregate { .. } => {
890 Abi::Aggregate { sized: true }
894 if size == Size::ZERO {
895 // first non ZST: initialize 'abi'
897 } else if abi != field_abi {
898 // different fields have different ABI: reset to Aggregate
899 abi = Abi::Aggregate { sized: true };
903 size = cmp::max(size, field.size);
906 if let Some(pack) = def.repr().pack {
907 align = align.min(AbiAndPrefAlign::new(pack));
910 return Ok(tcx.intern_layout(LayoutS {
911 variants: Variants::Single { index },
912 fields: FieldsShape::Union(
913 NonZeroUsize::new(variants[index].len())
914 .ok_or(LayoutError::Unknown(ty))?,
919 size: size.align_to(align.abi),
923 // A variant is absent if it's uninhabited and only has ZST fields.
924 // Present uninhabited variants only require space for their fields,
925 // but *not* an encoding of the discriminant (e.g., a tag value).
926 // See issue #49298 for more details on the need to leave space
927 // for non-ZST uninhabited data (mostly partial initialization).
928 let absent = |fields: &[TyAndLayout<'_>]| {
929 let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
930 let is_zst = fields.iter().all(|f| f.is_zst());
931 uninhabited && is_zst
933 let (present_first, present_second) = {
934 let mut present_variants = variants
936 .filter_map(|(i, v)| if absent(v) { None } else { Some(i) });
937 (present_variants.next(), present_variants.next())
939 let present_first = match present_first {
940 Some(present_first) => present_first,
941 // Uninhabited because it has no variants, or only absent ones.
942 None if def.is_enum() => {
943 return Ok(tcx.layout_of(param_env.and(tcx.types.never))?.layout);
945 // If it's a struct, still compute a layout so that we can still compute the
947 None => VariantIdx::new(0),
950 let is_struct = !def.is_enum() ||
951 // Only one variant is present.
952 (present_second.is_none() &&
953 // Representation optimizations are allowed.
954 !def.repr().inhibit_enum_layout_opt());
956 // Struct, or univariant enum equivalent to a struct.
957 // (Typechecking will reject discriminant-sizing attrs.)
959 let v = present_first;
960 let kind = if def.is_enum() || variants[v].is_empty() {
961 StructKind::AlwaysSized
963 let param_env = tcx.param_env(def.did());
964 let last_field = def.variant(v).fields.last().unwrap();
966 tcx.type_of(last_field.did).is_sized(tcx.at(DUMMY_SP), param_env);
968 StructKind::MaybeUnsized
970 StructKind::AlwaysSized
974 let mut st = self.univariant_uninterned(ty, &variants[v], &def.repr(), kind)?;
975 st.variants = Variants::Single { index: v };
977 if def.is_unsafe_cell() {
978 let hide_niches = |scalar: &mut _| match scalar {
979 Scalar::Initialized { value, valid_range } => {
980 *valid_range = WrappingRange::full(value.size(dl))
982 // Already doesn't have any niches
983 Scalar::Union { .. } => {}
986 Abi::Uninhabited => {}
987 Abi::Scalar(scalar) => hide_niches(scalar),
988 Abi::ScalarPair(a, b) => {
992 Abi::Vector { element, count: _ } => hide_niches(element),
993 Abi::Aggregate { sized: _ } => {}
995 st.largest_niche = None;
996 return Ok(tcx.intern_layout(st));
999 let (start, end) = self.tcx.layout_scalar_valid_range(def.did());
1001 Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
1002 // the asserts ensure that we are not using the
1003 // `#[rustc_layout_scalar_valid_range(n)]`
1004 // attribute to widen the range of anything as that would probably
1005 // result in UB somewhere
1006 // FIXME(eddyb) the asserts are probably not needed,
1007 // as larger validity ranges would result in missed
1008 // optimizations, *not* wrongly assuming the inner
1009 // value is valid. e.g. unions enlarge validity ranges,
1010 // because the values may be uninitialized.
1011 if let Bound::Included(start) = start {
1012 // FIXME(eddyb) this might be incorrect - it doesn't
1013 // account for wrap-around (end < start) ranges.
1014 let valid_range = scalar.valid_range_mut();
1015 assert!(valid_range.start <= start);
1016 valid_range.start = start;
1018 if let Bound::Included(end) = end {
1019 // FIXME(eddyb) this might be incorrect - it doesn't
1020 // account for wrap-around (end < start) ranges.
1021 let valid_range = scalar.valid_range_mut();
1022 assert!(valid_range.end >= end);
1023 valid_range.end = end;
1026 // Update `largest_niche` if we have introduced a larger niche.
1027 let niche = Niche::from_scalar(dl, Size::ZERO, *scalar);
1028 if let Some(niche) = niche {
1029 match st.largest_niche {
1030 Some(largest_niche) => {
1031 // Replace the existing niche even if they're equal,
1032 // because this one is at a lower offset.
1033 if largest_niche.available(dl) <= niche.available(dl) {
1034 st.largest_niche = Some(niche);
1037 None => st.largest_niche = Some(niche),
1042 start == Bound::Unbounded && end == Bound::Unbounded,
1043 "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
1049 return Ok(tcx.intern_layout(st));
1052 // At this point, we have handled all unions and
1053 // structs. (We have also handled univariant enums
1054 // that allow representation optimization.)
1055 assert!(def.is_enum());
1057 // Until we've decided whether to use the tagged or
1058 // niche filling LayoutS, we don't want to intern the
1059 // variant layouts, so we can't store them in the
1060 // overall LayoutS. Store the overall LayoutS
1061 // and the variant LayoutSs here until then.
1062 struct TmpLayout<'tcx> {
1063 layout: LayoutS<'tcx>,
1064 variants: IndexVec<VariantIdx, LayoutS<'tcx>>,
// Closure computing a candidate niche-filling layout for this enum:
// the largest variant is laid out "untagged" and the other variants are
// encoded in a niche of one of its fields. Returns Ok(None) when the
// optimization does not apply.
1067 let calculate_niche_filling_layout =
1068 || -> Result<Option<TmpLayout<'tcx>>, LayoutError<'tcx>> {
1069 // The current code for niche-filling relies on variant indices
1070 // instead of actual discriminants, so enums with
1071 // explicit discriminants (RFC #2363) would misbehave.
1072 if def.repr().inhibit_enum_layout_opt()
1076 .any(|(i, v)| v.discr != ty::VariantDiscr::Relative(i.as_u32()))
// A single-variant enum has nothing to encode in a niche.
1081 if variants.len() < 2 {
// Lay out every variant as a standalone (always-sized) struct first.
1085 let mut align = dl.aggregate_align;
1086 let mut variant_layouts = variants
1089 let mut st = self.univariant_uninterned(
1093 StructKind::AlwaysSized,
1095 st.variants = Variants::Single { index: j };
1097 align = align.max(st.align);
1101 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
// The largest (by size) variant becomes the untagged one.
1103 let largest_variant_index = match variant_layouts
1105 .max_by_key(|(_i, layout)| layout.size.bytes())
1106 .map(|(i, _layout)| i)
1108 None => return Ok(None),
// Determine the contiguous range of variants that actually need a
// discriminant value (absent variants at either end are excluded).
1112 let all_indices = VariantIdx::new(0)..=VariantIdx::new(variants.len() - 1);
1113 let needs_disc = |index: VariantIdx| {
1114 index != largest_variant_index && !absent(&variants[index])
1116 let niche_variants = all_indices.clone().find(|v| needs_disc(*v)).unwrap()
1117 ..=all_indices.rev().find(|v| needs_disc(*v)).unwrap();
1119 let count = niche_variants.size_hint().1.unwrap() as u128;
1121 // Find the field with the largest niche
1122 let (field_index, niche, (niche_start, niche_scalar)) = match variants
1123 [largest_variant_index]
1126 .filter_map(|(j, field)| Some((j, field.largest_niche?)))
1127 .max_by_key(|(_, niche)| niche.available(dl))
// `reserve` fails (→ Ok(None)) if the niche can't hold `count` values.
1128 .and_then(|(j, niche)| Some((j, niche, niche.reserve(self, count)?)))
1130 None => return Ok(None),
// Absolute position of the niche within the enum layout.
1134 let niche_offset = niche.offset
1135 + variant_layouts[largest_variant_index].fields.offset(field_index);
1136 let niche_size = niche.value.size(dl);
1137 let size = variant_layouts[largest_variant_index].size.align_to(align.abi);
// Every other variant must fit either entirely before the niche or be
// relocatable to after it; otherwise the optimization is abandoned.
1139 let all_variants_fit =
1140 variant_layouts.iter_enumerated_mut().all(|(i, layout)| {
1141 if i == largest_variant_index {
// The niche is now occupied by the tag, so it is no longer
// available to enclosing types.
1145 layout.largest_niche = None;
1147 if layout.size <= niche_offset {
1148 // This variant will fit before the niche.
1152 // Determine if it'll fit after the niche.
1153 let this_align = layout.align.abi;
1154 let this_offset = (niche_offset + niche_size).align_to(this_align);
1156 if this_offset + layout.size > size {
1160 // It'll fit, but we need to make some adjustments.
1161 match layout.fields {
1162 FieldsShape::Arbitrary { ref mut offsets, .. } => {
1163 for (j, offset) in offsets.iter_mut().enumerate() {
// ZST fields keep their offsets; only real data is shifted.
1164 if !variants[i][j].is_zst() {
1165 *offset += this_offset;
1170 panic!("Layout of fields should be Arbitrary for variants")
1174 // It can't be a Scalar or ScalarPair because the offset isn't 0.
1175 if !layout.abi.is_uninhabited() {
1176 layout.abi = Abi::Aggregate { sized: true };
1178 layout.size += this_offset;
1183 if !all_variants_fit {
1187 let largest_niche = Niche::from_scalar(dl, niche_offset, niche_scalar);
// Decide the overall ABI: reuse the untagged variant's scalar ABI when
// all other variants are zero-sized and size/align agree.
1189 let others_zst = variant_layouts.iter_enumerated().all(|(i, layout)| {
1190 i == largest_variant_index || layout.size == Size::ZERO
1192 let same_size = size == variant_layouts[largest_variant_index].size;
1193 let same_align = align == variant_layouts[largest_variant_index].align;
1195 let abi = if variant_layouts.iter().all(|v| v.abi.is_uninhabited()) {
1197 } else if same_size && same_align && others_zst {
1198 match variant_layouts[largest_variant_index].abi {
1199 // When the total alignment and size match, we can use the
1200 // same ABI as the scalar variant with the reserved niche.
1201 Abi::Scalar(_) => Abi::Scalar(niche_scalar),
1202 Abi::ScalarPair(first, second) => {
1203 // Only the niche is guaranteed to be initialised,
1204 // so use union layouts for the other primitive.
1205 if niche_offset == Size::ZERO {
1206 Abi::ScalarPair(niche_scalar, second.to_union())
1208 Abi::ScalarPair(first.to_union(), niche_scalar)
1211 _ => Abi::Aggregate { sized: true },
1214 Abi::Aggregate { sized: true }
// Assemble the candidate layout; the single "outer" field is the niche
// tag itself, and the variant layouts stay un-interned in TmpLayout.
1217 let layout = LayoutS {
1218 variants: Variants::Multiple {
1220 tag_encoding: TagEncoding::Niche {
1221 untagged_variant: largest_variant_index,
1226 variants: IndexVec::new(),
1228 fields: FieldsShape::Arbitrary {
1229 offsets: vec![niche_offset],
1230 memory_index: vec![0],
1238 Ok(Some(TmpLayout { layout, variants: variant_layouts }))
1241 let niche_filling_layout = calculate_niche_filling_layout()?;
// Compute the range of discriminant values actually taken by inhabited
// variants, to pick the smallest possible tag integer.
1243 let (mut min, mut max) = (i128::MAX, i128::MIN);
1244 let discr_type = def.repr().discr_type();
1245 let bits = Integer::from_attr(self, discr_type).size().bits();
1246 for (i, discr) in def.discriminants(tcx) {
// Uninhabited variants can never be constructed, so their
// discriminants don't constrain the tag range.
1247 if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
1250 let mut x = discr.val as i128;
1251 if discr_type.is_signed() {
1252 // sign extend the raw representation to be an i128
1253 x = (x << (128 - bits)) >> (128 - bits);
1262 // We might have no inhabited variants, so pretend there's at least one.
1263 if (min, max) == (i128::MAX, i128::MIN) {
1267 assert!(min <= max, "discriminant range is {}...{}", min, max);
1268 let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr(), min, max);
1270 let mut align = dl.aggregate_align;
1271 let mut size = Size::ZERO;
1273 // We're interested in the smallest alignment, so start large.
1274 let mut start_align = Align::from_bytes(256).unwrap();
1275 assert_eq!(Integer::for_align(dl, start_align), None);
1277 // repr(C) on an enum tells us to make a (tag, union) layout,
1278 // so we need to grow the prefix alignment to be at least
1279 // the alignment of the union. (This value is used both for
1280 // determining the alignment of the overall enum, and the
1281 // determining the alignment of the payload after the tag.)
1282 let mut prefix_align = min_ity.align(dl).abi;
1284 for fields in &variants {
1285 for field in fields {
1286 prefix_align = prefix_align.max(field.align.abi);
1291 // Create the set of structs that represent each variant.
1292 let mut layout_variants = variants
1294 .map(|(i, field_layouts)| {
// Each variant is prefixed with space for the tag.
1295 let mut st = self.univariant_uninterned(
1299 StructKind::Prefixed(min_ity.size(), prefix_align),
1301 st.variants = Variants::Single { index: i };
1302 // Find the first field we can't move later
1303 // to make room for a larger discriminant.
1305 st.fields.index_by_increasing_offset().map(|j| field_layouts[j])
1307 if !field.is_zst() || field.align.abi.bytes() != 1 {
1308 start_align = start_align.min(field.align.abi);
1312 size = cmp::max(size, st.size);
1313 align = align.max(st.align);
1316 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1318 // Align the maximum variant size to the largest alignment.
1319 size = size.align_to(align.abi);
1321 if size.bytes() >= dl.obj_size_bound() {
1322 return Err(LayoutError::SizeOverflow(ty));
// Sanity check: layout must never need a wider tag than typeck chose.
1325 let typeck_ity = Integer::from_attr(dl, def.repr().discr_type());
1326 if typeck_ity < min_ity {
1327 // It is a bug if Layout decided on a greater discriminant size than typeck for
1328 // some reason at this point (based on values discriminant can take on). Mostly
1329 // because this discriminant will be loaded, and then stored into variable of
1330 // type calculated by typeck. Consider such case (a bug): typeck decided on
1331 // byte-sized discriminant, but layout thinks we need a 16-bit to store all
1332 // discriminant values. That would be a bug, because then, in codegen, in order
1333 // to store this 16-bit discriminant into 8-bit sized temporary some of the
1334 // space necessary to represent would have to be discarded (or layout is wrong
1335 // on thinking it needs 16 bits)
1337 "layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
1341 // However, it is fine to make discr type however large (as an optimisation)
1342 // after this point – we’ll just truncate the value we load in codegen.
1345 // Check to see if we should use a different type for the
1346 // discriminant. We can safely use a type with the same size
1347 // as the alignment of the first field of each variant.
1348 // We increase the size of the discriminant to avoid LLVM copying
1349 // padding when it doesn't need to. This normally causes unaligned
1350 // load/stores and excessive memcpy/memset operations. By using a
1351 // bigger integer size, LLVM can be sure about its contents and
1352 // won't be so conservative.
1354 // Use the initial field alignment
1355 let mut ity = if def.repr().c() || def.repr().int.is_some() {
1358 Integer::for_align(dl, start_align).unwrap_or(min_ity)
1361 // If the alignment is not larger than the chosen discriminant size,
1362 // don't use the alignment as the final size.
1366 // Patch up the variants' first few fields.
// After widening the tag, shift the fields that sat flush against the
// old (narrower) tag so they sit after the new one.
1367 let old_ity_size = min_ity.size();
1368 let new_ity_size = ity.size();
1369 for variant in &mut layout_variants {
1370 match variant.fields {
1371 FieldsShape::Arbitrary { ref mut offsets, .. } => {
1373 if *i <= old_ity_size {
1374 assert_eq!(*i, old_ity_size);
1378 // We might be making the struct larger.
1379 if variant.size <= old_ity_size {
1380 variant.size = new_ity_size;
// The tag scalar: its valid range is the (wrapping) discriminant range,
// masked to the tag width.
1388 let tag_mask = ity.size().unsigned_int_max();
1389 let tag = Scalar::Initialized {
1390 value: Int(ity, signed),
1391 valid_range: WrappingRange {
1392 start: (min as u128 & tag_mask),
1393 end: (max as u128 & tag_mask),
1396 let mut abi = Abi::Aggregate { sized: true };
1398 if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
1399 abi = Abi::Uninhabited;
1400 } else if tag.size(dl) == size {
1401 // Make sure we only use scalar layout when the enum is entirely its
1402 // own tag (i.e. it has no padding nor any non-ZST variant fields).
1403 abi = Abi::Scalar(tag);
1405 // Try to use a ScalarPair for all tagged enums.
// This requires every variant to expose at most one non-ZST scalar
// field, all at the same primitive type and offset.
1406 let mut common_prim = None;
1407 let mut common_prim_initialized_in_all_variants = true;
1408 for (field_layouts, layout_variant) in iter::zip(&variants, &layout_variants) {
1409 let FieldsShape::Arbitrary { ref offsets, .. } = layout_variant.fields else {
1413 iter::zip(field_layouts, offsets).filter(|p| !p.0.is_zst());
1414 let (field, offset) = match (fields.next(), fields.next()) {
// No non-ZST field in this variant: the prim is uninit here.
1416 common_prim_initialized_in_all_variants = false;
1419 (Some(pair), None) => pair,
1425 let prim = match field.abi {
1426 Abi::Scalar(scalar) => {
1427 common_prim_initialized_in_all_variants &=
1428 matches!(scalar, Scalar::Initialized { .. });
1436 if let Some(pair) = common_prim {
1437 // This is pretty conservative. We could go fancier
1438 // by conflating things like i32 and u32, or even
1439 // realising that (u8, u8) could just cohabit with
1441 if pair != (prim, offset) {
1446 common_prim = Some((prim, offset));
1449 if let Some((prim, offset)) = common_prim {
1450 let prim_scalar = if common_prim_initialized_in_all_variants {
1453 // Common prim might be uninit.
1454 Scalar::Union { value: prim }
// Only accept the ScalarPair ABI if its computed offsets/size/align
// agree with the layout already decided above.
1456 let pair = self.scalar_pair(tag, prim_scalar);
1457 let pair_offsets = match pair.fields {
1458 FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
1459 assert_eq!(memory_index, &[0, 1]);
1464 if pair_offsets[0] == Size::ZERO
1465 && pair_offsets[1] == *offset
1466 && align == pair.align
1467 && size == pair.size
1469 // We can use `ScalarPair` only when it matches our
1470 // already computed layout (including `#[repr(C)]`).
1476 // If we pick a "clever" (by-value) ABI, we might have to adjust the ABI of the
1477 // variants to ensure they are consistent. This is because a downcast is
1478 // semantically a NOP, and thus should not affect layout.
1479 if matches!(abi, Abi::Scalar(..) | Abi::ScalarPair(..)) {
1480 for variant in &mut layout_variants {
1481 // We only do this for variants with fields; the others are not accessed anyway.
1482 // Also do not overwrite any already existing "clever" ABIs.
1483 if variant.fields.count() > 0
1484 && matches!(variant.abi, Abi::Aggregate { .. })
1487 // Also need to bump up the size and alignment, so that the entire value fits in here.
1488 variant.size = cmp::max(variant.size, size);
1489 variant.align.abi = cmp::max(variant.align.abi, align.abi);
1494 let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag);
// Assemble the tagged candidate; the single outer field is the tag.
1496 let tagged_layout = LayoutS {
1497 variants: Variants::Multiple {
1499 tag_encoding: TagEncoding::Direct,
1501 variants: IndexVec::new(),
1503 fields: FieldsShape::Arbitrary {
1504 offsets: vec![Size::ZERO],
1505 memory_index: vec![0],
1513 let tagged_layout = TmpLayout { layout: tagged_layout, variants: layout_variants };
// Choose between the tagged and the niche-filling candidates.
1515 let mut best_layout = match (tagged_layout, niche_filling_layout) {
1517 // Pick the smaller layout; otherwise,
1518 // pick the layout with the larger niche; otherwise,
1519 // pick tagged as it has simpler codegen.
1521 let niche_size = |tmp_l: &TmpLayout<'_>| {
1522 tmp_l.layout.largest_niche.map_or(0, |n| n.available(dl))
1525 tl.layout.size.cmp(&nl.layout.size),
1526 niche_size(&tl).cmp(&niche_size(&nl)),
1529 (Equal, Less) => nl,
1536 // Now we can intern the variant layouts and store them in the enum layout.
1537 best_layout.layout.variants = match best_layout.layout.variants {
1538 Variants::Multiple { tag, tag_encoding, tag_field, .. } => Variants::Multiple {
1542 variants: best_layout
1545 .map(|layout| tcx.intern_layout(layout))
1551 tcx.intern_layout(best_layout.layout)
1554 // Types with no meaningful known layout.
1555 ty::Projection(_) | ty::Opaque(..) => {
1556 // NOTE(eddyb) `layout_of` query should've normalized these away,
1557 // if that was possible, so there's no reason to try again here.
1558 return Err(LayoutError::Unknown(ty));
// These type kinds must never reach layout computation: seeing one
// here is a compiler bug, not a user-facing error.
1561 ty::Placeholder(..) | ty::GeneratorWitness(..) | ty::Infer(_) => {
1562 bug!("Layout::compute: unexpected type `{}`", ty)
// Still-generic or erroneous types simply have no known layout.
1565 ty::Bound(..) | ty::Param(_) | ty::Error(_) => {
1566 return Err(LayoutError::Unknown(ty));
1572 /// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
1573 #[derive(Clone, Debug, PartialEq)]
1574 enum SavedLocalEligibility {
// Seen in exactly one variant so far; eligible for overlap there.
1576 Assigned(VariantIdx),
1577 // FIXME: Use newtype_index so we aren't wasting bytes
// Stored in the common prefix; the index is its slot within the
// promoted prefix fields once assigned (None until then).
1578 Ineligible(Option<u32>),
1581 // When laying out generators, we divide our saved local fields into two
1582 // categories: overlap-eligible and overlap-ineligible.
1584 // Those fields which are ineligible for overlap go in a "prefix" at the
1585 // beginning of the layout, and always have space reserved for them.
1587 // Overlap-eligible fields are only assigned to one variant, so we lay
1588 // those fields out for each variant and put them right after the
1591 // Finally, in the layout details, we point to the fields from the
1592 // variants they are assigned to. It is possible for some fields to be
1593 // included in multiple variants. No field ever "moves around" in the
1594 // layout; its offset is always the same.
1596 // Also included in the layout are the upvars and the discriminant.
1597 // These are included as fields on the "outer" layout; they are not part
1599 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
1600 /// Compute the eligibility and assignment of each local.
///
/// Returns the set of locals that are ineligible for overlap (they go in
/// the generator's common prefix) together with the per-local eligibility
/// assignment. See the module comment above on overlap eligibility.
1601 fn generator_saved_local_eligibility(
1603 info: &GeneratorLayout<'tcx>,
1604 ) -> (BitSet<GeneratorSavedLocal>, IndexVec<GeneratorSavedLocal, SavedLocalEligibility>) {
1605 use SavedLocalEligibility::*;
1607 let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> =
1608 IndexVec::from_elem_n(Unassigned, info.field_tys.len());
1610 // The saved locals not eligible for overlap. These will get
1611 // "promoted" to the prefix of our generator.
1612 let mut ineligible_locals = BitSet::new_empty(info.field_tys.len());
1614 // Figure out which of our saved locals are fields in only
1615 // one variant. The rest are deemed ineligible for overlap.
1616 for (variant_index, fields) in info.variant_fields.iter_enumerated() {
1617 for local in fields {
1618 match assignments[*local] {
1620 assignments[*local] = Assigned(variant_index);
1623 // We've already seen this local at another suspension
1624 // point, so it is no longer a candidate.
1626 "removing local {:?} in >1 variant ({:?}, {:?})",
1631 ineligible_locals.insert(*local);
1632 assignments[*local] = Ineligible(None);
1639 // Next, check every pair of eligible locals to see if they
// conflict (are storage-live at the same time).
1641 for local_a in info.storage_conflicts.rows() {
1642 let conflicts_a = info.storage_conflicts.count(local_a);
1643 if ineligible_locals.contains(local_a) {
1647 for local_b in info.storage_conflicts.iter(local_a) {
1648 // local_a and local_b are storage live at the same time, therefore they
1649 // cannot overlap in the generator layout. The only way to guarantee
1650 // this is if they are in the same variant, or one is ineligible
1651 // (which means it is stored in every variant).
1652 if ineligible_locals.contains(local_b)
1653 || assignments[local_a] == assignments[local_b]
1658 // If they conflict, we will choose one to make ineligible.
1659 // This is not always optimal; it's just a greedy heuristic that
1660 // seems to produce good results most of the time.
1661 let conflicts_b = info.storage_conflicts.count(local_b);
// Evict the local with more conflicts: it is the more
// "troublesome" one and removing it frees up more pairs.
1662 let (remove, other) =
1663 if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
1664 ineligible_locals.insert(remove);
1665 assignments[remove] = Ineligible(None);
1666 trace!("removing local {:?} due to conflict with {:?}", remove, other);
1670 // Count the number of variants in use. If only one of them, then it is
1671 // impossible to overlap any locals in our layout. In this case it's
1672 // always better to make the remaining locals ineligible, so we can
1673 // lay them out with the other locals in the prefix and eliminate
1674 // unnecessary padding bytes.
1676 let mut used_variants = BitSet::new_empty(info.variant_fields.len());
1677 for assignment in &assignments {
1678 if let Assigned(idx) = assignment {
1679 used_variants.insert(*idx);
1682 if used_variants.count() < 2 {
1683 for assignment in assignments.iter_mut() {
1684 *assignment = Ineligible(None);
1686 ineligible_locals.insert_all();
1690 // Write down the order of our locals that will be promoted to the prefix.
1692 for (idx, local) in ineligible_locals.iter().enumerate() {
1693 assignments[local] = Ineligible(Some(idx as u32));
1696 debug!("generator saved local assignments: {:?}", assignments);
1698 (ineligible_locals, assignments)
1701 /// Compute the full generator layout.
///
/// Builds a prefix (upvars, tag, and the "promoted" overlap-ineligible
/// locals) shared by all variants, then lays out each variant's remaining
/// eligible locals after that prefix. Errors with `LayoutError::Unknown`
/// when the generator's MIR layout info is unavailable.
1702 fn generator_layout(
1705 def_id: hir::def_id::DefId,
1706 substs: SubstsRef<'tcx>,
1707 ) -> Result<Layout<'tcx>, LayoutError<'tcx>> {
1708 use SavedLocalEligibility::*;
// Saved-local types are stored generically; substitute the generator's
// actual substs before computing layouts.
1710 let subst_field = |ty: Ty<'tcx>| EarlyBinder(ty).subst(tcx, substs);
1712 let Some(info) = tcx.generator_layout(def_id) else {
1713 return Err(LayoutError::Unknown(ty));
1715 let (ineligible_locals, assignments) = self.generator_saved_local_eligibility(&info);
1717 // Build a prefix layout, including "promoting" all ineligible
1718 // locals as part of the prefix. We compute the layout of all of
1719 // these fields at once to get optimal packing.
// The tag is placed right after the upvars in the prefix.
1720 let tag_index = substs.as_generator().prefix_tys().count();
1722 // `info.variant_fields` already accounts for the reserved variants, so no need to add them.
1723 let max_discr = (info.variant_fields.len() - 1) as u128;
1724 let discr_int = Integer::fit_unsigned(max_discr);
1725 let discr_int_ty = discr_int.to_ty(tcx, false);
1726 let tag = Scalar::Initialized {
1727 value: Primitive::Int(discr_int, false),
1728 valid_range: WrappingRange { start: 0, end: max_discr },
1730 let tag_layout = self.tcx.intern_layout(LayoutS::scalar(self, tag));
1731 let tag_layout = TyAndLayout { ty: discr_int_ty, layout: tag_layout };
// Promoted locals are wrapped in MaybeUninit: they may be dead in some
// variants, so only their storage (not their validity) is guaranteed.
1733 let promoted_layouts = ineligible_locals
1735 .map(|local| subst_field(info.field_tys[local]))
1736 .map(|ty| tcx.mk_maybe_uninit(ty))
1737 .map(|ty| self.layout_of(ty));
1738 let prefix_layouts = substs
1741 .map(|ty| self.layout_of(ty))
1742 .chain(iter::once(Ok(tag_layout)))
1743 .chain(promoted_layouts)
1744 .collect::<Result<Vec<_>, _>>()?;
1745 let prefix = self.univariant_uninterned(
1748 &ReprOptions::default(),
1749 StructKind::AlwaysSized,
1752 let (prefix_size, prefix_align) = (prefix.size, prefix.align);
1754 // Split the prefix layout into the "outer" fields (upvars and
1755 // discriminant) and the "promoted" fields. Promoted fields will
1756 // get included in each variant that requested them in
1758 debug!("prefix = {:#?}", prefix);
1759 let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
1760 FieldsShape::Arbitrary { mut offsets, memory_index } => {
1761 let mut inverse_memory_index = invert_mapping(&memory_index);
1763 // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
1764 // "outer" and "promoted" fields respectively.
1765 let b_start = (tag_index + 1) as u32;
1766 let offsets_b = offsets.split_off(b_start as usize);
1767 let offsets_a = offsets;
1769 // Disentangle the "a" and "b" components of `inverse_memory_index`
1770 // by preserving the order but keeping only one disjoint "half" each.
1771 // FIXME(eddyb) build a better abstraction for permutations, if possible.
1772 let inverse_memory_index_b: Vec<_> =
1773 inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
1774 inverse_memory_index.retain(|&i| i < b_start);
1775 let inverse_memory_index_a = inverse_memory_index;
1777 // Since `inverse_memory_index_{a,b}` each only refer to their
1778 // respective fields, they can be safely inverted
1779 let memory_index_a = invert_mapping(&inverse_memory_index_a);
1780 let memory_index_b = invert_mapping(&inverse_memory_index_b);
1783 FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
1784 (outer_fields, offsets_b, memory_index_b)
1789 let mut size = prefix.size;
1790 let mut align = prefix.align;
// Lay out every variant after the shared prefix.
1794 .map(|(index, variant_fields)| {
1795 // Only include overlap-eligible fields when we compute our variant layout.
1796 let variant_only_tys = variant_fields
1798 .filter(|local| match assignments[**local] {
1799 Unassigned => bug!(),
1800 Assigned(v) if v == index => true,
1801 Assigned(_) => bug!("assignment does not match variant"),
1802 Ineligible(_) => false,
1804 .map(|local| subst_field(info.field_tys[*local]));
1806 let mut variant = self.univariant_uninterned(
1809 .map(|ty| self.layout_of(ty))
1810 .collect::<Result<Vec<_>, _>>()?,
1811 &ReprOptions::default(),
1812 StructKind::Prefixed(prefix_size, prefix_align.abi),
1814 variant.variants = Variants::Single { index };
1816 let FieldsShape::Arbitrary { offsets, memory_index } = variant.fields else {
1820 // Now, stitch the promoted and variant-only fields back together in
1821 // the order they are mentioned by our GeneratorLayout.
1822 // Because we only use some subset (that can differ between variants)
1823 // of the promoted fields, we can't just pick those elements of the
1824 // `promoted_memory_index` (as we'd end up with gaps).
1825 // So instead, we build an "inverse memory_index", as if all of the
1826 // promoted fields were being used, but leave the elements not in the
1827 // subset as `INVALID_FIELD_IDX`, which we can filter out later to
1828 // obtain a valid (bijective) mapping.
1829 const INVALID_FIELD_IDX: u32 = !0;
1830 let mut combined_inverse_memory_index =
1831 vec![INVALID_FIELD_IDX; promoted_memory_index.len() + memory_index.len()];
1832 let mut offsets_and_memory_index = iter::zip(offsets, memory_index);
1833 let combined_offsets = variant_fields
1837 let (offset, memory_index) = match assignments[*local] {
1838 Unassigned => bug!(),
// Variant-local field: take the next offset computed for
// this variant; its memory index follows the promoted ones.
1840 let (offset, memory_index) =
1841 offsets_and_memory_index.next().unwrap();
1842 (offset, promoted_memory_index.len() as u32 + memory_index)
1844 Ineligible(field_idx) => {
1845 let field_idx = field_idx.unwrap() as usize;
1846 (promoted_offsets[field_idx], promoted_memory_index[field_idx])
1849 combined_inverse_memory_index[memory_index as usize] = i as u32;
1854 // Remove the unused slots and invert the mapping to obtain the
1855 // combined `memory_index` (also see previous comment).
1856 combined_inverse_memory_index.retain(|&i| i != INVALID_FIELD_IDX);
1857 let combined_memory_index = invert_mapping(&combined_inverse_memory_index);
1859 variant.fields = FieldsShape::Arbitrary {
1860 offsets: combined_offsets,
1861 memory_index: combined_memory_index,
1864 size = size.max(variant.size);
1865 align = align.max(variant.align);
1866 Ok(tcx.intern_layout(variant))
1868 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1870 size = size.align_to(align.abi);
1873 if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi().is_uninhabited()) {
1876 Abi::Aggregate { sized: true }
// Assemble the final generator layout: outer fields (upvars + tag)
// plus a directly-tagged multi-variant body.
1879 let layout = tcx.intern_layout(LayoutS {
1880 variants: Variants::Multiple {
1882 tag_encoding: TagEncoding::Direct,
1883 tag_field: tag_index,
1886 fields: outer_fields,
1888 largest_niche: prefix.largest_niche,
1892 debug!("generator layout ({:?}): {:#?}", ty, layout);
1896 /// This is invoked by the `layout_of` query to record the final
1897 /// layout of each type.
1899 fn record_layout_for_printing(&self, layout: TyAndLayout<'tcx>) {
1900 // If we are running with `-Zprint-type-sizes`, maybe record layouts
1901 // for dumping later.
1902 if self.tcx.sess.opts.unstable_opts.print_type_sizes {
1903 self.record_layout_for_printing_outlined(layout)
// Slow path of `record_layout_for_printing`, kept out of line so the
// common (flag off) case stays cheap. Records ADT/closure sizes into
// `sess.code_stats` for `-Zprint-type-sizes` output.
1907 fn record_layout_for_printing_outlined(&self, layout: TyAndLayout<'tcx>) {
1908 // Ignore layouts that are done with non-empty environments or
1909 // non-monomorphic layouts, as the user only wants to see the stuff
1910 // resulting from the final codegen session.
1911 if layout.ty.has_param_types_or_consts() || !self.param_env.caller_bounds().is_empty() {
1915 // (delay format until we actually need it)
1916 let record = |kind, packed, opt_discr_size, variants| {
1917 let type_desc = format!("{:?}", layout.ty);
1918 self.tcx.sess.code_stats.record_type_size(
// Only ADTs and closures are recorded; everything else is skipped.
1929 let adt_def = match *layout.ty.kind() {
1930 ty::Adt(ref adt_def, _) => {
1931 debug!("print-type-size t: `{:?}` process adt", layout.ty);
1935 ty::Closure(..) => {
1936 debug!("print-type-size t: `{:?}` record closure", layout.ty);
1937 record(DataTypeKind::Closure, false, None, vec![]);
1942 debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
1947 let adt_kind = adt_def.adt_kind();
1948 let adt_packed = adt_def.repr().pack.is_some();
// Build a VariantInfo (field sizes/offsets) for one variant layout.
1950 let build_variant_info = |n: Option<Symbol>, flds: &[Symbol], layout: TyAndLayout<'tcx>| {
1951 let mut min_size = Size::ZERO;
1952 let field_info: Vec<_> = flds
1956 let field_layout = layout.field(self, i);
1957 let offset = layout.fields.offset(i);
1958 let field_end = offset + field_layout.size;
// Track the end of the furthest field as the minimum size.
1959 if min_size < field_end {
1960 min_size = field_end;
1964 offset: offset.bytes(),
1965 size: field_layout.size.bytes(),
1966 align: field_layout.align.abi.bytes(),
1973 kind: if layout.is_unsized() { SizeKind::Min } else { SizeKind::Exact },
1974 align: layout.align.abi.bytes(),
1975 size: if min_size.bytes() == 0 { layout.size.bytes() } else { min_size.bytes() },
1980 match layout.variants {
1981 Variants::Single { index } => {
1982 if !adt_def.variants().is_empty() && layout.fields != FieldsShape::Primitive {
1984 "print-type-size `{:#?}` variant {}",
1986 adt_def.variant(index).name
1988 let variant_def = &adt_def.variant(index);
1989 let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
1994 vec![build_variant_info(Some(variant_def.name), &fields, layout)],
1997 // (This case arises for *empty* enums; so give it
1999 record(adt_kind.into(), adt_packed, None, vec![]);
2003 Variants::Multiple { tag, ref tag_encoding, .. } => {
2005 "print-type-size `{:#?}` adt general variants def {}",
2007 adt_def.variants().len()
2009 let variant_infos: Vec<_> = adt_def
2012 .map(|(i, variant_def)| {
2013 let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
2015 Some(variant_def.name),
2017 layout.for_variant(self, i),
// Only a directly-encoded tag contributes a discriminant size;
// a niche-encoded tag occupies space already counted in a field.
2024 match tag_encoding {
2025 TagEncoding::Direct => Some(tag.size(self)),
2035 /// Type size "skeleton", i.e., the only information determining a type's size.
2036 /// While this is conservative, (aside from constant sizes, only pointers,
2037 /// newtypes thereof and null pointer optimized enums are allowed), it is
2038 /// enough to statically check common use cases of transmute.
2039 #[derive(Copy, Clone, Debug)]
2040 pub enum SizeSkeleton<'tcx> {
2041 /// Any statically computable Layout.
2044 /// A potentially-fat pointer.
2046 /// If true, this pointer is never null.
2048 /// The type which determines the unsized metadata, if any,
2049 /// of this pointer. Either a type parameter or a projection
2050 /// depending on one, with regions erased.
2055 impl<'tcx> SizeSkeleton<'tcx> {
// Computes the size skeleton of `ty` (method header partially elided in
// this view). Falls back to pointer/ADT analysis only when a full
// `layout_of` fails; the layout error is preserved for diagnostics.
2059 param_env: ty::ParamEnv<'tcx>,
2060 ) -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
2061 debug_assert!(!ty.has_infer_types_or_consts());
2063 // First try computing a static layout.
2064 let err = match tcx.layout_of(param_env.and(ty)) {
2066 return Ok(SizeSkeleton::Known(layout.size));
2072 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
// References are never null; raw pointers may be.
2073 let non_zero = !ty.is_unsafe_ptr();
2074 let tail = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
2076 ty::Param(_) | ty::Projection(_) => {
2077 debug_assert!(tail.has_param_types_or_consts());
2078 Ok(SizeSkeleton::Pointer { non_zero, tail: tcx.erase_regions(tail) })
2081 "SizeSkeleton::compute({}): layout errored ({}), yet \
2082 tail `{}` is not a type parameter or a projection",
2090 ty::Adt(def, substs) => {
2091 // Only newtypes and enums w/ nullable pointer optimization.
2092 if def.is_union() || def.variants().is_empty() || def.variants().len() > 2 {
2096 // Get a zero-sized variant or a pointer newtype.
2097 let zero_or_ptr_variant = |i| {
2098 let i = VariantIdx::new(i);
2100 def.variant(i).fields.iter().map(|field| {
2101 SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env)
2104 for field in fields {
2107 SizeSkeleton::Known(size) => {
// Any field of nonzero known size disqualifies the variant.
2108 if size.bytes() > 0 {
2112 SizeSkeleton::Pointer { .. } => {
2123 let v0 = zero_or_ptr_variant(0)?;
// Newtype (single-variant) case: propagate the pointer skeleton,
// upgrading `non_zero` if a scalar-valid-range excludes zero.
2125 if def.variants().len() == 1 {
2126 if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
2127 return Ok(SizeSkeleton::Pointer {
2129 || match tcx.layout_scalar_valid_range(def.did()) {
2130 (Bound::Included(start), Bound::Unbounded) => start > 0,
2131 (Bound::Included(start), Bound::Included(end)) => {
2132 0 < start && start < end
2143 let v1 = zero_or_ptr_variant(1)?;
2144 // Nullable pointer enum optimization.
// One variant is a non-null pointer, the other is zero-sized:
// the result is pointer-sized but the null value is taken.
2146 (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None)
2147 | (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
2148 Ok(SizeSkeleton::Pointer { non_zero: false, tail })
2154 ty::Projection(_) | ty::Opaque(..) => {
// Retry after normalization, but only if it made progress,
// to avoid infinite recursion.
2155 let normalized = tcx.normalize_erasing_regions(param_env, ty);
2156 if ty == normalized {
2159 SizeSkeleton::compute(normalized, tcx, param_env)
// Two skeletons guarantee equal sizes when both are the same known
// size, or both are pointers with the same tail type.
2167 pub fn same_size(self, other: SizeSkeleton<'tcx>) -> bool {
2168 match (self, other) {
2169 (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
2170 (SizeSkeleton::Pointer { tail: a, .. }, SizeSkeleton::Pointer { tail: b, .. }) => {
// Context trait granting access to the type context (implies data-layout access).
2178 pub trait HasTyCtxt<'tcx>: HasDataLayout {
2179 fn tcx(&self) -> TyCtxt<'tcx>;
// Context trait granting access to the parameter environment.
2182 pub trait HasParamEnv<'tcx> {
2183 fn param_env(&self) -> ty::ParamEnv<'tcx>;
// Boilerplate impls wiring the context traits to TyCtxt, TyCtxtAt, and
// LayoutCx (method bodies are elided in this view).
2186 impl<'tcx> HasDataLayout for TyCtxt<'tcx> {
2188 fn data_layout(&self) -> &TargetDataLayout {
2193 impl<'tcx> HasTargetSpec for TyCtxt<'tcx> {
2194 fn target_spec(&self) -> &Target {
2199 impl<'tcx> HasTyCtxt<'tcx> for TyCtxt<'tcx> {
2201 fn tcx(&self) -> TyCtxt<'tcx> {
// TyCtxtAt delegates to its embedded TyCtxt.
2206 impl<'tcx> HasDataLayout for ty::query::TyCtxtAt<'tcx> {
2208 fn data_layout(&self) -> &TargetDataLayout {
2213 impl<'tcx> HasTargetSpec for ty::query::TyCtxtAt<'tcx> {
2214 fn target_spec(&self) -> &Target {
2219 impl<'tcx> HasTyCtxt<'tcx> for ty::query::TyCtxtAt<'tcx> {
2221 fn tcx(&self) -> TyCtxt<'tcx> {
// LayoutCx forwards to its inner `tcx` field (generic over the context type).
2226 impl<'tcx, C> HasParamEnv<'tcx> for LayoutCx<'tcx, C> {
2227 fn param_env(&self) -> ty::ParamEnv<'tcx> {
2232 impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
2233 fn data_layout(&self) -> &TargetDataLayout {
2234 self.tcx.data_layout()
2238 impl<'tcx, T: HasTargetSpec> HasTargetSpec for LayoutCx<'tcx, T> {
2239 fn target_spec(&self) -> &Target {
2240 self.tcx.target_spec()
2244 impl<'tcx, T: HasTyCtxt<'tcx>> HasTyCtxt<'tcx> for LayoutCx<'tcx, T> {
2245 fn tcx(&self) -> TyCtxt<'tcx> {
// Abstraction over "a T" vs "a Result<T, E>", letting `layout_of`-style
// APIs return either an infallible value or a proper Result.
2250 pub trait MaybeResult<T> {
2253 fn from(x: Result<T, Self::Error>) -> Self;
2254 fn to_result(self) -> Result<T, Self::Error>;
// Infallible case: the error type is uninhabited, so the `Ok(x)`
// pattern in `from` is irrefutable.
2257 impl<T> MaybeResult<T> for T {
2260 fn from(Ok(x): Result<T, Self::Error>) -> Self {
2263 fn to_result(self) -> Result<T, Self::Error> {
// Fallible case: a plain pass-through of the Result.
2268 impl<T, E> MaybeResult<T> for Result<T, E> {
2271 fn from(x: Result<T, Self::Error>) -> Self {
2274 fn to_result(self) -> Result<T, Self::Error> {
// Shorthand: a rustc_target TyAndLayout specialized to this crate's Ty.
2279 pub type TyAndLayout<'tcx> = rustc_target::abi::TyAndLayout<'tcx, Ty<'tcx>>;
2281 /// Trait for contexts that want to be able to compute layouts of types.
2282 /// This automatically gives access to `LayoutOf`, through a blanket `impl`.
2283 pub trait LayoutOfHelpers<'tcx>: HasDataLayout + HasTyCtxt<'tcx> + HasParamEnv<'tcx> {
2284 /// The `TyAndLayout`-wrapping type (or `TyAndLayout` itself), which will be
2285 /// returned from `layout_of` (see also `handle_layout_err`).
2286 type LayoutOfResult: MaybeResult<TyAndLayout<'tcx>>;
2288 /// `Span` to use for `tcx.at(span)`, from `layout_of`.
2289 // FIXME(eddyb) perhaps make this mandatory to get contexts to track it better?
2291 fn layout_tcx_at_span(&self) -> Span {
2295 /// Helper used for `layout_of`, to adapt `tcx.layout_of(...)` into a
2296 /// `Self::LayoutOfResult` (which does not need to be a `Result<...>`).
2298 /// Most `impl`s, which propagate `LayoutError`s, should simply return `err`,
2299 /// but this hook allows e.g. codegen to return only `TyAndLayout` from its
2300 /// `cx.layout_of(...)`, without any `Result<...>` around it to deal with
2301 /// (and any `LayoutError`s are turned into fatal errors or ICEs).
2302 fn handle_layout_err(
2304 err: LayoutError<'tcx>,
2307 ) -> <Self::LayoutOfResult as MaybeResult<TyAndLayout<'tcx>>>::Error;
2310 /// Blanket extension trait for contexts that can compute layouts of types.
2311 pub trait LayoutOf<'tcx>: LayoutOfHelpers<'tcx> {
2312 /// Computes the layout of a type. Note that this implicitly
2313 /// executes in "reveal all" mode, and will normalize the input type.
// Convenience entry point: delegates with a dummy span, which
// `spanned_layout_of` then replaces via `layout_tcx_at_span()`.
2315 fn layout_of(&self, ty: Ty<'tcx>) -> Self::LayoutOfResult {
2316 self.spanned_layout_of(ty, DUMMY_SP)
2319 /// Computes the layout of a type, at `span`. Note that this implicitly
2320 /// executes in "reveal all" mode, and will normalize the input type.
2321 // FIXME(eddyb) avoid passing information like this, and instead add more
2322 // `TyCtxt::at`-like APIs to be able to do e.g. `cx.at(span).layout_of(ty)`.
2324 fn spanned_layout_of(&self, ty: Ty<'tcx>, span: Span) -> Self::LayoutOfResult {
// Prefer the caller's span; fall back to the context-tracked one.
2325 let span = if !span.is_dummy() { span } else { self.layout_tcx_at_span() };
2326 let tcx = self.tcx().at(span);
// Run the `layout_of` query and let `handle_layout_err` adapt any error
// into `Self::LayoutOfResult`'s error type (possibly a fatal error/ICE).
2329 tcx.layout_of(self.param_env().and(ty))
2330 .map_err(|err| self.handle_layout_err(err, span, ty)),
// Blanket impl: every `LayoutOfHelpers` context gets `LayoutOf` for free.
2335 impl<'tcx, C: LayoutOfHelpers<'tcx>> LayoutOf<'tcx> for C {}
// The two `LayoutCx` flavors both propagate `LayoutError` verbatim: their
// `LayoutOfResult` is a plain `Result`, so `handle_layout_err` just returns
// the error (body elided in this excerpt).
2337 impl<'tcx> LayoutOfHelpers<'tcx> for LayoutCx<'tcx, TyCtxt<'tcx>> {
2338 type LayoutOfResult = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
2341 fn handle_layout_err(&self, err: LayoutError<'tcx>, _: Span, _: Ty<'tcx>) -> LayoutError<'tcx> {
// The `TyCtxtAt` flavor additionally overrides `layout_tcx_at_span` —
// presumably returning the span stored in the `TyCtxtAt` (body elided).
2346 impl<'tcx> LayoutOfHelpers<'tcx> for LayoutCx<'tcx, ty::query::TyCtxtAt<'tcx>> {
2347 type LayoutOfResult = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
2350 fn layout_tcx_at_span(&self) -> Span {
2355 fn handle_layout_err(&self, err: LayoutError<'tcx>, _: Span, _: Ty<'tcx>) -> LayoutError<'tcx> {
// Hooks `Ty<'tcx>` into `rustc_target`'s ABI machinery: how to project a
// `TyAndLayout` to a variant, to a field, and how to describe pointees.
2360 impl<'tcx, C> TyAbiInterface<'tcx, C> for Ty<'tcx>
2362 C: HasTyCtxt<'tcx> + HasParamEnv<'tcx>,
// Returns the layout of one variant of `this` (for enums/generators).
2364 fn ty_and_layout_for_variant(
2365 this: TyAndLayout<'tcx>,
2367 variant_index: VariantIdx,
2368 ) -> TyAndLayout<'tcx> {
2369 let layout = match this.variants {
2370 Variants::Single { index }
2371 // If all variants but one are uninhabited, the variant layout is the enum layout.
2372 if index == variant_index &&
2373 // Don't confuse variants of uninhabited enums with the enum itself.
2374 // For more details see https://github.com/rust-lang/rust/issues/69763.
2375 this.fields != FieldsShape::Primitive =>
// `Variants::Single` with a *different* index: asking for an (uninhabited)
// variant other than the one the layout was computed for.
2380 Variants::Single { index } => {
2382 let param_env = cx.param_env();
2384 // Deny calling for_variant more than once for non-Single enums.
2385 if let Ok(original_layout) = tcx.layout_of(param_env.and(this.ty)) {
2386 assert_eq!(original_layout.variants, Variants::Single { index });
2389 let fields = match this.ty.kind() {
2390 ty::Adt(def, _) if def.variants().is_empty() =>
2391 bug!("for_variant called on zero-variant enum"),
2392 ty::Adt(def, _) => def.variant(variant_index).fields.len(),
// Synthesize an uninhabited layout for the requested variant.
2395 tcx.intern_layout(LayoutS {
2396 variants: Variants::Single { index: variant_index },
2397 fields: match NonZeroUsize::new(fields) {
2398 Some(fields) => FieldsShape::Union(fields),
2399 None => FieldsShape::Arbitrary { offsets: vec![], memory_index: vec![] },
2401 abi: Abi::Uninhabited,
2402 largest_niche: None,
2403 align: tcx.data_layout.i8_align,
// Multi-variant case: the per-variant layouts were precomputed.
2408 Variants::Multiple { ref variants, .. } => variants[variant_index],
2411 assert_eq!(*layout.variants(), Variants::Single { index: variant_index });
2413 TyAndLayout { ty: this.ty, layout }
// Returns the layout of field `i` of `this`.
2416 fn ty_and_layout_field(this: TyAndLayout<'tcx>, cx: &C, i: usize) -> TyAndLayout<'tcx> {
2417 enum TyMaybeWithLayout<'tcx> {
2419 TyAndLayout(TyAndLayout<'tcx>),
// Inner helper: resolve field `i` to either a type (layout computed by the
// caller below) or a ready-made `TyAndLayout` (for synthesized fields such
// as enum tags and thin-pointer data fields).
2422 fn field_ty_or_layout<'tcx>(
2423 this: TyAndLayout<'tcx>,
2424 cx: &(impl HasTyCtxt<'tcx> + HasParamEnv<'tcx>),
2426 ) -> TyMaybeWithLayout<'tcx> {
// Builds the synthesized layout for an enum/generator tag scalar.
2428 let tag_layout = |tag: Scalar| -> TyAndLayout<'tcx> {
2430 layout: tcx.intern_layout(LayoutS::scalar(cx, tag)),
2431 ty: tag.primitive().to_ty(tcx),
2435 match *this.ty.kind() {
// Types with no fields at all: asking for a field is a compiler bug.
2444 | ty::GeneratorWitness(..)
2446 | ty::Dynamic(_, _, ty::Dyn) => {
2447 bug!("TyAndLayout::field({:?}): not applicable", this)
2450 // Potentially-fat pointers.
2451 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
2452 assert!(i < this.fields.count());
2454 // Reuse the fat `*T` type as its own thin pointer data field.
2455 // This provides information about, e.g., DST struct pointees
2456 // (which may have no non-DST form), and will work as long
2457 // as the `Abi` or `FieldsShape` is checked by users.
2459 let nil = tcx.mk_unit();
2460 let unit_ptr_ty = if this.ty.is_unsafe_ptr() {
2463 tcx.mk_mut_ref(tcx.lifetimes.re_static, nil)
2466 // NOTE(eddyb) using an empty `ParamEnv`, and `unwrap`-ing
2467 // the `Result` should always work because the type is
2468 // always either `*mut ()` or `&'static mut ()`.
2469 return TyMaybeWithLayout::TyAndLayout(TyAndLayout {
2471 ..tcx.layout_of(ty::ParamEnv::reveal_all().and(unit_ptr_ty)).unwrap()
// The metadata field (i == 1) depends on the unsized tail of the pointee.
2475 match tcx.struct_tail_erasing_lifetimes(pointee, cx.param_env()).kind() {
2476 ty::Slice(_) | ty::Str => TyMaybeWithLayout::Ty(tcx.types.usize),
2477 ty::Dynamic(_, _, ty::Dyn) => {
// Vtable pointer modeled as `&'static [usize; 3]` (see FIXME below).
2478 TyMaybeWithLayout::Ty(tcx.mk_imm_ref(
2479 tcx.lifetimes.re_static,
2480 tcx.mk_array(tcx.types.usize, 3),
2482 /* FIXME: use actual fn pointers
2483 Warning: naively computing the number of entries in the
2484 vtable by counting the methods on the trait + methods on
2485 all parent traits does not work, because some methods can
2486 be not object safe and thus excluded from the vtable.
2487 Increase this counter if you tried to implement this but
2488 failed to do it without duplicating a lot of code from
2489 other places in the compiler: 2
2491 tcx.mk_array(tcx.types.usize, 3),
2492 tcx.mk_array(Option<fn()>),
2496 _ => bug!("TyAndLayout::field({:?}): not applicable", this),
2500 // Arrays and slices.
2501 ty::Array(element, _) | ty::Slice(element) => TyMaybeWithLayout::Ty(element),
2502 ty::Str => TyMaybeWithLayout::Ty(tcx.types.u8),
2504 // Tuples, generators and closures.
// Closures: project through the tupled upvars type.
2505 ty::Closure(_, ref substs) => field_ty_or_layout(
2506 TyAndLayout { ty: substs.as_closure().tupled_upvars_ty(), ..this },
2511 ty::Generator(def_id, ref substs, _) => match this.variants {
// Single-variant generator: look up the saved local in the state tys.
2512 Variants::Single { index } => TyMaybeWithLayout::Ty(
2515 .state_tys(def_id, tcx)
2516 .nth(index.as_usize())
// Multi-variant generator: field `tag_field` is the discriminant tag;
// other fields come from the prefix (upvars etc.).
2521 Variants::Multiple { tag, tag_field, .. } => {
2523 return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
2525 TyMaybeWithLayout::Ty(substs.as_generator().prefix_tys().nth(i).unwrap())
2529 ty::Tuple(tys) => TyMaybeWithLayout::Ty(tys[i]),
// ADTs.
2532 ty::Adt(def, substs) => {
2533 match this.variants {
2534 Variants::Single { index } => {
2535 TyMaybeWithLayout::Ty(def.variant(index).fields[i].ty(tcx, substs))
2538 // Discriminant field for enums (where applicable).
2539 Variants::Multiple { tag, .. } => {
2541 return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
// `dyn*`: data word (usize-sized) plus vtable pointer.
2546 ty::Dynamic(_, _, ty::DynStar) => {
2548 TyMaybeWithLayout::Ty(tcx.types.usize)
2550 // FIXME(dyn-star) same FIXME as above applies here too
2551 TyMaybeWithLayout::Ty(
2553 tcx.lifetimes.re_static,
2554 tcx.mk_array(tcx.types.usize, 3),
2558 bug!("no field {i} on dyn*")
// Types that should never reach layout (inference vars, params, errors…).
2564 | ty::Placeholder(..)
2568 | ty::Error(_) => bug!("TyAndLayout::field: unexpected type `{}`", this.ty),
// Outer driver: compute the layout for the resolved field type, or pass
// through a ready-made layout.
2572 match field_ty_or_layout(this, cx, i) {
2573 TyMaybeWithLayout::Ty(field_ty) => {
2574 cx.tcx().layout_of(cx.param_env().and(field_ty)).unwrap_or_else(|e| {
2576 "failed to get layout for `{}`: {},\n\
2577 despite it being a field (#{}) of an existing layout: {:#?}",
2585 TyMaybeWithLayout::TyAndLayout(field_layout) => field_layout,
// Describes the pointer (if any) located at `offset` within `this`, used
// by codegen to emit attributes like `nonnull`/`noalias`/`dereferenceable`.
2589 fn ty_and_layout_pointee_info_at(
2590 this: TyAndLayout<'tcx>,
2593 ) -> Option<PointeeInfo> {
2595 let param_env = cx.param_env();
2597 let addr_space_of_ty = |ty: Ty<'tcx>| {
2598 if ty.is_fn() { cx.data_layout().instruction_address_space } else { AddressSpace::DATA }
2601 let pointee_info = match *this.ty.kind() {
2602 ty::RawPtr(mt) if offset.bytes() == 0 => {
2603 tcx.layout_of(param_env.and(mt.ty)).ok().map(|layout| PointeeInfo {
2605 align: layout.align.abi,
2607 address_space: addr_space_of_ty(mt.ty),
2610 ty::FnPtr(fn_sig) if offset.bytes() == 0 => {
2611 tcx.layout_of(param_env.and(tcx.mk_fn_ptr(fn_sig))).ok().map(|layout| PointeeInfo {
2613 align: layout.align.abi,
2615 address_space: cx.data_layout().instruction_address_space,
2618 ty::Ref(_, ty, mt) if offset.bytes() == 0 => {
2619 let address_space = addr_space_of_ty(ty);
2620 let kind = if tcx.sess.opts.optimize == OptLevel::No {
2621 // Use conservative pointer kind if not optimizing. This saves us the
2622 // Freeze/Unpin queries, and can save time in the codegen backend (noalias
2623 // attributes in LLVM have compile-time cost even in unoptimized builds).
2624 PointerKind::SharedMutable
2627 hir::Mutability::Not => {
// `&T` is `Frozen` only if `T: Freeze` (no `UnsafeCell`);
// the `Frozen` arm is elided in this excerpt.
2628 if ty.is_freeze(tcx.at(DUMMY_SP), cx.param_env()) {
2631 PointerKind::SharedMutable
2634 hir::Mutability::Mut => {
2635 // References to self-referential structures should not be considered
2636 // noalias, as another pointer to the structure can be obtained, that
2637 // is not based-on the original reference. We consider all !Unpin
2638 // types to be potentially self-referential here.
2639 if ty.is_unpin(tcx.at(DUMMY_SP), cx.param_env()) {
2640 PointerKind::UniqueBorrowed
2642 PointerKind::UniqueBorrowedPinned
2648 tcx.layout_of(param_env.and(ty)).ok().map(|layout| PointeeInfo {
2650 align: layout.align.abi,
// Non-pointer at offset 0: recurse into fields/variants to find one.
2657 let mut data_variant = match this.variants {
2658 // Within the discriminant field, only the niche itself is
2659 // always initialized, so we only check for a pointer at its
2662 // If the niche is a pointer, it's either valid (according
2663 // to its type), or null (which the niche field's scalar
2664 // validity range encodes). This allows using
2665 // `dereferenceable_or_null` for e.g., `Option<&T>`, and
2666 // this will continue to work as long as we don't start
2667 // using more niches than just null (e.g., the first page of
2668 // the address space, or unaligned pointers).
2669 Variants::Multiple {
2670 tag_encoding: TagEncoding::Niche { untagged_variant, .. },
2673 } if this.fields.offset(tag_field) == offset => {
2674 Some(this.for_variant(cx, untagged_variant))
2679 if let Some(variant) = data_variant {
2680 // We're not interested in any unions.
2681 if let FieldsShape::Union(_) = variant.fields {
2682 data_variant = None;
2686 let mut result = None;
2688 if let Some(variant) = data_variant {
2689 let ptr_end = offset + Pointer.size(cx);
2690 for i in 0..variant.fields.count() {
2691 let field_start = variant.fields.offset(i);
2692 if field_start <= offset {
2693 let field = variant.field(cx, i);
2694 result = field.to_result().ok().and_then(|field| {
2695 if ptr_end <= field_start + field.size {
2696 // We found the right field, look inside it.
2698 field.pointee_info_at(cx, offset - field_start);
2704 if result.is_some() {
2711 // FIXME(eddyb) This should be for `ptr::Unique<T>`, not `Box<T>`.
2712 if let Some(ref mut pointee) = result {
2713 if let ty::Adt(def, _) = this.ty.kind() {
2714 if def.is_box() && offset.bytes() == 0 {
2715 pointee.safe = Some(PointerKind::UniqueOwned);
2725 "pointee_info_at (offset={:?}, type kind: {:?}) => {:?}",
// Simple classification predicates used by ABI adjustment code.
2734 fn is_adt(this: TyAndLayout<'tcx>) -> bool {
2735 matches!(this.ty.kind(), ty::Adt(..))
2738 fn is_never(this: TyAndLayout<'tcx>) -> bool {
2739 this.ty.kind() == &ty::Never
2742 fn is_tuple(this: TyAndLayout<'tcx>) -> bool {
2743 matches!(this.ty.kind(), ty::Tuple(..))
2746 fn is_unit(this: TyAndLayout<'tcx>) -> bool {
2747 matches!(this.ty.kind(), ty::Tuple(list) if list.len() == 0)
2751 impl<'tcx> ty::Instance<'tcx> {
2752 // NOTE(eddyb) this is private to avoid using it from outside of
2753 // `fn_abi_of_instance` - any other uses are either too high-level
2754 // for `Instance` (e.g. typeck would use `Ty::fn_sig` instead),
2755 // or should go through `FnAbi` instead, to avoid losing any
2756 // adjustments `fn_abi_of_instance` might be performing.
// Lowers an `Instance` to the function signature its ABI is computed from:
// closures and generators get their synthetic env/resume parameters added.
2757 #[tracing::instrument(level = "debug", skip(tcx, param_env))]
2758 fn fn_sig_for_fn_abi(
2761 param_env: ty::ParamEnv<'tcx>,
2762 ) -> ty::PolyFnSig<'tcx> {
2763 let ty = self.ty(tcx, param_env);
2766 // HACK(davidtwco,eddyb): This is a workaround for polymorphization considering
2767 // parameters unused if they show up in the signature, but not in the `mir::Body`
2768 // (i.e. due to being inside a projection that got normalized, see
2769 // `src/test/ui/polymorphization/normalized_sig_types.rs`), and codegen not keeping
2770 // track of a polymorphization `ParamEnv` to allow normalizing later.
2771 let mut sig = match *ty.kind() {
2772 ty::FnDef(def_id, substs) => tcx
2773 .normalize_erasing_regions(tcx.param_env(def_id), tcx.bound_fn_sig(def_id))
2774 .subst(tcx, substs),
2775 _ => unreachable!(),
2778 if let ty::InstanceDef::VTableShim(..) = self.def {
2779 // Modify `fn(self, ...)` to `fn(self: *mut Self, ...)`.
2780 sig = sig.map_bound(|mut sig| {
2781 let mut inputs_and_output = sig.inputs_and_output.to_vec();
2782 inputs_and_output[0] = tcx.mk_mut_ptr(inputs_and_output[0]);
2783 sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);
// Closure: prepend the captured-environment parameter, bound under a
// fresh `BrEnv` late-bound region appended to the closure's bound vars.
2789 ty::Closure(def_id, substs) => {
2790 let sig = substs.as_closure().sig();
2792 let bound_vars = tcx.mk_bound_variable_kinds(
2795 .chain(iter::once(ty::BoundVariableKind::Region(ty::BrEnv))),
2797 let br = ty::BoundRegion {
2798 var: ty::BoundVar::from_usize(bound_vars.len() - 1),
2799 kind: ty::BoundRegionKind::BrEnv,
2801 let env_region = ty::ReLateBound(ty::INNERMOST, br);
2802 let env_ty = tcx.closure_env_ty(def_id, substs, env_region).unwrap();
2804 let sig = sig.skip_binder();
2805 ty::Binder::bind_with_vars(
2807 iter::once(env_ty).chain(sig.inputs().iter().cloned()),
// Generator: the resume signature is
// `fn(Pin<&mut Generator>, ResumeTy) -> GeneratorState<Yield, Return>`.
2816 ty::Generator(_, substs, _) => {
2817 let sig = substs.as_generator().poly_sig();
2819 let bound_vars = tcx.mk_bound_variable_kinds(
2822 .chain(iter::once(ty::BoundVariableKind::Region(ty::BrEnv))),
2824 let br = ty::BoundRegion {
2825 var: ty::BoundVar::from_usize(bound_vars.len() - 1),
2826 kind: ty::BoundRegionKind::BrEnv,
2828 let env_region = ty::ReLateBound(ty::INNERMOST, br);
2829 let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty);
// Wrap the `&mut Generator` receiver in the `Pin` lang item.
2831 let pin_did = tcx.require_lang_item(LangItem::Pin, None);
2832 let pin_adt_ref = tcx.adt_def(pin_did);
2833 let pin_substs = tcx.intern_substs(&[env_ty.into()]);
2834 let env_ty = tcx.mk_adt(pin_adt_ref, pin_substs);
2836 let sig = sig.skip_binder();
// Return type is `GeneratorState<yield_ty, return_ty>`.
2837 let state_did = tcx.require_lang_item(LangItem::GeneratorState, None);
2838 let state_adt_ref = tcx.adt_def(state_did);
2839 let state_substs = tcx.intern_substs(&[sig.yield_ty.into(), sig.return_ty.into()]);
2840 let ret_ty = tcx.mk_adt(state_adt_ref, state_substs);
2841 ty::Binder::bind_with_vars(
2843 [env_ty, sig.resume_ty].iter(),
2846 hir::Unsafety::Normal,
2847 rustc_target::spec::abi::Abi::Rust,
2852 _ => bug!("unexpected type {:?} in Instance::fn_sig", ty),
2857 /// Calculates whether a function's ABI can unwind or not.
2859 /// This takes two primary parameters:
2861 /// * `codegen_fn_attr_flags` - these are flags calculated as part of the
2862 /// codegen attrs for a defined function. For function pointers this set of
2863 /// flags is the empty set. This is only applicable for Rust-defined
2864 /// functions, and generally isn't needed except for small optimizations where
2865 /// we try to say a function which otherwise might look like it could unwind
2866 /// doesn't actually unwind (such as for intrinsics and such).
2868 /// * `abi` - this is the ABI that the function is defined with. This is the
2869 /// primary factor for determining whether a function can unwind or not.
2871 /// Note that in this case unwinding is not necessarily panicking in Rust. Rust
2872 /// panics are implemented with unwinds on most platform (when
2873 /// `-Cpanic=unwind`), but this also accounts for `-Cpanic=abort` build modes.
2874 /// Notably unwinding is disallowed for more non-Rust ABIs unless it's
2875 /// specifically in the name (e.g. `"C-unwind"`). Unwinding within each ABI is
2876 /// defined for each ABI individually, but it always corresponds to some form of
2877 /// stack-based unwinding (the exact mechanism of which varies
2878 /// platform-by-platform).
2880 /// Rust functions are classified whether or not they can unwind based on the
2881 /// active "panic strategy". In other words Rust functions are considered to
2882 /// unwind in `-Cpanic=unwind` mode and cannot unwind in `-Cpanic=abort` mode.
2883 /// Note that Rust supports intermingling panic=abort and panic=unwind code, but
2884 /// only if the final panic mode is panic=abort. In this scenario any code
2885 /// previously compiled assuming that a function can unwind is still correct, it
2886 /// just never happens to actually unwind at runtime.
2888 /// This function's answer to whether or not a function can unwind is quite
2889 /// impactful throughout the compiler. This affects things like:
2891 /// * Calling a function which can't unwind means codegen simply ignores any
2892 /// associated unwinding cleanup.
2893 /// * Calling a function which can unwind from a function which can't unwind
2894 /// causes the `abort_unwinding_calls` MIR pass to insert a landing pad that
2895 /// aborts the process.
2896 /// * This affects whether functions have the LLVM `nounwind` attribute, which
2897 /// affects various optimizations and codegen.
2899 /// FIXME: this is actually buggy with respect to Rust functions. Rust functions
2900 /// compiled with `-Cpanic=unwind` and referenced from another crate compiled
2901 /// with `-Cpanic=abort` will look like they can't unwind when in fact they
2902 /// might (from a foreign exception or similar).
2904 #[tracing::instrument(level = "debug", skip(tcx))]
2905 pub fn fn_can_unwind<'tcx>(tcx: TyCtxt<'tcx>, fn_def_id: Option<DefId>, abi: SpecAbi) -> bool {
2906 if let Some(did) = fn_def_id {
2907 // Special attribute for functions which can't unwind.
2908 if tcx.codegen_fn_attrs(did).flags.contains(CodegenFnAttrFlags::NEVER_UNWIND) {
2912 // With `-C panic=abort`, all non-FFI functions are required to not unwind.
2914 // Note that this is true regardless ABI specified on the function -- a `extern "C-unwind"`
2915 // function defined in Rust is also required to abort.
2916 if tcx.sess.panic_strategy() == PanicStrategy::Abort && !tcx.is_foreign_item(did) {
2920 // With -Z panic-in-drop=abort, drop_in_place never unwinds.
2922 // This is not part of `codegen_fn_attrs` as it can differ between crates
2923 // and therefore cannot be computed in core.
2924 if tcx.sess.opts.unstable_opts.panic_in_drop == PanicStrategy::Abort {
2925 if Some(did) == tcx.lang_items().drop_in_place_fn() {
2931 // Otherwise if this isn't special then unwinding is generally determined by
2932 // the ABI of the itself. ABIs like `C` have variants which also
2933 // specifically allow unwinding (`C-unwind`), but not all platform-specific
2934 // ABIs have such an option. Otherwise the only other thing here is Rust
2935 // itself, and those ABIs are determined by the panic strategy configured
2936 // for this compilation.
2938 // Unfortunately at this time there's also another caveat. Rust [RFC
2939 // 2945][rfc] has been accepted and is in the process of being implemented
2940 // and stabilized. In this interim state we need to deal with historical
2941 // rustc behavior as well as plan for future rustc behavior.
2943 // Historically functions declared with `extern "C"` were marked at the
2944 // codegen layer as `nounwind`. This happened regardless of `panic=unwind`
2945 // or not. This is UB for functions in `panic=unwind` mode that then
2946 // actually panic and unwind. Note that this behavior is true for both
2947 // externally declared functions as well as Rust-defined function.
2949 // To fix this UB rustc would like to change in the future to catch unwinds
2950 // from function calls that may unwind within a Rust-defined `extern "C"`
2951 // function and forcibly abort the process, thereby respecting the
2952 // `nounwind` attribute emitted for `extern "C"`. This behavior change isn't
2953 // ready to roll out, so determining whether or not the `C` family of ABIs
2954 // unwinds is conditional not only on their definition but also whether the
2955 // `#![feature(c_unwind)]` feature gate is active.
2957 // Note that this means that unlike historical compilers rustc now, by
2958 // default, unconditionally thinks that the `C` ABI may unwind. This will
2959 // prevent some optimization opportunities, however, so we try to scope this
2960 // change and only assume that `C` unwinds with `panic=unwind` (as opposed
2961 // to `panic=abort`).
2963 // Eventually the check against `c_unwind` here will ideally get removed and
2964 // this'll be a little cleaner as it'll be a straightforward check of the
2967 // [rfc]: https://github.com/rust-lang/rfcs/blob/master/text/2945-c-unwind-abi.md
// C-family ABIs with an explicit `unwind` flag: unwinding is allowed if the
// flag is set, or (pre-`c_unwind` stabilization) if we're in panic=unwind
// mode. (The `match abi` header and the `C { unwind }` arm are elided in
// this excerpt.)
2973 | Stdcall { unwind }
2974 | Fastcall { unwind }
2975 | Vectorcall { unwind }
2976 | Thiscall { unwind }
2979 | SysV64 { unwind } => {
2981 || (!tcx.features().c_unwind && tcx.sess.panic_strategy() == PanicStrategy::Unwind)
// ABIs with no unwind variant at all never unwind.
2989 | AvrNonBlockingInterrupt
2990 | CCmseNonSecureCall
2994 | Unadjusted => false,
// Rust-ABI functions unwind exactly when compiling with panic=unwind.
2995 Rust | RustCall | RustCold => tcx.sess.panic_strategy() == PanicStrategy::Unwind,
// Maps a source-level ABI (`extern "..."`) to the backend calling
// convention, after target-specific adjustment via `adjust_abi`.
3000 pub fn conv_from_spec_abi(tcx: TyCtxt<'_>, abi: SpecAbi) -> Conv {
3001 use rustc_target::spec::abi::Abi::*;
3002 match tcx.sess.target.adjust_abi(abi) {
3003 RustIntrinsic | PlatformIntrinsic | Rust | RustCall => Conv::Rust,
3004 RustCold => Conv::RustCold,
3006 // It's the ABI's job to select this, not ours.
3007 System { .. } => bug!("system abi should be selected elsewhere"),
3008 EfiApi => bug!("eficall abi should be selected elsewhere"),
// x86-specific conventions.
3010 Stdcall { .. } => Conv::X86Stdcall,
3011 Fastcall { .. } => Conv::X86Fastcall,
3012 Vectorcall { .. } => Conv::X86VectorCall,
3013 Thiscall { .. } => Conv::X86ThisCall,
3014 C { .. } => Conv::C,
3015 Unadjusted => Conv::C,
3016 Win64 { .. } => Conv::X86_64Win64,
3017 SysV64 { .. } => Conv::X86_64SysV,
3018 Aapcs { .. } => Conv::ArmAapcs,
3019 CCmseNonSecureCall => Conv::CCmseNonSecureCall,
3020 PtxKernel => Conv::PtxKernel,
3021 Msp430Interrupt => Conv::Msp430Intr,
3022 X86Interrupt => Conv::X86Intr,
3023 AmdGpuKernel => Conv::AmdGpuKernel,
3024 AvrInterrupt => Conv::AvrInterrupt,
3025 AvrNonBlockingInterrupt => Conv::AvrNonBlockingInterrupt,
3028 // These API constants ought to be more specific...
3029 Cdecl { .. } => Conv::C,
3033 /// Error produced by attempting to compute or adjust a `FnAbi`.
3034 #[derive(Copy, Clone, Debug, HashStable)]
3035 pub enum FnAbiError<'tcx> {
3036 /// Error produced by a `layout_of` call, while computing `FnAbi` initially.
3037 Layout(LayoutError<'tcx>),
3039 /// Error produced by attempting to adjust a `FnAbi`, for a "foreign" ABI.
3040 AdjustForForeignAbi(call::AdjustForForeignAbiError),
// `From` impls so `?` can lift either underlying error into `FnAbiError`.
3043 impl<'tcx> From<LayoutError<'tcx>> for FnAbiError<'tcx> {
3044 fn from(err: LayoutError<'tcx>) -> Self {
3049 impl From<call::AdjustForForeignAbiError> for FnAbiError<'_> {
3050 fn from(err: call::AdjustForForeignAbiError) -> Self {
3051 Self::AdjustForForeignAbi(err)
// `Display` simply defers to the wrapped error's own formatting.
3055 impl<'tcx> fmt::Display for FnAbiError<'tcx> {
3056 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
3058 Self::Layout(err) => err.fmt(f),
3059 Self::AdjustForForeignAbi(err) => err.fmt(f),
3064 // FIXME(eddyb) maybe use something like this for an unified `fn_abi_of`, not
3065 // just for error handling.
// Identifies which `fn_abi_of_*` request failed, for error reporting.
3067 pub enum FnAbiRequest<'tcx> {
3068 OfFnPtr { sig: ty::PolyFnSig<'tcx>, extra_args: &'tcx ty::List<Ty<'tcx>> },
3069 OfInstance { instance: ty::Instance<'tcx>, extra_args: &'tcx ty::List<Ty<'tcx>> },
3072 /// Trait for contexts that want to be able to compute `FnAbi`s.
3073 /// This automatically gives access to `FnAbiOf`, through a blanket `impl`.
3074 pub trait FnAbiOfHelpers<'tcx>: LayoutOfHelpers<'tcx> {
3075 /// The `&FnAbi`-wrapping type (or `&FnAbi` itself), which will be
3076 /// returned from `fn_abi_of_*` (see also `handle_fn_abi_err`).
3077 type FnAbiOfResult: MaybeResult<&'tcx FnAbi<'tcx, Ty<'tcx>>>;
3079 /// Helper used for `fn_abi_of_*`, to adapt `tcx.fn_abi_of_*(...)` into a
3080 /// `Self::FnAbiOfResult` (which does not need to be a `Result<...>`).
3082 /// Most `impl`s, which propagate `FnAbiError`s, should simply return `err`,
3083 /// but this hook allows e.g. codegen to return only `&FnAbi` from its
3084 /// `cx.fn_abi_of_*(...)`, without any `Result<...>` around it to deal with
3085 /// (and any `FnAbiError`s are turned into fatal errors or ICEs).
3086 fn handle_fn_abi_err(
3088 err: FnAbiError<'tcx>,
3090 fn_abi_request: FnAbiRequest<'tcx>,
3091 ) -> <Self::FnAbiOfResult as MaybeResult<&'tcx FnAbi<'tcx, Ty<'tcx>>>>::Error;
3094 /// Blanket extension trait for contexts that can compute `FnAbi`s.
3095 pub trait FnAbiOf<'tcx>: FnAbiOfHelpers<'tcx> {
3096 /// Compute a `FnAbi` suitable for indirect calls, i.e. to `fn` pointers.
3098 /// NB: this doesn't handle virtual calls - those should use `fn_abi_of_instance`
3099 /// instead, where the instance is an `InstanceDef::Virtual`.
3101 fn fn_abi_of_fn_ptr(
3103 sig: ty::PolyFnSig<'tcx>,
3104 extra_args: &'tcx ty::List<Ty<'tcx>>,
3105 ) -> Self::FnAbiOfResult {
3106 // FIXME(eddyb) get a better `span` here.
3107 let span = self.layout_tcx_at_span();
3108 let tcx = self.tcx().at(span);
// Run the query; errors are adapted by `handle_fn_abi_err` (possibly
// into a fatal error/ICE, depending on the context).
3110 MaybeResult::from(tcx.fn_abi_of_fn_ptr(self.param_env().and((sig, extra_args))).map_err(
3111 |err| self.handle_fn_abi_err(err, span, FnAbiRequest::OfFnPtr { sig, extra_args }),
3115 /// Compute a `FnAbi` suitable for declaring/defining an `fn` instance, and for
3116 /// direct calls to an `fn`.
3118 /// NB: that includes virtual calls, which are represented by "direct calls"
3119 /// to an `InstanceDef::Virtual` instance (of `<dyn Trait as Trait>::fn`).
3121 #[tracing::instrument(level = "debug", skip(self))]
3122 fn fn_abi_of_instance(
3124 instance: ty::Instance<'tcx>,
3125 extra_args: &'tcx ty::List<Ty<'tcx>>,
3126 ) -> Self::FnAbiOfResult {
3127 // FIXME(eddyb) get a better `span` here.
3128 let span = self.layout_tcx_at_span();
3129 let tcx = self.tcx().at(span);
3132 tcx.fn_abi_of_instance(self.param_env().and((instance, extra_args))).map_err(|err| {
3133 // HACK(eddyb) at least for definitions of/calls to `Instance`s,
3134 // we can get some kind of span even if one wasn't provided.
3135 // However, we don't do this early in order to avoid calling
3136 // `def_span` unconditionally (which may have a perf penalty).
3137 let span = if !span.is_dummy() { span } else { tcx.def_span(instance.def_id()) };
3138 self.handle_fn_abi_err(err, span, FnAbiRequest::OfInstance { instance, extra_args })
// Blanket impl: every `FnAbiOfHelpers` context gets `FnAbiOf` for free.
3144 impl<'tcx, C: FnAbiOfHelpers<'tcx>> FnAbiOf<'tcx> for C {}
// Query provider (registered in `provide`): `FnAbi` for an indirect call
// through a `fn` pointer. No caller-location, no def-id, not a virtual call.
3146 fn fn_abi_of_fn_ptr<'tcx>(
3148 query: ty::ParamEnvAnd<'tcx, (ty::PolyFnSig<'tcx>, &'tcx ty::List<Ty<'tcx>>)>,
3149 ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
3150 let (param_env, (sig, extra_args)) = query.into_parts();
3152 LayoutCx { tcx, param_env }.fn_abi_new_uncached(sig, extra_args, None, None, false)
// Query provider: `FnAbi` for defining/calling a concrete `Instance`.
// Derives the signature via `fn_sig_for_fn_abi`, adds the implicit
// `#[track_caller]` location argument when required, and flags virtual
// calls so the self pointer is thinned.
3155 fn fn_abi_of_instance<'tcx>(
3157 query: ty::ParamEnvAnd<'tcx, (ty::Instance<'tcx>, &'tcx ty::List<Ty<'tcx>>)>,
3158 ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
3159 let (param_env, (instance, extra_args)) = query.into_parts();
3161 let sig = instance.fn_sig_for_fn_abi(tcx, param_env);
3163 let caller_location = if instance.def.requires_caller_location(tcx) {
3164 Some(tcx.caller_location_ty())
3169 LayoutCx { tcx, param_env }.fn_abi_new_uncached(
3173 Some(instance.def_id()),
3174 matches!(instance.def, ty::InstanceDef::Virtual(..)),
3178 // Handle safe Rust thin and fat pointers.
// Decorates a scalar argument/return component with LLVM-relevant attributes
// (zext, noundef, nonnull, noalias, readonly, dereferenceable size/align)
// based on what the Rust type guarantees.
3179 pub fn adjust_for_rust_scalar<'tcx>(
3180 cx: LayoutCx<'tcx, TyCtxt<'tcx>>,
3181 attrs: &mut ArgAttributes,
3183 layout: TyAndLayout<'tcx>,
3187 // Booleans are always a noundef i1 that needs to be zero-extended.
3188 if scalar.is_bool() {
3189 attrs.ext(ArgExtension::Zext);
3190 attrs.set(ArgAttribute::NoUndef);
3194 // Scalars which have invalid values cannot be undef.
3195 if !scalar.is_always_valid(&cx) {
3196 attrs.set(ArgAttribute::NoUndef);
3199 // Only pointer types handled below.
3200 let Scalar::Initialized { value: Pointer, valid_range} = scalar else { return };
// A validity range excluding 0 means the pointer is provably non-null.
3202 if !valid_range.contains(0) {
3203 attrs.set(ArgAttribute::NonNull);
// If we can identify what this pointer points to, derive size/align/aliasing
// attributes from the pointee (computed by `ty_and_layout_pointee_info_at`).
3206 if let Some(pointee) = layout.pointee_info_at(&cx, offset) {
3207 if let Some(kind) = pointee.safe {
3208 attrs.pointee_align = Some(pointee.align);
3210 // `Box` (`UniqueBorrowed`) are not necessarily dereferenceable
3211 // for the entire duration of the function as they can be deallocated
3212 // at any time. Same for shared mutable references. If LLVM had a
3213 // way to say "dereferenceable on entry" we could use it here.
3214 attrs.pointee_size = match kind {
3215 PointerKind::UniqueBorrowed
3216 | PointerKind::UniqueBorrowedPinned
3217 | PointerKind::Frozen => pointee.size,
3218 PointerKind::SharedMutable | PointerKind::UniqueOwned => Size::ZERO,
3221 // `Box`, `&T`, and `&mut T` cannot be undef.
3222 // Note that this only applies to the value of the pointer itself;
3223 // this attribute doesn't make it UB for the pointed-to data to be undef.
3224 attrs.set(ArgAttribute::NoUndef);
3226 // The aliasing rules for `Box<T>` are still not decided, but currently we emit
3227 // `noalias` for it. This can be turned off using an unstable flag.
3228 // See https://github.com/rust-lang/unsafe-code-guidelines/issues/326
3229 let noalias_for_box = cx.tcx.sess.opts.unstable_opts.box_noalias.unwrap_or(true);
3231 // `&mut` pointer parameters never alias other parameters,
3232 // or mutable global data
3234 // `&T` where `T` contains no `UnsafeCell<U>` is immutable,
3235 // and can be marked as both `readonly` and `noalias`, as
3236 // LLVM's definition of `noalias` is based solely on memory
3237 // dependencies rather than pointer equality
3239 // Due to past miscompiles in LLVM, we apply a separate NoAliasMutRef attribute
3240 // for UniqueBorrowed arguments, so that the codegen backend can decide whether
3241 // or not to actually emit the attribute. It can also be controlled with the
3242 // `-Zmutable-noalias` debugging option.
3243 let no_alias = match kind {
3244 PointerKind::SharedMutable
3245 | PointerKind::UniqueBorrowed
3246 | PointerKind::UniqueBorrowedPinned => false,
3247 PointerKind::UniqueOwned => noalias_for_box,
// Frozen `&T`: noalias is sound except on the return value.
3248 PointerKind::Frozen => !is_return,
3251 attrs.set(ArgAttribute::NoAlias);
3254 if kind == PointerKind::Frozen && !is_return {
3255 attrs.set(ArgAttribute::ReadOnly);
3258 if kind == PointerKind::UniqueBorrowed && !is_return {
3259 attrs.set(ArgAttribute::NoAliasMutRef);
3266 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
3267 // FIXME(eddyb) perhaps group the signature/type-containing (or all of them?)
3268 // arguments of this method, into a separate `struct`.
3269 #[tracing::instrument(
3270 skip(self, caller_location, fn_def_id, force_thin_self_ptr)
// Computes a fresh `FnAbi` for the signature `sig` — the calling convention
// plus a passing mode (`ArgAbi`) for the return value and every argument —
// without consulting any query cache. The result is arena-allocated.
//
// Parameters:
// * `sig`: the still-binder-wrapped signature; late-bound regions are
//   erased at the top of the body.
// * `extra_args`: argument types supplied beyond the signature's own inputs;
//   asserted empty unless the signature is C-variadic (for the "rust-call"
//   ABI the extras come from untupling the final tuple argument instead).
// * `caller_location`: when `Some`, one implicit trailing argument type
//   chained after all other inputs (caller-location tracking;
//   NOTE(review): presumably the `#[track_caller]` location — confirm).
// * `fn_def_id`: only consulted to decide `can_unwind` via `fn_can_unwind`.
// * `force_thin_self_ptr`: when true, argument 0 (a fat `dyn` receiver) is
//   replaced by a thin data pointer via `make_thin_self_ptr`.
3272 fn fn_abi_new_uncached(
3274 sig: ty::PolyFnSig<'tcx>,
3275 extra_args: &[Ty<'tcx>],
3276 caller_location: Option<Ty<'tcx>>,
3277 fn_def_id: Option<DefId>,
3278 // FIXME(eddyb) replace this with something typed, like an `enum`.
3279 force_thin_self_ptr: bool,
3280 ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
3281 let sig = self.tcx.normalize_erasing_late_bound_regions(self.param_env, sig);
// Map the surface ABI (e.g. "C", "Rust") to a backend calling convention.
3283 let conv = conv_from_spec_abi(self.tcx(), sig.abi);
3285 let mut inputs = sig.inputs();
3286 let extra_args = if sig.abi == RustCall {
// "rust-call" functions receive their non-self arguments as one trailing
// tuple; untuple it so each element becomes a separate extra argument,
// and drop the tuple itself from `inputs`.
3287 assert!(!sig.c_variadic && extra_args.is_empty());
3289 if let Some(input) = sig.inputs().last() {
3290 if let ty::Tuple(tupled_arguments) = input.kind() {
3291 inputs = &sig.inputs()[0..sig.inputs().len() - 1];
3295 "argument to function with \"rust-call\" ABI \
3301 "argument to function with \"rust-call\" ABI \
3306 assert!(sig.c_variadic || extra_args.is_empty());
// Target quirks: these gnu-like targets do NOT ignore zero-sized
// arguments (see the ZST handling in `arg_of` below).
3310 let target = &self.tcx.sess.target;
3311 let target_env_gnu_like = matches!(&target.env[..], "gnu" | "musl" | "uclibc");
3312 let win_x64_gnu = target.os == "windows" && target.arch == "x86_64" && target.env == "gnu";
3313 let linux_s390x_gnu_like =
3314 target.os == "linux" && target.arch == "s390x" && target_env_gnu_like;
3315 let linux_sparc64_gnu_like =
3316 target.os == "linux" && target.arch == "sparc64" && target_env_gnu_like;
3317 let linux_powerpc_gnu_like =
3318 target.os == "linux" && target.arch == "powerpc" && target_env_gnu_like;
3320 let rust_abi = matches!(sig.abi, RustIntrinsic | PlatformIntrinsic | Rust | RustCall);
// Computes the `ArgAbi` for a single value: `arg_idx == None` means the
// return place, `Some(i)` means the i-th input.
3322 let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| -> Result<_, FnAbiError<'tcx>> {
3323 let span = tracing::debug_span!("arg_of");
3324 let _entered = span.enter();
3325 let is_return = arg_idx.is_none();
3327 let layout = self.layout_of(ty)?;
3328 let layout = if force_thin_self_ptr && arg_idx == Some(0) {
3329 // Don't pass the vtable, it's not an argument of the virtual fn.
3330 // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
3331 // or `&/&mut dyn Trait` because this is special-cased elsewhere in codegen
3332 make_thin_self_ptr(self, layout)
// Start from the default `ArgAbi`, attaching Rust-specific scalar
// attributes (e.g. `NoUndef`/`NoAlias`/`ReadOnly`, set by
// `adjust_for_rust_scalar`) per scalar component.
3337 let mut arg = ArgAbi::new(self, layout, |layout, scalar, offset| {
3338 let mut attrs = ArgAttributes::new();
3339 adjust_for_rust_scalar(*self, &mut attrs, scalar, *layout, offset, is_return);
3343 if arg.layout.is_zst() {
3344 // For some forsaken reason, x86_64-pc-windows-gnu
3345 // doesn't ignore zero-sized struct arguments.
3346 // The same is true for {s390x,sparc64,powerpc}-unknown-linux-{gnu,musl,uclibc}.
3350 && !linux_s390x_gnu_like
3351 && !linux_sparc64_gnu_like
3352 && !linux_powerpc_gnu_like)
3354 arg.mode = PassMode::Ignore;
// Assemble the full ABI: return value first, then the declared inputs,
// followed by the untupled/variadic extras and finally the optional
// caller-location argument — in exactly that order.
3361 let mut fn_abi = FnAbi {
3362 ret: arg_of(sig.output(), None)?,
3366 .chain(extra_args.iter().copied())
3367 .chain(caller_location)
3369 .map(|(i, ty)| arg_of(ty, Some(i)))
3370 .collect::<Result<_, _>>()?,
3371 c_variadic: sig.c_variadic,
3372 fixed_count: inputs.len() as u32,
3374 can_unwind: fn_can_unwind(self.tcx(), fn_def_id, sig.abi),
// Apply per-ABI fixups (indirect aggregates, foreign-ABI adjustment)
// before handing out the arena-allocated result.
3376 self.fn_abi_adjust_for_abi(&mut fn_abi, sig.abi)?;
3377 debug!("fn_abi_new_uncached = {:?}", fn_abi);
3378 Ok(self.tcx.arena.alloc(fn_abi))
3381 #[tracing::instrument(level = "trace", skip(self))]
// Applies ABI-dependent fixups to a freshly built `FnAbi`, mutating it in
// place:
// * `SpecAbi::Unadjusted` is passed through untouched;
// * the Rust-family ABIs (Rust / RustCall / RustIntrinsic /
//   PlatformIntrinsic) get the `fixup` closure applied to the return value
//   and every argument;
// * every other (foreign) ABI is delegated to `adjust_for_foreign_abi`.
3382 fn fn_abi_adjust_for_abi(
3384 fn_abi: &mut FnAbi<'tcx, Ty<'tcx>>,
3386 ) -> Result<(), FnAbiError<'tcx>> {
3387 if abi == SpecAbi::Unadjusted {
3391 if abi == SpecAbi::Rust
3392 || abi == SpecAbi::RustCall
3393 || abi == SpecAbi::RustIntrinsic
3394 || abi == SpecAbi::PlatformIntrinsic
3396 let fixup = |arg: &mut ArgAbi<'tcx, Ty<'tcx>>| {
// Arguments already marked `PassMode::Ignore` (e.g. ZSTs) need no fixup.
3397 if arg.is_ignore() {
3401 match arg.layout.abi {
// Aggregates fall through to the size-based logic further below.
3402 Abi::Aggregate { .. } => {}
3404 // This is a fun case! The gist of what this is doing is
3405 // that we want callers and callees to always agree on the
3406 // ABI of how they pass SIMD arguments. If we were to *not*
3407 // make these arguments indirect then they'd be immediates
3408 // in LLVM, which means that they'd used whatever the
3409 // appropriate ABI is for the callee and the caller. That
3410 // means, for example, if the caller doesn't have AVX
3411 // enabled but the callee does, then passing an AVX argument
3412 // across this boundary would cause corrupt data to show up.
3414 // This problem is fixed by unconditionally passing SIMD
3415 // arguments through memory between callers and callees
3416 // which should get them all to agree on ABI regardless of
3417 // target feature sets. Some more information about this
3418 // issue can be found in #44367.
3420 // Note that the platform intrinsic ABI is exempt here as
3421 // that's how we connect up to LLVM and it's unstable
3422 // anyway, we control all calls to it in libstd.
3424 if abi != SpecAbi::PlatformIntrinsic
3425 && self.tcx.sess.target.simd_types_indirect =>
3427 arg.make_indirect();
// Unsized or larger-than-pointer values are passed indirectly (by
// reference); smaller ones are cast to a same-sized integer register.
3434 let size = arg.layout.size;
3435 if arg.layout.is_unsized() || size > Pointer.size(self) {
3436 arg.make_indirect();
3438 // We want to pass small aggregates as immediates, but using
3439 // a LLVM aggregate type for this leads to bad optimizations,
3440 // so we pick an appropriately sized integer type instead.
3441 arg.cast_to(Reg { kind: RegKind::Integer, size });
// Run the fixup over the return place and each argument in turn.
3444 fixup(&mut fn_abi.ret);
3445 for arg in fn_abi.args.iter_mut() {
// Non-Rust ABIs: use the target-specific foreign-ABI adjustment instead.
3449 fn_abi.adjust_for_foreign_abi(self, abi)?;
3456 #[tracing::instrument(level = "debug", skip(cx))]
3457 fn make_thin_self_ptr<'tcx>(
3458 cx: &(impl HasTyCtxt<'tcx> + HasParamEnv<'tcx>),
3459 layout: TyAndLayout<'tcx>,
3460 ) -> TyAndLayout<'tcx> {
3462 let fat_pointer_ty = if layout.is_unsized() {
3463 // unsized `self` is passed as a pointer to `self`
3464 // FIXME (mikeyhew) change this to use &own if it is ever added to the language
3465 tcx.mk_mut_ptr(layout.ty)
3468 Abi::ScalarPair(..) | Abi::Scalar(..) => (),
3469 _ => bug!("receiver type has unsupported layout: {:?}", layout),
3472 // In the case of Rc<Self>, we need to explicitly pass a *mut RcBox<Self>
3473 // with a Scalar (not ScalarPair) ABI. This is a hack that is understood
3474 // elsewhere in the compiler as a method on a `dyn Trait`.
3475 // To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes until we
3476 // get a built-in pointer type
3477 let mut fat_pointer_layout = layout;
3478 'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr()
3479 && !fat_pointer_layout.ty.is_region_ptr()
3481 for i in 0..fat_pointer_layout.fields.count() {
3482 let field_layout = fat_pointer_layout.field(cx, i);
3484 if !field_layout.is_zst() {
3485 fat_pointer_layout = field_layout;
3486 continue 'descend_newtypes;
3490 bug!("receiver has no non-zero-sized fields {:?}", fat_pointer_layout);
3493 fat_pointer_layout.ty
3496 // we now have a type like `*mut RcBox<dyn Trait>`
3497 // change its layout to that of `*mut ()`, a thin pointer, but keep the same type
3498 // this is understood as a special case elsewhere in the compiler
3499 let unit_ptr_ty = tcx.mk_mut_ptr(tcx.mk_unit());
3504 // NOTE(eddyb) using an empty `ParamEnv`, and `unwrap`-ing the `Result`
3505 // should always work because the type is always `*mut ()`.
3506 ..tcx.layout_of(ty::ParamEnv::reveal_all().and(unit_ptr_ty)).unwrap()