1 use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
2 use crate::mir::{GeneratorLayout, GeneratorSavedLocal};
3 use crate::ty::normalize_erasing_regions::NormalizationError;
4 use crate::ty::subst::Subst;
5 use crate::ty::{self, subst::SubstsRef, ReprOptions, Ty, TyCtxt, TypeFoldable};
7 use rustc_attr as attr;
8 use rustc_data_structures::intern::Interned;
10 use rustc_hir::lang_items::LangItem;
11 use rustc_index::bit_set::BitSet;
12 use rustc_index::vec::{Idx, IndexVec};
13 use rustc_session::{config::OptLevel, DataTypeKind, FieldInfo, SizeKind, VariantInfo};
14 use rustc_span::symbol::Symbol;
15 use rustc_span::{Span, DUMMY_SP};
16 use rustc_target::abi::call::{
17 ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, Conv, FnAbi, PassMode, Reg, RegKind,
19 use rustc_target::abi::*;
20 use rustc_target::spec::{abi::Abi as SpecAbi, HasTargetSpec, PanicStrategy, Target};
25 use std::num::NonZeroUsize;
28 use rand::{seq::SliceRandom, SeedableRng};
29 use rand_xoshiro::Xoshiro128StarStar;
31 pub fn provide(providers: &mut ty::query::Providers) {
33 ty::query::Providers { layout_of, fn_abi_of_fn_ptr, fn_abi_of_instance, ..*providers };
36 pub trait IntegerExt {
37 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx>;
38 fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
39 fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer;
40 fn from_uint_ty<C: HasDataLayout>(cx: &C, uty: ty::UintTy) -> Integer;
50 impl IntegerExt for Integer {
52 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx> {
53 match (*self, signed) {
54 (I8, false) => tcx.types.u8,
55 (I16, false) => tcx.types.u16,
56 (I32, false) => tcx.types.u32,
57 (I64, false) => tcx.types.u64,
58 (I128, false) => tcx.types.u128,
59 (I8, true) => tcx.types.i8,
60 (I16, true) => tcx.types.i16,
61 (I32, true) => tcx.types.i32,
62 (I64, true) => tcx.types.i64,
63 (I128, true) => tcx.types.i128,
67 /// Gets the Integer type from an attr::IntType.
68 fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
69 let dl = cx.data_layout();
72 attr::SignedInt(ast::IntTy::I8) | attr::UnsignedInt(ast::UintTy::U8) => I8,
73 attr::SignedInt(ast::IntTy::I16) | attr::UnsignedInt(ast::UintTy::U16) => I16,
74 attr::SignedInt(ast::IntTy::I32) | attr::UnsignedInt(ast::UintTy::U32) => I32,
75 attr::SignedInt(ast::IntTy::I64) | attr::UnsignedInt(ast::UintTy::U64) => I64,
76 attr::SignedInt(ast::IntTy::I128) | attr::UnsignedInt(ast::UintTy::U128) => I128,
77 attr::SignedInt(ast::IntTy::Isize) | attr::UnsignedInt(ast::UintTy::Usize) => {
78 dl.ptr_sized_integer()
83 fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer {
86 ty::IntTy::I16 => I16,
87 ty::IntTy::I32 => I32,
88 ty::IntTy::I64 => I64,
89 ty::IntTy::I128 => I128,
90 ty::IntTy::Isize => cx.data_layout().ptr_sized_integer(),
93 fn from_uint_ty<C: HasDataLayout>(cx: &C, ity: ty::UintTy) -> Integer {
96 ty::UintTy::U16 => I16,
97 ty::UintTy::U32 => I32,
98 ty::UintTy::U64 => I64,
99 ty::UintTy::U128 => I128,
100 ty::UintTy::Usize => cx.data_layout().ptr_sized_integer(),
104 /// Finds the appropriate Integer type and signedness for the given
105 /// signed discriminant range and `#[repr]` attribute.
106 /// N.B.: `u128` values above `i128::MAX` will be treated as signed, but
107 /// that shouldn't affect anything, other than maybe debuginfo.
114 ) -> (Integer, bool) {
115 // Theoretically, negative values could be larger in unsigned representation
116 // than the unsigned representation of the signed minimum. However, if there
117 // are any negative values, the only valid unsigned representation is u128
118 // which can fit all i128 values, so the result remains unaffected.
119 let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
120 let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));
122 if let Some(ity) = repr.int {
123 let discr = Integer::from_attr(&tcx, ity);
124 let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
127 "Integer::repr_discr: `#[repr]` hint too small for \
128 discriminant range of enum `{}",
132 return (discr, ity.is_signed());
135 let at_least = if repr.c() {
136 // This is usually I32, however it can be different on some platforms,
137 // notably hexagon and arm-none/thumb-none
138 tcx.data_layout().c_enum_min_size
140 // repr(Rust) enums try to be as small as possible
144 // If there are no negative values, we can use the unsigned fit.
146 (cmp::max(unsigned_fit, at_least), false)
148 (cmp::max(signed_fit, at_least), true)
153 pub trait PrimitiveExt {
154 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
155 fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
158 impl PrimitiveExt for Primitive {
160 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
162 Int(i, signed) => i.to_ty(tcx, signed),
163 F32 => tcx.types.f32,
164 F64 => tcx.types.f64,
165 Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
169 /// Return an *integer* type matching this primitive.
170 /// Useful in particular when dealing with enum discriminants.
172 fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
174 Int(i, signed) => i.to_ty(tcx, signed),
175 Pointer => tcx.types.usize,
176 F32 | F64 => bug!("floats do not have an int type"),
/// The first half of a fat pointer.
///
/// - For a trait object, this is the address of the box.
/// - For a slice, this is the base address.
pub const FAT_PTR_ADDR: usize = 0;

/// The second half of a fat pointer.
///
/// - For a trait object, this is the address of the vtable.
/// - For a slice, this is the length.
pub const FAT_PTR_EXTRA: usize = 1;

/// The maximum supported number of lanes in a SIMD vector.
///
/// This value is selected based on backend support:
/// * LLVM does not appear to have a vector width limit.
/// * Cranelift stores the base-2 log of the lane count in a 4 bit integer.
///
/// (`1 << 0xF` = 2^15 = 32768 lanes.)
pub const MAX_SIMD_LANES: u64 = 1 << 0xF;
200 #[derive(Copy, Clone, Debug, HashStable, TyEncodable, TyDecodable)]
201 pub enum LayoutError<'tcx> {
203 SizeOverflow(Ty<'tcx>),
204 NormalizationFailure(Ty<'tcx>, NormalizationError<'tcx>),
207 impl<'tcx> fmt::Display for LayoutError<'tcx> {
208 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
210 LayoutError::Unknown(ty) => write!(f, "the type `{}` has an unknown layout", ty),
211 LayoutError::SizeOverflow(ty) => {
212 write!(f, "values of the type `{}` are too big for the current architecture", ty)
214 LayoutError::NormalizationFailure(t, e) => write!(
216 "unable to determine layout for `{}` because `{}` cannot be normalized",
218 e.get_type_for_failure()
224 #[instrument(skip(tcx, query), level = "debug")]
227 query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
228 ) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
229 ty::tls::with_related_context(tcx, move |icx| {
230 let (param_env, ty) = query.into_parts();
233 if !tcx.recursion_limit().value_within_limit(icx.layout_depth) {
234 tcx.sess.fatal(&format!("overflow representing the type `{}`", ty));
237 // Update the ImplicitCtxt to increase the layout_depth
238 let icx = ty::tls::ImplicitCtxt { layout_depth: icx.layout_depth + 1, ..icx.clone() };
240 ty::tls::enter_context(&icx, |_| {
241 let param_env = param_env.with_reveal_all_normalized(tcx);
242 let unnormalized_ty = ty;
244 // FIXME: We might want to have two different versions of `layout_of`:
245 // One that can be called after typecheck has completed and can use
246 // `normalize_erasing_regions` here and another one that can be called
247 // before typecheck has completed and uses `try_normalize_erasing_regions`.
248 let ty = match tcx.try_normalize_erasing_regions(param_env, ty) {
250 Err(normalization_error) => {
251 return Err(LayoutError::NormalizationFailure(ty, normalization_error));
255 if ty != unnormalized_ty {
256 // Ensure this layout is also cached for the normalized type.
257 return tcx.layout_of(param_env.and(ty));
260 let cx = LayoutCx { tcx, param_env };
262 let layout = cx.layout_of_uncached(ty)?;
263 let layout = TyAndLayout { ty, layout };
265 cx.record_layout_for_printing(layout);
267 // Type-level uninhabitedness should always imply ABI uninhabitedness.
268 if tcx.conservative_is_privately_uninhabited(param_env.and(ty)) {
269 assert!(layout.abi.is_uninhabited());
277 pub struct LayoutCx<'tcx, C> {
279 pub param_env: ty::ParamEnv<'tcx>,
282 #[derive(Copy, Clone, Debug)]
284 /// A tuple, closure, or univariant which cannot be coerced to unsized.
286 /// A univariant, the last field of which may be coerced to unsized.
288 /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
289 Prefixed(Size, Align),
// Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
// This is used to go between `memory_index` (source field order to memory order)
// and `inverse_memory_index` (memory order to source field order).
// See also `FieldsShape::Arbitrary::memory_index` for more details.
// FIXME(eddyb) build a better abstraction for permutations, if possible.
fn invert_mapping(map: &[u32]) -> Vec<u32> {
    let mut inverse = vec![0; map.len()];
    // Iterate directly over (index, value) pairs; avoids per-access bounds
    // checks on `map` compared to indexing.
    for (i, &m) in map.iter().enumerate() {
        inverse[m as usize] = i as u32;
    }
    inverse
}
305 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
306 fn scalar_pair(&self, a: Scalar, b: Scalar) -> LayoutS<'tcx> {
307 let dl = self.data_layout();
308 let b_align = b.align(dl);
309 let align = a.align(dl).max(b_align).max(dl.aggregate_align);
310 let b_offset = a.size(dl).align_to(b_align.abi);
311 let size = (b_offset + b.size(dl)).align_to(align.abi);
313 // HACK(nox): We iter on `b` and then `a` because `max_by_key`
314 // returns the last maximum.
315 let largest_niche = Niche::from_scalar(dl, b_offset, b)
317 .chain(Niche::from_scalar(dl, Size::ZERO, a))
318 .max_by_key(|niche| niche.available(dl));
321 variants: Variants::Single { index: VariantIdx::new(0) },
322 fields: FieldsShape::Arbitrary {
323 offsets: vec![Size::ZERO, b_offset],
324 memory_index: vec![0, 1],
326 abi: Abi::ScalarPair(a, b),
333 fn univariant_uninterned(
336 fields: &[TyAndLayout<'_>],
339 ) -> Result<LayoutS<'tcx>, LayoutError<'tcx>> {
340 let dl = self.data_layout();
341 let pack = repr.pack;
342 if pack.is_some() && repr.align.is_some() {
343 self.tcx.sess.delay_span_bug(DUMMY_SP, "struct cannot be packed and aligned");
344 return Err(LayoutError::Unknown(ty));
347 let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };
349 let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();
351 let optimize = !repr.inhibit_struct_field_reordering_opt();
354 if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
355 let optimizing = &mut inverse_memory_index[..end];
356 let field_align = |f: &TyAndLayout<'_>| {
357 if let Some(pack) = pack { f.align.abi.min(pack) } else { f.align.abi }
360 // If `-Z randomize-layout` was enabled for the type definition we can shuffle
361 // the field ordering to try and catch some code making assumptions about layouts
362 // we don't guarantee
363 if repr.can_randomize_type_layout() {
364 // `ReprOptions.layout_seed` is a deterministic seed that we can use to
365 // randomize field ordering with
366 let mut rng = Xoshiro128StarStar::seed_from_u64(repr.field_shuffle_seed);
368 // Shuffle the ordering of the fields
369 optimizing.shuffle(&mut rng);
371 // Otherwise we just leave things alone and actually optimize the type's fields
374 StructKind::AlwaysSized | StructKind::MaybeUnsized => {
375 optimizing.sort_by_key(|&x| {
376 // Place ZSTs first to avoid "interesting offsets",
377 // especially with only one or two non-ZST fields.
378 let f = &fields[x as usize];
379 (!f.is_zst(), cmp::Reverse(field_align(f)))
383 StructKind::Prefixed(..) => {
384 // Sort in ascending alignment so that the layout stays optimal
385 // regardless of the prefix
386 optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
390 // FIXME(Kixiron): We can always shuffle fields within a given alignment class
391 // regardless of the status of `-Z randomize-layout`
395 // inverse_memory_index holds field indices by increasing memory offset.
396 // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
397 // We now write field offsets to the corresponding offset slot;
398 // field 5 with offset 0 puts 0 in offsets[5].
399 // At the bottom of this function, we invert `inverse_memory_index` to
400 // produce `memory_index` (see `invert_mapping`).
402 let mut sized = true;
403 let mut offsets = vec![Size::ZERO; fields.len()];
404 let mut offset = Size::ZERO;
405 let mut largest_niche = None;
406 let mut largest_niche_available = 0;
408 if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
410 if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
411 align = align.max(AbiAndPrefAlign::new(prefix_align));
412 offset = prefix_size.align_to(prefix_align);
415 for &i in &inverse_memory_index {
416 let field = fields[i as usize];
418 self.tcx.sess.delay_span_bug(
421 "univariant: field #{} of `{}` comes after unsized field",
428 if field.is_unsized() {
432 // Invariant: offset < dl.obj_size_bound() <= 1<<61
433 let field_align = if let Some(pack) = pack {
434 field.align.min(AbiAndPrefAlign::new(pack))
438 offset = offset.align_to(field_align.abi);
439 align = align.max(field_align);
441 debug!("univariant offset: {:?} field: {:#?}", offset, field);
442 offsets[i as usize] = offset;
444 if !repr.hide_niche() {
445 if let Some(mut niche) = field.largest_niche {
446 let available = niche.available(dl);
447 if available > largest_niche_available {
448 largest_niche_available = available;
449 niche.offset += offset;
450 largest_niche = Some(niche);
455 offset = offset.checked_add(field.size, dl).ok_or(LayoutError::SizeOverflow(ty))?;
458 if let Some(repr_align) = repr.align {
459 align = align.max(AbiAndPrefAlign::new(repr_align));
462 debug!("univariant min_size: {:?}", offset);
463 let min_size = offset;
465 // As stated above, inverse_memory_index holds field indices by increasing offset.
466 // This makes it an already-sorted view of the offsets vec.
467 // To invert it, consider:
468 // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
469 // Field 5 would be the first element, so memory_index is i:
470 // Note: if we didn't optimize, it's already right.
473 if optimize { invert_mapping(&inverse_memory_index) } else { inverse_memory_index };
475 let size = min_size.align_to(align.abi);
476 let mut abi = Abi::Aggregate { sized };
478 // Unpack newtype ABIs and find scalar pairs.
479 if sized && size.bytes() > 0 {
480 // All other fields must be ZSTs.
481 let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());
483 match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
484 // We have exactly one non-ZST field.
485 (Some((i, field)), None, None) => {
486 // Field fills the struct and it has a scalar or scalar pair ABI.
487 if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size
490 // For plain scalars, or vectors of them, we can't unpack
491 // newtypes for `#[repr(C)]`, as that affects C ABIs.
492 Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
495 // But scalar pairs are Rust-specific and get
496 // treated as aggregates by C ABIs anyway.
497 Abi::ScalarPair(..) => {
505 // Two non-ZST fields, and they're both scalars.
510 layout: Layout(Interned(&LayoutS { abi: Abi::Scalar(a), .. }, _)),
517 layout: Layout(Interned(&LayoutS { abi: Abi::Scalar(b), .. }, _)),
523 // Order by the memory placement, not source order.
524 let ((i, a), (j, b)) =
525 if offsets[i] < offsets[j] { ((i, a), (j, b)) } else { ((j, b), (i, a)) };
526 let pair = self.scalar_pair(a, b);
527 let pair_offsets = match pair.fields {
528 FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
529 assert_eq!(memory_index, &[0, 1]);
534 if offsets[i] == pair_offsets[0]
535 && offsets[j] == pair_offsets[1]
536 && align == pair.align
539 // We can use `ScalarPair` only when it matches our
540 // already computed layout (including `#[repr(C)]`).
549 if fields.iter().any(|f| f.abi.is_uninhabited()) {
550 abi = Abi::Uninhabited;
554 variants: Variants::Single { index: VariantIdx::new(0) },
555 fields: FieldsShape::Arbitrary { offsets, memory_index },
563 fn layout_of_uncached(&self, ty: Ty<'tcx>) -> Result<Layout<'tcx>, LayoutError<'tcx>> {
565 let param_env = self.param_env;
566 let dl = self.data_layout();
567 let scalar_unit = |value: Primitive| {
568 let size = value.size(dl);
569 assert!(size.bits() <= 128);
570 Scalar::Initialized { value, valid_range: WrappingRange::full(size) }
573 |value: Primitive| tcx.intern_layout(LayoutS::scalar(self, scalar_unit(value)));
575 let univariant = |fields: &[TyAndLayout<'_>], repr: &ReprOptions, kind| {
576 Ok(tcx.intern_layout(self.univariant_uninterned(ty, fields, repr, kind)?))
578 debug_assert!(!ty.has_infer_types_or_consts());
580 Ok(match *ty.kind() {
582 ty::Bool => tcx.intern_layout(LayoutS::scalar(
584 Scalar::Initialized {
585 value: Int(I8, false),
586 valid_range: WrappingRange { start: 0, end: 1 },
589 ty::Char => tcx.intern_layout(LayoutS::scalar(
591 Scalar::Initialized {
592 value: Int(I32, false),
593 valid_range: WrappingRange { start: 0, end: 0x10FFFF },
596 ty::Int(ity) => scalar(Int(Integer::from_int_ty(dl, ity), true)),
597 ty::Uint(ity) => scalar(Int(Integer::from_uint_ty(dl, ity), false)),
598 ty::Float(fty) => scalar(match fty {
599 ty::FloatTy::F32 => F32,
600 ty::FloatTy::F64 => F64,
603 let mut ptr = scalar_unit(Pointer);
604 ptr.valid_range_mut().start = 1;
605 tcx.intern_layout(LayoutS::scalar(self, ptr))
609 ty::Never => tcx.intern_layout(LayoutS {
610 variants: Variants::Single { index: VariantIdx::new(0) },
611 fields: FieldsShape::Primitive,
612 abi: Abi::Uninhabited,
618 // Potentially-wide pointers.
619 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
620 let mut data_ptr = scalar_unit(Pointer);
621 if !ty.is_unsafe_ptr() {
622 data_ptr.valid_range_mut().start = 1;
625 let pointee = tcx.normalize_erasing_regions(param_env, pointee);
626 if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
627 return Ok(tcx.intern_layout(LayoutS::scalar(self, data_ptr)));
630 let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
631 let metadata = match unsized_part.kind() {
633 return Ok(tcx.intern_layout(LayoutS::scalar(self, data_ptr)));
635 ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)),
637 let mut vtable = scalar_unit(Pointer);
638 vtable.valid_range_mut().start = 1;
641 _ => return Err(LayoutError::Unknown(unsized_part)),
644 // Effectively a (ptr, meta) tuple.
645 tcx.intern_layout(self.scalar_pair(data_ptr, metadata))
648 // Arrays and slices.
649 ty::Array(element, mut count) => {
650 if count.has_projections() {
651 count = tcx.normalize_erasing_regions(param_env, count);
652 if count.has_projections() {
653 return Err(LayoutError::Unknown(ty));
657 let count = count.try_eval_usize(tcx, param_env).ok_or(LayoutError::Unknown(ty))?;
658 let element = self.layout_of(element)?;
660 element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;
663 if count != 0 && tcx.conservative_is_privately_uninhabited(param_env.and(ty)) {
666 Abi::Aggregate { sized: true }
669 let largest_niche = if count != 0 { element.largest_niche } else { None };
671 tcx.intern_layout(LayoutS {
672 variants: Variants::Single { index: VariantIdx::new(0) },
673 fields: FieldsShape::Array { stride: element.size, count },
676 align: element.align,
680 ty::Slice(element) => {
681 let element = self.layout_of(element)?;
682 tcx.intern_layout(LayoutS {
683 variants: Variants::Single { index: VariantIdx::new(0) },
684 fields: FieldsShape::Array { stride: element.size, count: 0 },
685 abi: Abi::Aggregate { sized: false },
687 align: element.align,
691 ty::Str => tcx.intern_layout(LayoutS {
692 variants: Variants::Single { index: VariantIdx::new(0) },
693 fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
694 abi: Abi::Aggregate { sized: false },
701 ty::FnDef(..) => univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?,
702 ty::Dynamic(..) | ty::Foreign(..) => {
703 let mut unit = self.univariant_uninterned(
706 &ReprOptions::default(),
707 StructKind::AlwaysSized,
710 Abi::Aggregate { ref mut sized } => *sized = false,
713 tcx.intern_layout(unit)
716 ty::Generator(def_id, substs, _) => self.generator_layout(ty, def_id, substs)?,
718 ty::Closure(_, ref substs) => {
719 let tys = substs.as_closure().upvar_tys();
721 &tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
722 &ReprOptions::default(),
723 StructKind::AlwaysSized,
729 if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };
732 &tys.iter().map(|k| self.layout_of(k)).collect::<Result<Vec<_>, _>>()?,
733 &ReprOptions::default(),
738 // SIMD vector types.
739 ty::Adt(def, substs) if def.repr().simd() => {
740 if !def.is_struct() {
741 // Should have yielded E0517 by now.
742 tcx.sess.delay_span_bug(
744 "#[repr(simd)] was applied to an ADT that is not a struct",
746 return Err(LayoutError::Unknown(ty));
749 // Supported SIMD vectors are homogeneous ADTs with at least one field:
751 // * #[repr(simd)] struct S(T, T, T, T);
752 // * #[repr(simd)] struct S { x: T, y: T, z: T, w: T }
753 // * #[repr(simd)] struct S([T; 4])
755 // where T is a primitive scalar (integer/float/pointer).
757 // SIMD vectors with zero fields are not supported.
758 // (should be caught by typeck)
759 if def.non_enum_variant().fields.is_empty() {
760 tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
763 // Type of the first ADT field:
764 let f0_ty = def.non_enum_variant().fields[0].ty(tcx, substs);
766 // Heterogeneous SIMD vectors are not supported:
767 // (should be caught by typeck)
768 for fi in &def.non_enum_variant().fields {
769 if fi.ty(tcx, substs) != f0_ty {
770 tcx.sess.fatal(&format!("monomorphising heterogeneous SIMD type `{}`", ty));
774 // The element type and number of elements of the SIMD vector
775 // are obtained from:
777 // * the element type and length of the single array field, if
778 // the first field is of array type, or
780 // * the homogenous field type and the number of fields.
781 let (e_ty, e_len, is_array) = if let ty::Array(e_ty, _) = f0_ty.kind() {
782 // First ADT field is an array:
784 // SIMD vectors with multiple array fields are not supported:
785 // (should be caught by typeck)
786 if def.non_enum_variant().fields.len() != 1 {
787 tcx.sess.fatal(&format!(
788 "monomorphising SIMD type `{}` with more than one array field",
793 // Extract the number of elements from the layout of the array field:
795 layout: Layout(Interned(LayoutS { fields: FieldsShape::Array { count, .. }, .. }, _)),
797 }) = self.layout_of(f0_ty) else {
798 return Err(LayoutError::Unknown(ty));
801 (*e_ty, *count, true)
803 // First ADT field is not an array:
804 (f0_ty, def.non_enum_variant().fields.len() as _, false)
807 // SIMD vectors of zero length are not supported.
808 // Additionally, lengths are capped at 2^16 as a fixed maximum backends must
811 // Can't be caught in typeck if the array length is generic.
813 tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
814 } else if e_len > MAX_SIMD_LANES {
815 tcx.sess.fatal(&format!(
816 "monomorphising SIMD type `{}` of length greater than {}",
821 // Compute the ABI of the element type:
822 let e_ly = self.layout_of(e_ty)?;
823 let Abi::Scalar(e_abi) = e_ly.abi else {
824 // This error isn't caught in typeck, e.g., if
825 // the element type of the vector is generic.
826 tcx.sess.fatal(&format!(
827 "monomorphising SIMD type `{}` with a non-primitive-scalar \
828 (integer/float/pointer) element type `{}`",
833 // Compute the size and alignment of the vector:
834 let size = e_ly.size.checked_mul(e_len, dl).ok_or(LayoutError::SizeOverflow(ty))?;
835 let align = dl.vector_align(size);
836 let size = size.align_to(align.abi);
838 // Compute the placement of the vector fields:
839 let fields = if is_array {
840 FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] }
842 FieldsShape::Array { stride: e_ly.size, count: e_len }
845 tcx.intern_layout(LayoutS {
846 variants: Variants::Single { index: VariantIdx::new(0) },
848 abi: Abi::Vector { element: e_abi, count: e_len },
849 largest_niche: e_ly.largest_niche,
856 ty::Adt(def, substs) => {
857 // Cache the field layouts.
864 .map(|field| self.layout_of(field.ty(tcx, substs)))
865 .collect::<Result<Vec<_>, _>>()
867 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
870 if def.repr().pack.is_some() && def.repr().align.is_some() {
871 self.tcx.sess.delay_span_bug(
872 tcx.def_span(def.did()),
873 "union cannot be packed and aligned",
875 return Err(LayoutError::Unknown(ty));
879 if def.repr().pack.is_some() { dl.i8_align } else { dl.aggregate_align };
881 if let Some(repr_align) = def.repr().align {
882 align = align.max(AbiAndPrefAlign::new(repr_align));
885 let optimize = !def.repr().inhibit_union_abi_opt();
886 let mut size = Size::ZERO;
887 let mut abi = Abi::Aggregate { sized: true };
888 let index = VariantIdx::new(0);
889 for field in &variants[index] {
890 assert!(!field.is_unsized());
891 align = align.max(field.align);
893 // If all non-ZST fields have the same ABI, forward this ABI
894 if optimize && !field.is_zst() {
895 // Discard valid range information and allow undef
896 let field_abi = match field.abi {
897 Abi::Scalar(x) => Abi::Scalar(x.to_union()),
898 Abi::ScalarPair(x, y) => {
899 Abi::ScalarPair(x.to_union(), y.to_union())
901 Abi::Vector { element: x, count } => {
902 Abi::Vector { element: x.to_union(), count }
904 Abi::Uninhabited | Abi::Aggregate { .. } => {
905 Abi::Aggregate { sized: true }
909 if size == Size::ZERO {
910 // first non ZST: initialize 'abi'
912 } else if abi != field_abi {
913 // different fields have different ABI: reset to Aggregate
914 abi = Abi::Aggregate { sized: true };
918 size = cmp::max(size, field.size);
921 if let Some(pack) = def.repr().pack {
922 align = align.min(AbiAndPrefAlign::new(pack));
925 return Ok(tcx.intern_layout(LayoutS {
926 variants: Variants::Single { index },
927 fields: FieldsShape::Union(
928 NonZeroUsize::new(variants[index].len())
929 .ok_or(LayoutError::Unknown(ty))?,
934 size: size.align_to(align.abi),
938 // A variant is absent if it's uninhabited and only has ZST fields.
939 // Present uninhabited variants only require space for their fields,
940 // but *not* an encoding of the discriminant (e.g., a tag value).
941 // See issue #49298 for more details on the need to leave space
942 // for non-ZST uninhabited data (mostly partial initialization).
943 let absent = |fields: &[TyAndLayout<'_>]| {
944 let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
945 let is_zst = fields.iter().all(|f| f.is_zst());
946 uninhabited && is_zst
948 let (present_first, present_second) = {
949 let mut present_variants = variants
951 .filter_map(|(i, v)| if absent(v) { None } else { Some(i) });
952 (present_variants.next(), present_variants.next())
954 let present_first = match present_first {
955 Some(present_first) => present_first,
956 // Uninhabited because it has no variants, or only absent ones.
957 None if def.is_enum() => {
958 return Ok(tcx.layout_of(param_env.and(tcx.types.never))?.layout);
960 // If it's a struct, still compute a layout so that we can still compute the
962 None => VariantIdx::new(0),
965 let is_struct = !def.is_enum() ||
966 // Only one variant is present.
967 (present_second.is_none() &&
968 // Representation optimizations are allowed.
969 !def.repr().inhibit_enum_layout_opt());
971 // Struct, or univariant enum equivalent to a struct.
972 // (Typechecking will reject discriminant-sizing attrs.)
974 let v = present_first;
975 let kind = if def.is_enum() || variants[v].is_empty() {
976 StructKind::AlwaysSized
978 let param_env = tcx.param_env(def.did());
979 let last_field = def.variant(v).fields.last().unwrap();
981 tcx.type_of(last_field.did).is_sized(tcx.at(DUMMY_SP), param_env);
983 StructKind::MaybeUnsized
985 StructKind::AlwaysSized
989 let mut st = self.univariant_uninterned(ty, &variants[v], &def.repr(), kind)?;
990 st.variants = Variants::Single { index: v };
991 let (start, end) = self.tcx.layout_scalar_valid_range(def.did());
993 Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
994 // the asserts ensure that we are not using the
995 // `#[rustc_layout_scalar_valid_range(n)]`
996 // attribute to widen the range of anything as that would probably
997 // result in UB somewhere
998 // FIXME(eddyb) the asserts are probably not needed,
999 // as larger validity ranges would result in missed
1000 // optimizations, *not* wrongly assuming the inner
1001 // value is valid. e.g. unions enlarge validity ranges,
1002 // because the values may be uninitialized.
1003 if let Bound::Included(start) = start {
1004 // FIXME(eddyb) this might be incorrect - it doesn't
1005 // account for wrap-around (end < start) ranges.
1006 let valid_range = scalar.valid_range_mut();
1007 assert!(valid_range.start <= start);
1008 valid_range.start = start;
1010 if let Bound::Included(end) = end {
1011 // FIXME(eddyb) this might be incorrect - it doesn't
1012 // account for wrap-around (end < start) ranges.
1013 let valid_range = scalar.valid_range_mut();
1014 assert!(valid_range.end >= end);
1015 valid_range.end = end;
1018 // Update `largest_niche` if we have introduced a larger niche.
1019 let niche = if def.repr().hide_niche() {
1022 Niche::from_scalar(dl, Size::ZERO, *scalar)
1024 if let Some(niche) = niche {
1025 match st.largest_niche {
1026 Some(largest_niche) => {
1027 // Replace the existing niche even if they're equal,
1028 // because this one is at a lower offset.
1029 if largest_niche.available(dl) <= niche.available(dl) {
1030 st.largest_niche = Some(niche);
1033 None => st.largest_niche = Some(niche),
1038 start == Bound::Unbounded && end == Bound::Unbounded,
1039 "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
1045 return Ok(tcx.intern_layout(st));
1048 // At this point, we have handled all unions and
1049 // structs. (We have also handled univariant enums
1050 // that allow representation optimization.)
1051 assert!(def.is_enum());
1053 // The current code for niche-filling relies on variant indices
1054 // instead of actual discriminants, so dataful enums with
1055 // explicit discriminants (RFC #2363) would misbehave.
1056 let no_explicit_discriminants = def
1059 .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i.as_u32()));
1061 let mut niche_filling_layout = None;
1063 // Niche-filling enum optimization.
1064 if !def.repr().inhibit_enum_layout_opt() && no_explicit_discriminants {
1065 let mut dataful_variant = None;
1066 let mut niche_variants = VariantIdx::MAX..=VariantIdx::new(0);
1068 // Find one non-ZST variant.
1069 'variants: for (v, fields) in variants.iter_enumerated() {
1075 if dataful_variant.is_none() {
1076 dataful_variant = Some(v);
1079 dataful_variant = None;
1084 niche_variants = *niche_variants.start().min(&v)..=v;
1087 if niche_variants.start() > niche_variants.end() {
1088 dataful_variant = None;
1091 if let Some(i) = dataful_variant {
1092 let count = (niche_variants.end().as_u32()
1093 - niche_variants.start().as_u32()
1096 // Find the field with the largest niche
1097 let niche_candidate = variants[i]
1100 .filter_map(|(j, field)| Some((j, field.largest_niche?)))
1101 .max_by_key(|(_, niche)| niche.available(dl));
1103 if let Some((field_index, niche, (niche_start, niche_scalar))) =
1104 niche_candidate.and_then(|(field_index, niche)| {
1105 Some((field_index, niche, niche.reserve(self, count)?))
1108 let mut align = dl.aggregate_align;
1112 let mut st = self.univariant_uninterned(
1116 StructKind::AlwaysSized,
1118 st.variants = Variants::Single { index: j };
1120 align = align.max(st.align);
1122 Ok(tcx.intern_layout(st))
1124 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1126 let offset = st[i].fields().offset(field_index) + niche.offset;
1127 let size = st[i].size();
1129 let abi = if st.iter().all(|v| v.abi().is_uninhabited()) {
1133 Abi::Scalar(_) => Abi::Scalar(niche_scalar),
1134 Abi::ScalarPair(first, second) => {
1135 // We need to use scalar_unit to reset the
1136 // valid range to the maximal one for that
1137 // primitive, because only the niche is
1138 // guaranteed to be initialised, not the
1140 if offset.bytes() == 0 {
1143 scalar_unit(second.primitive()),
1147 scalar_unit(first.primitive()),
1152 _ => Abi::Aggregate { sized: true },
1156 let largest_niche = Niche::from_scalar(dl, offset, niche_scalar);
1158 niche_filling_layout = Some(LayoutS {
1159 variants: Variants::Multiple {
1161 tag_encoding: TagEncoding::Niche {
1169 fields: FieldsShape::Arbitrary {
1170 offsets: vec![offset],
1171 memory_index: vec![0],
1182 let (mut min, mut max) = (i128::MAX, i128::MIN);
1183 let discr_type = def.repr().discr_type();
1184 let bits = Integer::from_attr(self, discr_type).size().bits();
1185 for (i, discr) in def.discriminants(tcx) {
1186 if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
1189 let mut x = discr.val as i128;
1190 if discr_type.is_signed() {
1191 // sign extend the raw representation to be an i128
1192 x = (x << (128 - bits)) >> (128 - bits);
1201 // We might have no inhabited variants, so pretend there's at least one.
1202 if (min, max) == (i128::MAX, i128::MIN) {
1206 assert!(min <= max, "discriminant range is {}...{}", min, max);
1207 let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr(), min, max);
1209 let mut align = dl.aggregate_align;
1210 let mut size = Size::ZERO;
1212 // We're interested in the smallest alignment, so start large.
1213 let mut start_align = Align::from_bytes(256).unwrap();
1214 assert_eq!(Integer::for_align(dl, start_align), None);
1216 // repr(C) on an enum tells us to make a (tag, union) layout,
1217 // so we need to grow the prefix alignment to be at least
1218 // the alignment of the union. (This value is used both for
1219 // determining the alignment of the overall enum, and the
1220 // determining the alignment of the payload after the tag.)
1221 let mut prefix_align = min_ity.align(dl).abi;
1223 for fields in &variants {
1224 for field in fields {
1225 prefix_align = prefix_align.max(field.align.abi);
1230 // Create the set of structs that represent each variant.
1231 let mut layout_variants = variants
1233 .map(|(i, field_layouts)| {
1234 let mut st = self.univariant_uninterned(
1238 StructKind::Prefixed(min_ity.size(), prefix_align),
1240 st.variants = Variants::Single { index: i };
1241 // Find the first field we can't move later
1242 // to make room for a larger discriminant.
1244 st.fields.index_by_increasing_offset().map(|j| field_layouts[j])
1246 if !field.is_zst() || field.align.abi.bytes() != 1 {
1247 start_align = start_align.min(field.align.abi);
1251 size = cmp::max(size, st.size);
1252 align = align.max(st.align);
1255 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1257 // Align the maximum variant size to the largest alignment.
1258 size = size.align_to(align.abi);
1260 if size.bytes() >= dl.obj_size_bound() {
1261 return Err(LayoutError::SizeOverflow(ty));
1264 let typeck_ity = Integer::from_attr(dl, def.repr().discr_type());
1265 if typeck_ity < min_ity {
1266 // It is a bug if Layout decided on a greater discriminant size than typeck for
1267 // some reason at this point (based on values discriminant can take on). Mostly
1268 // because this discriminant will be loaded, and then stored into variable of
1269 // type calculated by typeck. Consider such case (a bug): typeck decided on
1270 // byte-sized discriminant, but layout thinks we need a 16-bit to store all
1271 // discriminant values. That would be a bug, because then, in codegen, in order
1272 // to store this 16-bit discriminant into 8-bit sized temporary some of the
1273 // space necessary to represent would have to be discarded (or layout is wrong
1274 // on thinking it needs 16 bits)
1276 "layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
1280 // However, it is fine to make discr type however large (as an optimisation)
1281 // after this point – we’ll just truncate the value we load in codegen.
1284 // Check to see if we should use a different type for the
1285 // discriminant. We can safely use a type with the same size
1286 // as the alignment of the first field of each variant.
1287 // We increase the size of the discriminant to avoid LLVM copying
1288 // padding when it doesn't need to. This normally causes unaligned
1289 // load/stores and excessive memcpy/memset operations. By using a
1290 // bigger integer size, LLVM can be sure about its contents and
1291 // won't be so conservative.
1293 // Use the initial field alignment
1294 let mut ity = if def.repr().c() || def.repr().int.is_some() {
1297 Integer::for_align(dl, start_align).unwrap_or(min_ity)
1300 // If the alignment is not larger than the chosen discriminant size,
1301 // don't use the alignment as the final size.
1305 // Patch up the variants' first few fields.
1306 let old_ity_size = min_ity.size();
1307 let new_ity_size = ity.size();
1308 for variant in &mut layout_variants {
1309 match variant.fields {
1310 FieldsShape::Arbitrary { ref mut offsets, .. } => {
1312 if *i <= old_ity_size {
1313 assert_eq!(*i, old_ity_size);
1317 // We might be making the struct larger.
1318 if variant.size <= old_ity_size {
1319 variant.size = new_ity_size;
1327 let tag_mask = ity.size().unsigned_int_max();
1328 let tag = Scalar::Initialized {
1329 value: Int(ity, signed),
1330 valid_range: WrappingRange {
1331 start: (min as u128 & tag_mask),
1332 end: (max as u128 & tag_mask),
1335 let mut abi = Abi::Aggregate { sized: true };
1337 // Without latter check aligned enums with custom discriminant values
1338 // Would result in ICE see the issue #92464 for more info
1339 if tag.size(dl) == size || variants.iter().all(|layout| layout.is_empty()) {
1340 abi = Abi::Scalar(tag);
1342 // Try to use a ScalarPair for all tagged enums.
1343 let mut common_prim = None;
1344 for (field_layouts, layout_variant) in iter::zip(&variants, &layout_variants) {
1345 let FieldsShape::Arbitrary { ref offsets, .. } = layout_variant.fields else {
1349 iter::zip(field_layouts, offsets).filter(|p| !p.0.is_zst());
1350 let (field, offset) = match (fields.next(), fields.next()) {
1351 (None, None) => continue,
1352 (Some(pair), None) => pair,
1358 let prim = match field.abi {
1359 Abi::Scalar(scalar) => scalar.primitive(),
1365 if let Some(pair) = common_prim {
1366 // This is pretty conservative. We could go fancier
1367 // by conflating things like i32 and u32, or even
1368 // realising that (u8, u8) could just cohabit with
1370 if pair != (prim, offset) {
1375 common_prim = Some((prim, offset));
1378 if let Some((prim, offset)) = common_prim {
1379 let pair = self.scalar_pair(tag, scalar_unit(prim));
1380 let pair_offsets = match pair.fields {
1381 FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
1382 assert_eq!(memory_index, &[0, 1]);
1387 if pair_offsets[0] == Size::ZERO
1388 && pair_offsets[1] == *offset
1389 && align == pair.align
1390 && size == pair.size
1392 // We can use `ScalarPair` only when it matches our
1393 // already computed layout (including `#[repr(C)]`).
1399 if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
1400 abi = Abi::Uninhabited;
1403 let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag);
1405 let layout_variants =
1406 layout_variants.into_iter().map(|v| tcx.intern_layout(v)).collect();
1408 let tagged_layout = LayoutS {
1409 variants: Variants::Multiple {
1411 tag_encoding: TagEncoding::Direct,
1413 variants: layout_variants,
1415 fields: FieldsShape::Arbitrary {
1416 offsets: vec![Size::ZERO],
1417 memory_index: vec![0],
1425 let best_layout = match (tagged_layout, niche_filling_layout) {
1426 (tagged_layout, Some(niche_filling_layout)) => {
1427 // Pick the smaller layout; otherwise,
1428 // pick the layout with the larger niche; otherwise,
1429 // pick tagged as it has simpler codegen.
1430 cmp::min_by_key(tagged_layout, niche_filling_layout, |layout| {
1431 let niche_size = layout.largest_niche.map_or(0, |n| n.available(dl));
1432 (layout.size, cmp::Reverse(niche_size))
1435 (tagged_layout, None) => tagged_layout,
1438 tcx.intern_layout(best_layout)
1441 // Types with no meaningful known layout.
1442 ty::Projection(_) | ty::Opaque(..) => {
1443 // NOTE(eddyb) `layout_of` query should've normalized these away,
1444 // if that was possible, so there's no reason to try again here.
1445 return Err(LayoutError::Unknown(ty));
1448 ty::Placeholder(..) | ty::GeneratorWitness(..) | ty::Infer(_) => {
1449 bug!("Layout::compute: unexpected type `{}`", ty)
1452 ty::Bound(..) | ty::Param(_) | ty::Error(_) => {
1453 return Err(LayoutError::Unknown(ty));
1459 /// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
1460 #[derive(Clone, Debug, PartialEq)]
// NOTE(review): this listing is elided — the `Unassigned` variant (matched on in
// `generator_saved_local_eligibility` below) and the closing brace are not visible here.
1461 enum SavedLocalEligibility {
// The saved local is used by exactly one generator variant, so it is laid out
// inside that variant and may overlap with locals of other variants.
1463 Assigned(VariantIdx),
1464 // FIXME: Use newtype_index so we aren't wasting bytes
// The local must not overlap and is promoted to the generator's prefix.
// `Some(i)` is its position among the promoted prefix fields; `None` means
// the position has not been assigned yet (it is filled in after the
// ineligible set is final — see the promotion loop over `ineligible_locals`).
1465 Ineligible(Option<u32>),
1468 // When laying out generators, we divide our saved local fields into two
1469 // categories: overlap-eligible and overlap-ineligible.
1471 // Those fields which are ineligible for overlap go in a "prefix" at the
1472 // beginning of the layout, and always have space reserved for them.
1474 // Overlap-eligible fields are only assigned to one variant, so we lay
1475 // those fields out for each variant and put them right after the
1478 // Finally, in the layout details, we point to the fields from the
1479 // variants they are assigned to. It is possible for some fields to be
1480 // included in multiple variants. No field ever "moves around" in the
1481 // layout; its offset is always the same.
1483 // Also included in the layout are the upvars and the discriminant.
1484 // These are included as fields on the "outer" layout; they are not part
1486 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
1487 /// Compute the eligibility and assignment of each local.
1488 fn generator_saved_local_eligibility(
1490 info: &GeneratorLayout<'tcx>,
1491 ) -> (BitSet<GeneratorSavedLocal>, IndexVec<GeneratorSavedLocal, SavedLocalEligibility>) {
1492 use SavedLocalEligibility::*;
1494 let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> =
1495 IndexVec::from_elem_n(Unassigned, info.field_tys.len());
1497 // The saved locals not eligible for overlap. These will get
1498 // "promoted" to the prefix of our generator.
1499 let mut ineligible_locals = BitSet::new_empty(info.field_tys.len());
1501 // Figure out which of our saved locals are fields in only
1502 // one variant. The rest are deemed ineligible for overlap.
1503 for (variant_index, fields) in info.variant_fields.iter_enumerated() {
1504 for local in fields {
1505 match assignments[*local] {
1507 assignments[*local] = Assigned(variant_index);
1510 // We've already seen this local at another suspension
1511 // point, so it is no longer a candidate.
1513 "removing local {:?} in >1 variant ({:?}, {:?})",
1518 ineligible_locals.insert(*local);
1519 assignments[*local] = Ineligible(None);
1526 // Next, check every pair of eligible locals to see if they
1528 for local_a in info.storage_conflicts.rows() {
1529 let conflicts_a = info.storage_conflicts.count(local_a);
1530 if ineligible_locals.contains(local_a) {
1534 for local_b in info.storage_conflicts.iter(local_a) {
1535 // local_a and local_b are storage live at the same time, therefore they
1536 // cannot overlap in the generator layout. The only way to guarantee
1537 // this is if they are in the same variant, or one is ineligible
1538 // (which means it is stored in every variant).
1539 if ineligible_locals.contains(local_b)
1540 || assignments[local_a] == assignments[local_b]
1545 // If they conflict, we will choose one to make ineligible.
1546 // This is not always optimal; it's just a greedy heuristic that
1547 // seems to produce good results most of the time.
1548 let conflicts_b = info.storage_conflicts.count(local_b);
1549 let (remove, other) =
1550 if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
1551 ineligible_locals.insert(remove);
1552 assignments[remove] = Ineligible(None);
1553 trace!("removing local {:?} due to conflict with {:?}", remove, other);
1557 // Count the number of variants in use. If only one of them, then it is
1558 // impossible to overlap any locals in our layout. In this case it's
1559 // always better to make the remaining locals ineligible, so we can
1560 // lay them out with the other locals in the prefix and eliminate
1561 // unnecessary padding bytes.
1563 let mut used_variants = BitSet::new_empty(info.variant_fields.len());
1564 for assignment in &assignments {
1565 if let Assigned(idx) = assignment {
1566 used_variants.insert(*idx);
1569 if used_variants.count() < 2 {
1570 for assignment in assignments.iter_mut() {
1571 *assignment = Ineligible(None);
1573 ineligible_locals.insert_all();
1577 // Write down the order of our locals that will be promoted to the prefix.
1579 for (idx, local) in ineligible_locals.iter().enumerate() {
1580 assignments[local] = Ineligible(Some(idx as u32));
1583 debug!("generator saved local assignments: {:?}", assignments);
1585 (ineligible_locals, assignments)
1588 /// Compute the full generator layout.
1589 fn generator_layout(
1592 def_id: hir::def_id::DefId,
1593 substs: SubstsRef<'tcx>,
1594 ) -> Result<Layout<'tcx>, LayoutError<'tcx>> {
1595 use SavedLocalEligibility::*;
1597 let subst_field = |ty: Ty<'tcx>| ty.subst(tcx, substs);
1599 let Some(info) = tcx.generator_layout(def_id) else {
1600 return Err(LayoutError::Unknown(ty));
1602 let (ineligible_locals, assignments) = self.generator_saved_local_eligibility(&info);
1604 // Build a prefix layout, including "promoting" all ineligible
1605 // locals as part of the prefix. We compute the layout of all of
1606 // these fields at once to get optimal packing.
1607 let tag_index = substs.as_generator().prefix_tys().count();
1609 // `info.variant_fields` already accounts for the reserved variants, so no need to add them.
1610 let max_discr = (info.variant_fields.len() - 1) as u128;
1611 let discr_int = Integer::fit_unsigned(max_discr);
1612 let discr_int_ty = discr_int.to_ty(tcx, false);
1613 let tag = Scalar::Initialized {
1614 value: Primitive::Int(discr_int, false),
1615 valid_range: WrappingRange { start: 0, end: max_discr },
1617 let tag_layout = self.tcx.intern_layout(LayoutS::scalar(self, tag));
1618 let tag_layout = TyAndLayout { ty: discr_int_ty, layout: tag_layout };
1620 let promoted_layouts = ineligible_locals
1622 .map(|local| subst_field(info.field_tys[local]))
1623 .map(|ty| tcx.mk_maybe_uninit(ty))
1624 .map(|ty| self.layout_of(ty));
1625 let prefix_layouts = substs
1628 .map(|ty| self.layout_of(ty))
1629 .chain(iter::once(Ok(tag_layout)))
1630 .chain(promoted_layouts)
1631 .collect::<Result<Vec<_>, _>>()?;
1632 let prefix = self.univariant_uninterned(
1635 &ReprOptions::default(),
1636 StructKind::AlwaysSized,
1639 let (prefix_size, prefix_align) = (prefix.size, prefix.align);
1641 // Split the prefix layout into the "outer" fields (upvars and
1642 // discriminant) and the "promoted" fields. Promoted fields will
1643 // get included in each variant that requested them in
1645 debug!("prefix = {:#?}", prefix);
1646 let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
1647 FieldsShape::Arbitrary { mut offsets, memory_index } => {
1648 let mut inverse_memory_index = invert_mapping(&memory_index);
1650 // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
1651 // "outer" and "promoted" fields respectively.
1652 let b_start = (tag_index + 1) as u32;
1653 let offsets_b = offsets.split_off(b_start as usize);
1654 let offsets_a = offsets;
1656 // Disentangle the "a" and "b" components of `inverse_memory_index`
1657 // by preserving the order but keeping only one disjoint "half" each.
1658 // FIXME(eddyb) build a better abstraction for permutations, if possible.
1659 let inverse_memory_index_b: Vec<_> =
1660 inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
1661 inverse_memory_index.retain(|&i| i < b_start);
1662 let inverse_memory_index_a = inverse_memory_index;
1664 // Since `inverse_memory_index_{a,b}` each only refer to their
1665 // respective fields, they can be safely inverted
1666 let memory_index_a = invert_mapping(&inverse_memory_index_a);
1667 let memory_index_b = invert_mapping(&inverse_memory_index_b);
1670 FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
1671 (outer_fields, offsets_b, memory_index_b)
1676 let mut size = prefix.size;
1677 let mut align = prefix.align;
1681 .map(|(index, variant_fields)| {
1682 // Only include overlap-eligible fields when we compute our variant layout.
1683 let variant_only_tys = variant_fields
1685 .filter(|local| match assignments[**local] {
1686 Unassigned => bug!(),
1687 Assigned(v) if v == index => true,
1688 Assigned(_) => bug!("assignment does not match variant"),
1689 Ineligible(_) => false,
1691 .map(|local| subst_field(info.field_tys[*local]));
1693 let mut variant = self.univariant_uninterned(
1696 .map(|ty| self.layout_of(ty))
1697 .collect::<Result<Vec<_>, _>>()?,
1698 &ReprOptions::default(),
1699 StructKind::Prefixed(prefix_size, prefix_align.abi),
1701 variant.variants = Variants::Single { index };
1703 let FieldsShape::Arbitrary { offsets, memory_index } = variant.fields else {
1707 // Now, stitch the promoted and variant-only fields back together in
1708 // the order they are mentioned by our GeneratorLayout.
1709 // Because we only use some subset (that can differ between variants)
1710 // of the promoted fields, we can't just pick those elements of the
1711 // `promoted_memory_index` (as we'd end up with gaps).
1712 // So instead, we build an "inverse memory_index", as if all of the
1713 // promoted fields were being used, but leave the elements not in the
1714 // subset as `INVALID_FIELD_IDX`, which we can filter out later to
1715 // obtain a valid (bijective) mapping.
1716 const INVALID_FIELD_IDX: u32 = !0;
1717 let mut combined_inverse_memory_index =
1718 vec![INVALID_FIELD_IDX; promoted_memory_index.len() + memory_index.len()];
1719 let mut offsets_and_memory_index = iter::zip(offsets, memory_index);
1720 let combined_offsets = variant_fields
1724 let (offset, memory_index) = match assignments[*local] {
1725 Unassigned => bug!(),
1727 let (offset, memory_index) =
1728 offsets_and_memory_index.next().unwrap();
1729 (offset, promoted_memory_index.len() as u32 + memory_index)
1731 Ineligible(field_idx) => {
1732 let field_idx = field_idx.unwrap() as usize;
1733 (promoted_offsets[field_idx], promoted_memory_index[field_idx])
1736 combined_inverse_memory_index[memory_index as usize] = i as u32;
1741 // Remove the unused slots and invert the mapping to obtain the
1742 // combined `memory_index` (also see previous comment).
1743 combined_inverse_memory_index.retain(|&i| i != INVALID_FIELD_IDX);
1744 let combined_memory_index = invert_mapping(&combined_inverse_memory_index);
1746 variant.fields = FieldsShape::Arbitrary {
1747 offsets: combined_offsets,
1748 memory_index: combined_memory_index,
1751 size = size.max(variant.size);
1752 align = align.max(variant.align);
1753 Ok(tcx.intern_layout(variant))
1755 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1757 size = size.align_to(align.abi);
1760 if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi().is_uninhabited()) {
1763 Abi::Aggregate { sized: true }
1766 let layout = tcx.intern_layout(LayoutS {
1767 variants: Variants::Multiple {
1769 tag_encoding: TagEncoding::Direct,
1770 tag_field: tag_index,
1773 fields: outer_fields,
1775 largest_niche: prefix.largest_niche,
1779 debug!("generator layout ({:?}): {:#?}", ty, layout);
1783 /// This is invoked by the `layout_of` query to record the final
1784 /// layout of each type.
1786 fn record_layout_for_printing(&self, layout: TyAndLayout<'tcx>) {
1787 // If we are running with `-Zprint-type-sizes`, maybe record layouts
1788 // for dumping later.
// Cheap early-out wrapper: the real work (and its larger code size) lives in
// `record_layout_for_printing_outlined`, so the common no-flag path stays small.
1789 if self.tcx.sess.opts.debugging_opts.print_type_sizes {
1790 self.record_layout_for_printing_outlined(layout)
1794 fn record_layout_for_printing_outlined(&self, layout: TyAndLayout<'tcx>) {
1795 // Ignore layouts that are done with non-empty environments or
1796 // non-monomorphic layouts, as the user only wants to see the stuff
1797 // resulting from the final codegen session.
1798 if layout.ty.has_param_types_or_consts() || !self.param_env.caller_bounds().is_empty() {
1802 // (delay format until we actually need it)
1803 let record = |kind, packed, opt_discr_size, variants| {
1804 let type_desc = format!("{:?}", layout.ty);
1805 self.tcx.sess.code_stats.record_type_size(
1816 let adt_def = match *layout.ty.kind() {
1817 ty::Adt(ref adt_def, _) => {
1818 debug!("print-type-size t: `{:?}` process adt", layout.ty);
1822 ty::Closure(..) => {
1823 debug!("print-type-size t: `{:?}` record closure", layout.ty);
1824 record(DataTypeKind::Closure, false, None, vec![]);
1829 debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
1834 let adt_kind = adt_def.adt_kind();
1835 let adt_packed = adt_def.repr().pack.is_some();
1837 let build_variant_info = |n: Option<Symbol>, flds: &[Symbol], layout: TyAndLayout<'tcx>| {
1838 let mut min_size = Size::ZERO;
1839 let field_info: Vec<_> = flds
1843 let field_layout = layout.field(self, i);
1844 let offset = layout.fields.offset(i);
1845 let field_end = offset + field_layout.size;
1846 if min_size < field_end {
1847 min_size = field_end;
1850 name: name.to_string(),
1851 offset: offset.bytes(),
1852 size: field_layout.size.bytes(),
1853 align: field_layout.align.abi.bytes(),
1859 name: n.map(|n| n.to_string()),
1860 kind: if layout.is_unsized() { SizeKind::Min } else { SizeKind::Exact },
1861 align: layout.align.abi.bytes(),
1862 size: if min_size.bytes() == 0 { layout.size.bytes() } else { min_size.bytes() },
1867 match layout.variants {
1868 Variants::Single { index } => {
1869 if !adt_def.variants().is_empty() && layout.fields != FieldsShape::Primitive {
1871 "print-type-size `{:#?}` variant {}",
1873 adt_def.variant(index).name
1875 let variant_def = &adt_def.variant(index);
1876 let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
1881 vec![build_variant_info(Some(variant_def.name), &fields, layout)],
1884 // (This case arises for *empty* enums; so give it
1886 record(adt_kind.into(), adt_packed, None, vec![]);
1890 Variants::Multiple { tag, ref tag_encoding, .. } => {
1892 "print-type-size `{:#?}` adt general variants def {}",
1894 adt_def.variants().len()
1896 let variant_infos: Vec<_> = adt_def
1899 .map(|(i, variant_def)| {
1900 let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
1902 Some(variant_def.name),
1904 layout.for_variant(self, i),
1911 match tag_encoding {
1912 TagEncoding::Direct => Some(tag.size(self)),
1922 /// Type size "skeleton", i.e., the only information determining a type's size.
1923 /// While this is conservative, (aside from constant sizes, only pointers,
1924 /// newtypes thereof and null pointer optimized enums are allowed), it is
1925 /// enough to statically check common use cases of transmute.
1926 #[derive(Copy, Clone, Debug)]
1927 pub enum SizeSkeleton<'tcx> {
1928 /// Any statically computable Layout.
1931 /// A potentially-fat pointer.
1933 /// If true, this pointer is never null.
1935 /// The type which determines the unsized metadata, if any,
1936 /// of this pointer. Either a type parameter or a projection
1937 /// depending on one, with regions erased.
1942 impl<'tcx> SizeSkeleton<'tcx> {
1946 param_env: ty::ParamEnv<'tcx>,
1947 ) -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
1948 debug_assert!(!ty.has_infer_types_or_consts());
1950 // First try computing a static layout.
1951 let err = match tcx.layout_of(param_env.and(ty)) {
1953 return Ok(SizeSkeleton::Known(layout.size));
1959 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
1960 let non_zero = !ty.is_unsafe_ptr();
1961 let tail = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
1963 ty::Param(_) | ty::Projection(_) => {
1964 debug_assert!(tail.has_param_types_or_consts());
1965 Ok(SizeSkeleton::Pointer { non_zero, tail: tcx.erase_regions(tail) })
1968 "SizeSkeleton::compute({}): layout errored ({}), yet \
1969 tail `{}` is not a type parameter or a projection",
1977 ty::Adt(def, substs) => {
1978 // Only newtypes and enums w/ nullable pointer optimization.
1979 if def.is_union() || def.variants().is_empty() || def.variants().len() > 2 {
1983 // Get a zero-sized variant or a pointer newtype.
1984 let zero_or_ptr_variant = |i| {
1985 let i = VariantIdx::new(i);
1987 def.variant(i).fields.iter().map(|field| {
1988 SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env)
1991 for field in fields {
1994 SizeSkeleton::Known(size) => {
1995 if size.bytes() > 0 {
1999 SizeSkeleton::Pointer { .. } => {
2010 let v0 = zero_or_ptr_variant(0)?;
2012 if def.variants().len() == 1 {
2013 if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
2014 return Ok(SizeSkeleton::Pointer {
2016 || match tcx.layout_scalar_valid_range(def.did()) {
2017 (Bound::Included(start), Bound::Unbounded) => start > 0,
2018 (Bound::Included(start), Bound::Included(end)) => {
2019 0 < start && start < end
2030 let v1 = zero_or_ptr_variant(1)?;
2031 // Nullable pointer enum optimization.
2033 (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None)
2034 | (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
2035 Ok(SizeSkeleton::Pointer { non_zero: false, tail })
2041 ty::Projection(_) | ty::Opaque(..) => {
2042 let normalized = tcx.normalize_erasing_regions(param_env, ty);
2043 if ty == normalized {
2046 SizeSkeleton::compute(normalized, tcx, param_env)
2054 pub fn same_size(self, other: SizeSkeleton<'_>) -> bool {
2055 match (self, other) {
2056 (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
2057 (SizeSkeleton::Pointer { tail: a, .. }, SizeSkeleton::Pointer { tail: b, .. }) => {
// Context trait for anything that can hand out the global `TyCtxt`.
// Note the `HasDataLayout` supertrait: every `TyCtxt`-bearing context also
// exposes the target data layout (see the `impl`s for `TyCtxt` below).
2065 pub trait HasTyCtxt<'tcx>: HasDataLayout {
2066 fn tcx(&self) -> TyCtxt<'tcx>;
// Context trait for anything that carries a `ParamEnv` — i.e. the set of
// caller bounds under which layouts/ABIs are being computed.
2069 pub trait HasParamEnv<'tcx> {
2070 fn param_env(&self) -> ty::ParamEnv<'tcx>;
2073 impl<'tcx> HasDataLayout for TyCtxt<'tcx> {
2075 fn data_layout(&self) -> &TargetDataLayout {
2080 impl<'tcx> HasTargetSpec for TyCtxt<'tcx> {
2081 fn target_spec(&self) -> &Target {
2086 impl<'tcx> HasTyCtxt<'tcx> for TyCtxt<'tcx> {
2088 fn tcx(&self) -> TyCtxt<'tcx> {
2093 impl<'tcx> HasDataLayout for ty::query::TyCtxtAt<'tcx> {
2095 fn data_layout(&self) -> &TargetDataLayout {
2100 impl<'tcx> HasTargetSpec for ty::query::TyCtxtAt<'tcx> {
2101 fn target_spec(&self) -> &Target {
2106 impl<'tcx> HasTyCtxt<'tcx> for ty::query::TyCtxtAt<'tcx> {
2108 fn tcx(&self) -> TyCtxt<'tcx> {
2113 impl<'tcx, C> HasParamEnv<'tcx> for LayoutCx<'tcx, C> {
2114 fn param_env(&self) -> ty::ParamEnv<'tcx> {
2119 impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
2120 fn data_layout(&self) -> &TargetDataLayout {
2121 self.tcx.data_layout()
2125 impl<'tcx, T: HasTargetSpec> HasTargetSpec for LayoutCx<'tcx, T> {
2126 fn target_spec(&self) -> &Target {
2127 self.tcx.target_spec()
2131 impl<'tcx, T: HasTyCtxt<'tcx>> HasTyCtxt<'tcx> for LayoutCx<'tcx, T> {
2132 fn tcx(&self) -> TyCtxt<'tcx> {
// Abstraction over "a `T`" vs. "a `Result<T, E>`": lets `layout_of`/`fn_abi_of`
// style entry points return either an infallible value or a fallible one
// through the same helper code (see the two `impl`s directly below).
// NOTE(review): the associated `Error` type declaration is in an elided line here.
2137 pub trait MaybeResult<T> {
2140 fn from(x: Result<T, Self::Error>) -> Self;
2141 fn to_result(self) -> Result<T, Self::Error>;
2144 impl<T> MaybeResult<T> for T {
2147 fn from(Ok(x): Result<T, Self::Error>) -> Self {
2150 fn to_result(self) -> Result<T, Self::Error> {
2155 impl<T, E> MaybeResult<T> for Result<T, E> {
2158 fn from(x: Result<T, Self::Error>) -> Self {
2161 fn to_result(self) -> Result<T, Self::Error> {
2166 pub type TyAndLayout<'tcx> = rustc_target::abi::TyAndLayout<'tcx, Ty<'tcx>>;
2168 /// Trait for contexts that want to be able to compute layouts of types.
2169 /// This automatically gives access to `LayoutOf`, through a blanket `impl`.
2170 pub trait LayoutOfHelpers<'tcx>: HasDataLayout + HasTyCtxt<'tcx> + HasParamEnv<'tcx> {
2171 /// The `TyAndLayout`-wrapping type (or `TyAndLayout` itself), which will be
2172 /// returned from `layout_of` (see also `handle_layout_err`).
2173 type LayoutOfResult: MaybeResult<TyAndLayout<'tcx>>;
2175 /// `Span` to use for `tcx.at(span)`, from `layout_of`.
2176 // FIXME(eddyb) perhaps make this mandatory to get contexts to track it better?
2178 fn layout_tcx_at_span(&self) -> Span {
2182 /// Helper used for `layout_of`, to adapt `tcx.layout_of(...)` into a
2183 /// `Self::LayoutOfResult` (which does not need to be a `Result<...>`).
2185 /// Most `impl`s, which propagate `LayoutError`s, should simply return `err`,
2186 /// but this hook allows e.g. codegen to return only `TyAndLayout` from its
2187 /// `cx.layout_of(...)`, without any `Result<...>` around it to deal with
2188 /// (and any `LayoutError`s are turned into fatal errors or ICEs).
2189 fn handle_layout_err(
2191 err: LayoutError<'tcx>,
2194 ) -> <Self::LayoutOfResult as MaybeResult<TyAndLayout<'tcx>>>::Error;
2197 /// Blanket extension trait for contexts that can compute layouts of types.
2198 pub trait LayoutOf<'tcx>: LayoutOfHelpers<'tcx> {
2199 /// Computes the layout of a type. Note that this implicitly
2200 /// executes in "reveal all" mode, and will normalize the input type.
2202 fn layout_of(&self, ty: Ty<'tcx>) -> Self::LayoutOfResult {
2203 self.spanned_layout_of(ty, DUMMY_SP)
2206 /// Computes the layout of a type, at `span`. Note that this implicitly
2207 /// executes in "reveal all" mode, and will normalize the input type.
2208 // FIXME(eddyb) avoid passing information like this, and instead add more
2209 // `TyCtxt::at`-like APIs to be able to do e.g. `cx.at(span).layout_of(ty)`.
2211 fn spanned_layout_of(&self, ty: Ty<'tcx>, span: Span) -> Self::LayoutOfResult {
2212 let span = if !span.is_dummy() { span } else { self.layout_tcx_at_span() };
2213 let tcx = self.tcx().at(span);
2216 tcx.layout_of(self.param_env().and(ty))
2217 .map_err(|err| self.handle_layout_err(err, span, ty)),
2222 impl<'tcx, C: LayoutOfHelpers<'tcx>> LayoutOf<'tcx> for C {}
2224 impl<'tcx> LayoutOfHelpers<'tcx> for LayoutCx<'tcx, TyCtxt<'tcx>> {
2225 type LayoutOfResult = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
2228 fn handle_layout_err(&self, err: LayoutError<'tcx>, _: Span, _: Ty<'tcx>) -> LayoutError<'tcx> {
2233 impl<'tcx> LayoutOfHelpers<'tcx> for LayoutCx<'tcx, ty::query::TyCtxtAt<'tcx>> {
2234 type LayoutOfResult = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
2237 fn layout_tcx_at_span(&self) -> Span {
2242 fn handle_layout_err(&self, err: LayoutError<'tcx>, _: Span, _: Ty<'tcx>) -> LayoutError<'tcx> {
// Projects a `TyAndLayout` to the layout of one of its variants.
// For `Variants::Single` matching the requested index the enum layout itself is
// reused; for a `Single` layout of a *different* (uninhabited) variant a fresh
// uninhabited layout is synthesized; `Variants::Multiple` indexes the
// precomputed per-variant layouts.
// NOTE(review): extraction dropped interior lines (non-contiguous original
// numbering); code left byte-identical.
2247 impl<'tcx, C> TyAbiInterface<'tcx, C> for Ty<'tcx>
2249 C: HasTyCtxt<'tcx> + HasParamEnv<'tcx>,
2251 fn ty_and_layout_for_variant(
2252 this: TyAndLayout<'tcx>,
2254 variant_index: VariantIdx,
2255 ) -> TyAndLayout<'tcx> {
2256 let layout = match this.variants {
2257 Variants::Single { index }
2258 // If all variants but one are uninhabited, the variant layout is the enum layout.
2259 if index == variant_index &&
2260 // Don't confuse variants of uninhabited enums with the enum itself.
2261 // For more details see https://github.com/rust-lang/rust/issues/69763.
2262 this.fields != FieldsShape::Primitive =>
2267 Variants::Single { index } => {
2269 let param_env = cx.param_env();
2271 // Deny calling for_variant more than once for non-Single enums.
2272 if let Ok(original_layout) = tcx.layout_of(param_env.and(this.ty)) {
2273 assert_eq!(original_layout.variants, Variants::Single { index });
2276 let fields = match this.ty.kind() {
2277 ty::Adt(def, _) if def.variants().is_empty() =>
2278 bug!("for_variant called on zero-variant enum"),
2279 ty::Adt(def, _) => def.variant(variant_index).fields.len(),
// Synthesize an uninhabited layout for the requested (absent) variant.
2282 tcx.intern_layout(LayoutS {
2283 variants: Variants::Single { index: variant_index },
2284 fields: match NonZeroUsize::new(fields) {
2285 Some(fields) => FieldsShape::Union(fields),
2286 None => FieldsShape::Arbitrary { offsets: vec![], memory_index: vec![] },
2288 abi: Abi::Uninhabited,
2289 largest_niche: None,
2290 align: tcx.data_layout.i8_align,
2295 Variants::Multiple { ref variants, .. } => variants[variant_index],
2298 assert_eq!(*layout.variants(), Variants::Single { index: variant_index });
2300 TyAndLayout { ty: this.ty, layout }
// Computes the type-and-layout of field `i` of `this`. The inner
// `field_ty_or_layout` either returns just a field *type* (laid out afterwards
// via `layout_of`) or a ready-made `TyAndLayout` (for tags and thin-pointer
// data fields, which have no meaningful Rust type of their own).
// NOTE(review): extraction dropped interior lines; code left byte-identical.
2303 fn ty_and_layout_field(this: TyAndLayout<'tcx>, cx: &C, i: usize) -> TyAndLayout<'tcx> {
2304 enum TyMaybeWithLayout<'tcx> {
2306 TyAndLayout(TyAndLayout<'tcx>),
2309 fn field_ty_or_layout<'tcx>(
2310 this: TyAndLayout<'tcx>,
2311 cx: &(impl HasTyCtxt<'tcx> + HasParamEnv<'tcx>),
2313 ) -> TyMaybeWithLayout<'tcx> {
// Build a synthetic layout for an enum/generator tag scalar.
2315 let tag_layout = |tag: Scalar| -> TyAndLayout<'tcx> {
2317 layout: tcx.intern_layout(LayoutS::scalar(cx, tag)),
2318 ty: tag.primitive().to_ty(tcx),
2322 match *this.ty.kind() {
2331 | ty::GeneratorWitness(..)
2333 | ty::Dynamic(..) => bug!("TyAndLayout::field({:?}): not applicable", this),
2335 // Potentially-fat pointers.
2336 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
2337 assert!(i < this.fields.count());
2339 // Reuse the fat `*T` type as its own thin pointer data field.
2340 // This provides information about, e.g., DST struct pointees
2341 // (which may have no non-DST form), and will work as long
2342 // as the `Abi` or `FieldsShape` is checked by users.
2344 let nil = tcx.mk_unit();
2345 let unit_ptr_ty = if this.ty.is_unsafe_ptr() {
2348 tcx.mk_mut_ref(tcx.lifetimes.re_static, nil)
2351 // NOTE(eddyb) using an empty `ParamEnv`, and `unwrap`-ing
2352 // the `Result` should always work because the type is
2353 // always either `*mut ()` or `&'static mut ()`.
2354 return TyMaybeWithLayout::TyAndLayout(TyAndLayout {
2356 ..tcx.layout_of(ty::ParamEnv::reveal_all().and(unit_ptr_ty)).unwrap()
// Metadata field of a fat pointer: depends on the unsized tail of the pointee.
2360 match tcx.struct_tail_erasing_lifetimes(pointee, cx.param_env()).kind() {
2361 ty::Slice(_) | ty::Str => TyMaybeWithLayout::Ty(tcx.types.usize),
2362 ty::Dynamic(_, _) => {
2363 TyMaybeWithLayout::Ty(tcx.mk_imm_ref(
2364 tcx.lifetimes.re_static,
2365 tcx.mk_array(tcx.types.usize, 3),
2367 /* FIXME: use actual fn pointers
2368 Warning: naively computing the number of entries in the
2369 vtable by counting the methods on the trait + methods on
2370 all parent traits does not work, because some methods can
2371 be not object safe and thus excluded from the vtable.
2372 Increase this counter if you tried to implement this but
2373 failed to do it without duplicating a lot of code from
2374 other places in the compiler: 2
2376 tcx.mk_array(tcx.types.usize, 3),
2377 tcx.mk_array(Option<fn()>),
2381 _ => bug!("TyAndLayout::field({:?}): not applicable", this),
2385 // Arrays and slices.
2386 ty::Array(element, _) | ty::Slice(element) => TyMaybeWithLayout::Ty(element),
2387 ty::Str => TyMaybeWithLayout::Ty(tcx.types.u8),
2389 // Tuples, generators and closures.
2390 ty::Closure(_, ref substs) => field_ty_or_layout(
2391 TyAndLayout { ty: substs.as_closure().tupled_upvars_ty(), ..this },
2396 ty::Generator(def_id, ref substs, _) => match this.variants {
2397 Variants::Single { index } => TyMaybeWithLayout::Ty(
2400 .state_tys(def_id, tcx)
2401 .nth(index.as_usize())
2406 Variants::Multiple { tag, tag_field, .. } => {
2408 return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
2410 TyMaybeWithLayout::Ty(substs.as_generator().prefix_tys().nth(i).unwrap())
2414 ty::Tuple(tys) => TyMaybeWithLayout::Ty(tys[i]),
2417 ty::Adt(def, substs) => {
2418 match this.variants {
2419 Variants::Single { index } => {
2420 TyMaybeWithLayout::Ty(def.variant(index).fields[i].ty(tcx, substs))
2423 // Discriminant field for enums (where applicable).
2424 Variants::Multiple { tag, .. } => {
2426 return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
2433 | ty::Placeholder(..)
2437 | ty::Error(_) => bug!("TyAndLayout::field: unexpected type `{}`", this.ty),
// Lay out a bare field type; a failure here indicates an inconsistency with
// the already-computed parent layout, hence the bug! on error.
2441 match field_ty_or_layout(this, cx, i) {
2442 TyMaybeWithLayout::Ty(field_ty) => {
2443 cx.tcx().layout_of(cx.param_env().and(field_ty)).unwrap_or_else(|e| {
2445 "failed to get layout for `{}`: {},\n\
2446 despite it being a field (#{}) of an existing layout: {:#?}",
2454 TyMaybeWithLayout::TyAndLayout(field_layout) => field_layout,
// Determines what (if anything) is known about a pointer stored at `offset`
// inside `this`: its pointee size/align, address space, and a `PointerKind`
// used to derive LLVM attributes (noalias/readonly/dereferenceable).
// NOTE(review): extraction dropped interior lines; code left byte-identical.
2458 fn ty_and_layout_pointee_info_at(
2459 this: TyAndLayout<'tcx>,
2462 ) -> Option<PointeeInfo> {
2464 let param_env = cx.param_env();
// Function pointers live in the instruction address space; data pointers in DATA.
2466 let addr_space_of_ty = |ty: Ty<'tcx>| {
2467 if ty.is_fn() { cx.data_layout().instruction_address_space } else { AddressSpace::DATA }
2470 let pointee_info = match *this.ty.kind() {
2471 ty::RawPtr(mt) if offset.bytes() == 0 => {
2472 tcx.layout_of(param_env.and(mt.ty)).ok().map(|layout| PointeeInfo {
2474 align: layout.align.abi,
2476 address_space: addr_space_of_ty(mt.ty),
2479 ty::FnPtr(fn_sig) if offset.bytes() == 0 => {
2480 tcx.layout_of(param_env.and(tcx.mk_fn_ptr(fn_sig))).ok().map(|layout| PointeeInfo {
2482 align: layout.align.abi,
2484 address_space: cx.data_layout().instruction_address_space,
2487 ty::Ref(_, ty, mt) if offset.bytes() == 0 => {
2488 let address_space = addr_space_of_ty(ty);
2489 let kind = if tcx.sess.opts.optimize == OptLevel::No {
2490 // Use conservative pointer kind if not optimizing. This saves us the
2491 // Freeze/Unpin queries, and can save time in the codegen backend (noalias
2492 // attributes in LLVM have compile-time cost even in unoptimized builds).
2496 hir::Mutability::Not => {
2497 if ty.is_freeze(tcx.at(DUMMY_SP), cx.param_env()) {
2503 hir::Mutability::Mut => {
2504 // References to self-referential structures should not be considered
2505 // noalias, as another pointer to the structure can be obtained, that
2506 // is not based-on the original reference. We consider all !Unpin
2507 // types to be potentially self-referential here.
2508 if ty.is_unpin(tcx.at(DUMMY_SP), cx.param_env()) {
2509 PointerKind::UniqueBorrowed
2517 tcx.layout_of(param_env.and(ty)).ok().map(|layout| PointeeInfo {
2519 align: layout.align.abi,
// Not a pointer type itself: recurse into the field that covers `offset`.
2526 let mut data_variant = match this.variants {
2527 // Within the discriminant field, only the niche itself is
2528 // always initialized, so we only check for a pointer at its
2531 // If the niche is a pointer, it's either valid (according
2532 // to its type), or null (which the niche field's scalar
2533 // validity range encodes). This allows using
2534 // `dereferenceable_or_null` for e.g., `Option<&T>`, and
2535 // this will continue to work as long as we don't start
2536 // using more niches than just null (e.g., the first page of
2537 // the address space, or unaligned pointers).
2538 Variants::Multiple {
2539 tag_encoding: TagEncoding::Niche { dataful_variant, .. },
2542 } if this.fields.offset(tag_field) == offset => {
2543 Some(this.for_variant(cx, dataful_variant))
2548 if let Some(variant) = data_variant {
2549 // We're not interested in any unions.
2550 if let FieldsShape::Union(_) = variant.fields {
2551 data_variant = None;
2555 let mut result = None;
2557 if let Some(variant) = data_variant {
2558 let ptr_end = offset + Pointer.size(cx);
2559 for i in 0..variant.fields.count() {
2560 let field_start = variant.fields.offset(i);
2561 if field_start <= offset {
2562 let field = variant.field(cx, i);
2563 result = field.to_result().ok().and_then(|field| {
2564 if ptr_end <= field_start + field.size {
2565 // We found the right field, look inside it.
2567 field.pointee_info_at(cx, offset - field_start);
2573 if result.is_some() {
2580 // FIXME(eddyb) This should be for `ptr::Unique<T>`, not `Box<T>`.
2581 if let Some(ref mut pointee) = result {
2582 if let ty::Adt(def, _) = this.ty.kind() {
2583 if def.is_box() && offset.bytes() == 0 {
2584 pointee.safe = Some(PointerKind::UniqueOwned);
2594 "pointee_info_at (offset={:?}, type kind: {:?}) => {:?}",
// Computes the *effective* signature used for ABI purposes: `FnDef`s are
// normalized and substituted; closures get their environment prepended as an
// extra first argument; generators are rewritten to the
// `fn(Pin<&mut Self>, Resume) -> GeneratorState<Yield, Return>` resume shape.
// NOTE(review): extraction dropped interior lines; code left byte-identical.
2604 impl<'tcx> ty::Instance<'tcx> {
2605 // NOTE(eddyb) this is private to avoid using it from outside of
2606 // `fn_abi_of_instance` - any other uses are either too high-level
2607 // for `Instance` (e.g. typeck would use `Ty::fn_sig` instead),
2608 // or should go through `FnAbi` instead, to avoid losing any
2609 // adjustments `fn_abi_of_instance` might be performing.
2610 fn fn_sig_for_fn_abi(
2613 param_env: ty::ParamEnv<'tcx>,
2614 ) -> ty::PolyFnSig<'tcx> {
2615 let ty = self.ty(tcx, param_env);
2618 // HACK(davidtwco,eddyb): This is a workaround for polymorphization considering
2619 // parameters unused if they show up in the signature, but not in the `mir::Body`
2620 // (i.e. due to being inside a projection that got normalized, see
2621 // `src/test/ui/polymorphization/normalized_sig_types.rs`), and codegen not keeping
2622 // track of a polymorphization `ParamEnv` to allow normalizing later.
2623 let mut sig = match *ty.kind() {
2624 ty::FnDef(def_id, substs) => tcx
2625 .normalize_erasing_regions(tcx.param_env(def_id), tcx.fn_sig(def_id))
2626 .subst(tcx, substs),
2627 _ => unreachable!(),
2630 if let ty::InstanceDef::VtableShim(..) = self.def {
2631 // Modify `fn(self, ...)` to `fn(self: *mut Self, ...)`.
2632 sig = sig.map_bound(|mut sig| {
2633 let mut inputs_and_output = sig.inputs_and_output.to_vec();
2634 inputs_and_output[0] = tcx.mk_mut_ptr(inputs_and_output[0]);
2635 sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);
2641 ty::Closure(def_id, substs) => {
2642 let sig = substs.as_closure().sig();
2644 let bound_vars = tcx.mk_bound_variable_kinds(
2647 .chain(iter::once(ty::BoundVariableKind::Region(ty::BrEnv))),
2649 let br = ty::BoundRegion {
2650 var: ty::BoundVar::from_usize(bound_vars.len() - 1),
2651 kind: ty::BoundRegionKind::BrEnv,
2653 let env_region = ty::ReLateBound(ty::INNERMOST, br);
2654 let env_ty = tcx.closure_env_ty(def_id, substs, env_region).unwrap();
2656 let sig = sig.skip_binder();
2657 ty::Binder::bind_with_vars(
2659 iter::once(env_ty).chain(sig.inputs().iter().cloned()),
2668 ty::Generator(_, substs, _) => {
2669 let sig = substs.as_generator().poly_sig();
2671 let bound_vars = tcx.mk_bound_variable_kinds(
2674 .chain(iter::once(ty::BoundVariableKind::Region(ty::BrEnv))),
2676 let br = ty::BoundRegion {
2677 var: ty::BoundVar::from_usize(bound_vars.len() - 1),
2678 kind: ty::BoundRegionKind::BrEnv,
2680 let env_region = ty::ReLateBound(ty::INNERMOST, br);
2681 let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty);
// The generator receiver is `Pin<&mut Self>`.
2683 let pin_did = tcx.require_lang_item(LangItem::Pin, None);
2684 let pin_adt_ref = tcx.adt_def(pin_did);
2685 let pin_substs = tcx.intern_substs(&[env_ty.into()]);
2686 let env_ty = tcx.mk_adt(pin_adt_ref, pin_substs);
2688 let sig = sig.skip_binder();
// Return type is `GeneratorState<Yield, Return>`.
2689 let state_did = tcx.require_lang_item(LangItem::GeneratorState, None);
2690 let state_adt_ref = tcx.adt_def(state_did);
2691 let state_substs = tcx.intern_substs(&[sig.yield_ty.into(), sig.return_ty.into()]);
2692 let ret_ty = tcx.mk_adt(state_adt_ref, state_substs);
2693 ty::Binder::bind_with_vars(
2695 [env_ty, sig.resume_ty].iter(),
2698 hir::Unsafety::Normal,
2699 rustc_target::spec::abi::Abi::Rust,
2704 _ => bug!("unexpected type {:?} in Instance::fn_sig", ty),
2709 /// Calculates whether a function's ABI can unwind or not.
2711 /// This takes two primary parameters:
2713 /// * `codegen_fn_attr_flags` - these are flags calculated as part of the
2714 ///   codegen attrs for a defined function. For function pointers this set of
2715 ///   flags is the empty set. This is only applicable for Rust-defined
2716 ///   functions, and generally isn't needed except for small optimizations where
2717 ///   we try to say a function which otherwise might look like it could unwind
2718 ///   doesn't actually unwind (such as for intrinsics and such).
2720 /// * `abi` - this is the ABI that the function is defined with. This is the
2721 ///   primary factor for determining whether a function can unwind or not.
2723 /// Note that in this case unwinding is not necessarily panicking in Rust. Rust
2724 /// panics are implemented with unwinds on most platforms (when
2725 /// `-Cpanic=unwind`), but this also accounts for `-Cpanic=abort` build modes.
2726 /// Notably unwinding is disallowed for more non-Rust ABIs unless it's
2727 /// specifically in the name (e.g. `"C-unwind"`). Unwinding within each ABI is
2728 /// defined for each ABI individually, but it always corresponds to some form of
2729 /// stack-based unwinding (the exact mechanism of which varies
2730 /// platform-by-platform).
2732 /// Rust functions are classified whether or not they can unwind based on the
2733 /// active "panic strategy". In other words Rust functions are considered to
2734 /// unwind in `-Cpanic=unwind` mode and cannot unwind in `-Cpanic=abort` mode.
2735 /// Note that Rust supports intermingling panic=abort and panic=unwind code, but
2736 /// only if the final panic mode is panic=abort. In this scenario any code
2737 /// previously compiled assuming that a function can unwind is still correct, it
2738 /// just never happens to actually unwind at runtime.
2740 /// This function's answer to whether or not a function can unwind is quite
2741 /// impactful throughout the compiler. This affects things like:
2743 /// * Calling a function which can't unwind means codegen simply ignores any
2744 ///   associated unwinding cleanup.
2745 /// * Calling a function which can unwind from a function which can't unwind
2746 ///   causes the `abort_unwinding_calls` MIR pass to insert a landing pad that
2747 ///   aborts the process.
2748 /// * This affects whether functions have the LLVM `nounwind` attribute, which
2749 ///   affects various optimizations and codegen.
2751 /// FIXME: this is actually buggy with respect to Rust functions. Rust functions
2752 /// compiled with `-Cpanic=unwind` and referenced from another crate compiled
2753 /// with `-Cpanic=abort` will look like they can't unwind when in fact they
2754 /// might (from a foreign exception or similar).
2756 pub fn fn_can_unwind<'tcx>(
2758 codegen_fn_attr_flags: CodegenFnAttrFlags,
2761 // Special attribute for functions which can't unwind.
2762 if codegen_fn_attr_flags.contains(CodegenFnAttrFlags::NEVER_UNWIND) {
2766 // Otherwise if this isn't special then unwinding is generally determined by
2767 // the ABI of the function itself. ABIs like `C` have variants which also
2768 // specifically allow unwinding (`C-unwind`), but not all platform-specific
2769 // ABIs have such an option. Otherwise the only other thing here is Rust
2770 // itself, and those ABIs are determined by the panic strategy configured
2771 // for this compilation.
2773 // Unfortunately at this time there's also another caveat. Rust [RFC
2774 // 2945][rfc] has been accepted and is in the process of being implemented
2775 // and stabilized. In this interim state we need to deal with historical
2776 // rustc behavior as well as plan for future rustc behavior.
2778 // Historically functions declared with `extern "C"` were marked at the
2779 // codegen layer as `nounwind`. This happened regardless of `panic=unwind`
2780 // or not. This is UB for functions in `panic=unwind` mode that then
2781 // actually panic and unwind. Note that this behavior is true for both
2782 // externally declared functions as well as Rust-defined functions.
2784 // To fix this UB rustc would like to change in the future to catch unwinds
2785 // from function calls that may unwind within a Rust-defined `extern "C"`
2786 // function and forcibly abort the process, thereby respecting the
2787 // `nounwind` attribute emitted for `extern "C"`. This behavior change isn't
2788 // ready to roll out, so determining whether or not the `C` family of ABIs
2789 // unwinds is conditional not only on their definition but also whether the
2790 // `#![feature(c_unwind)]` feature gate is active.
2792 // Note that this means that unlike historical compilers rustc now, by
2793 // default, unconditionally thinks that the `C` ABI may unwind. This will
2794 // prevent some optimization opportunities, however, so we try to scope this
2795 // change and only assume that `C` unwinds with `panic=unwind` (as opposed
2796 // to `panic=abort`).
2798 // Eventually the check against `c_unwind` here will ideally get removed and
2799 // this'll be a little cleaner as it'll be a straightforward check of the
2802 // [rfc]: https://github.com/rust-lang/rfcs/blob/master/text/2945-c-unwind-abi.md
2808 | Stdcall { unwind }
2809 | Fastcall { unwind }
2810 | Vectorcall { unwind }
2811 | Thiscall { unwind }
2814 | SysV64 { unwind } => {
2816 || (!tcx.features().c_unwind && tcx.sess.panic_strategy() == PanicStrategy::Unwind)
// Platform-specific interrupt/intrinsic-like ABIs can never unwind.
2824 | AvrNonBlockingInterrupt
2825 | CCmseNonSecureCall
2829 | Unadjusted => false,
2830 Rust | RustCall => tcx.sess.panic_strategy() == PanicStrategy::Unwind,
// Maps a source-level `extern "..."` ABI (after target-specific adjustment via
// `adjust_abi`) to the backend calling convention `Conv`.
// NOTE(review): extraction dropped some trailing lines (non-contiguous original
// numbering); code left byte-identical.
2835 pub fn conv_from_spec_abi(tcx: TyCtxt<'_>, abi: SpecAbi) -> Conv {
2836 use rustc_target::spec::abi::Abi::*;
2837 match tcx.sess.target.adjust_abi(abi) {
2838 RustIntrinsic | PlatformIntrinsic | Rust | RustCall => Conv::Rust,
2840 // It's the ABI's job to select this, not ours.
2841 System { .. } => bug!("system abi should be selected elsewhere"),
2842 EfiApi => bug!("eficall abi should be selected elsewhere"),
2844 Stdcall { .. } => Conv::X86Stdcall,
2845 Fastcall { .. } => Conv::X86Fastcall,
2846 Vectorcall { .. } => Conv::X86VectorCall,
2847 Thiscall { .. } => Conv::X86ThisCall,
2848 C { .. } => Conv::C,
2849 Unadjusted => Conv::C,
2850 Win64 { .. } => Conv::X86_64Win64,
2851 SysV64 { .. } => Conv::X86_64SysV,
2852 Aapcs { .. } => Conv::ArmAapcs,
2853 CCmseNonSecureCall => Conv::CCmseNonSecureCall,
2854 PtxKernel => Conv::PtxKernel,
2855 Msp430Interrupt => Conv::Msp430Intr,
2856 X86Interrupt => Conv::X86Intr,
2857 AmdGpuKernel => Conv::AmdGpuKernel,
2858 AvrInterrupt => Conv::AvrInterrupt,
2859 AvrNonBlockingInterrupt => Conv::AvrNonBlockingInterrupt,
2862 // These API constants ought to be more specific...
2863 Cdecl { .. } => Conv::C,
2867 /// Error produced by attempting to compute or adjust a `FnAbi`.
2868 #[derive(Copy, Clone, Debug, HashStable)]
2869 pub enum FnAbiError<'tcx> {
2870 /// Error produced by a `layout_of` call, while computing `FnAbi` initially.
2871 Layout(LayoutError<'tcx>),
2873 /// Error produced by attempting to adjust a `FnAbi`, for a "foreign" ABI.
2874 AdjustForForeignAbi(call::AdjustForForeignAbiError),
// Lets `?` convert `layout_of` errors into `FnAbiError` (wraps in `Self::Layout`).
// NOTE(review): the conversion body was dropped by extraction; code left byte-identical.
2877 impl<'tcx> From<LayoutError<'tcx>> for FnAbiError<'tcx> {
2878 fn from(err: LayoutError<'tcx>) -> Self {
// Lets `?` convert foreign-ABI adjustment errors into `FnAbiError`.
2883 impl From<call::AdjustForForeignAbiError> for FnAbiError<'_> {
2884 fn from(err: call::AdjustForForeignAbiError) -> Self {
2885 Self::AdjustForForeignAbi(err)
// User-facing rendering: delegates to the `Display` of whichever inner error
// this wraps.
2889 impl<'tcx> fmt::Display for FnAbiError<'tcx> {
2890 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2892 Self::Layout(err) => err.fmt(f),
2893 Self::AdjustForForeignAbi(err) => err.fmt(f),
2898 // FIXME(eddyb) maybe use something like this for an unified `fn_abi_of`, not
2899 // just for error handling.
// Identifies which `fn_abi_of_*` request failed, for error reporting.
2901 pub enum FnAbiRequest<'tcx> {
2902 OfFnPtr { sig: ty::PolyFnSig<'tcx>, extra_args: &'tcx ty::List<Ty<'tcx>> },
2903 OfInstance { instance: ty::Instance<'tcx>, extra_args: &'tcx ty::List<Ty<'tcx>> },
2906 /// Trait for contexts that want to be able to compute `FnAbi`s.
2907 /// This automatically gives access to `FnAbiOf`, through a blanket `impl`.
2908 pub trait FnAbiOfHelpers<'tcx>: LayoutOfHelpers<'tcx> {
2909 /// The `&FnAbi`-wrapping type (or `&FnAbi` itself), which will be
2910 /// returned from `fn_abi_of_*` (see also `handle_fn_abi_err`).
2911 type FnAbiOfResult: MaybeResult<&'tcx FnAbi<'tcx, Ty<'tcx>>>;
2913 /// Helper used for `fn_abi_of_*`, to adapt `tcx.fn_abi_of_*(...)` into a
2914 /// `Self::FnAbiOfResult` (which does not need to be a `Result<...>`).
2916 /// Most `impl`s, which propagate `FnAbiError`s, should simply return `err`,
2917 /// but this hook allows e.g. codegen to return only `&FnAbi` from its
2918 /// `cx.fn_abi_of_*(...)`, without any `Result<...>` around it to deal with
2919 /// (and any `FnAbiError`s are turned into fatal errors or ICEs).
2920 fn handle_fn_abi_err(
2922 err: FnAbiError<'tcx>,
2924 fn_abi_request: FnAbiRequest<'tcx>,
2925 ) -> <Self::FnAbiOfResult as MaybeResult<&'tcx FnAbi<'tcx, Ty<'tcx>>>>::Error;
2928 /// Blanket extension trait for contexts that can compute `FnAbi`s.
2929 pub trait FnAbiOf<'tcx>: FnAbiOfHelpers<'tcx> {
2930 /// Compute a `FnAbi` suitable for indirect calls, i.e. to `fn` pointers.
2932 /// NB: this doesn't handle virtual calls - those should use `fn_abi_of_instance`
2933 /// instead, where the instance is an `InstanceDef::Virtual`.
2935 fn fn_abi_of_fn_ptr(
2937 sig: ty::PolyFnSig<'tcx>,
2938 extra_args: &'tcx ty::List<Ty<'tcx>>,
2939 ) -> Self::FnAbiOfResult {
2940 // FIXME(eddyb) get a better `span` here.
2941 let span = self.layout_tcx_at_span();
2942 let tcx = self.tcx().at(span);
2944 MaybeResult::from(tcx.fn_abi_of_fn_ptr(self.param_env().and((sig, extra_args))).map_err(
2945 |err| self.handle_fn_abi_err(err, span, FnAbiRequest::OfFnPtr { sig, extra_args }),
2949 /// Compute a `FnAbi` suitable for declaring/defining an `fn` instance, and for
2950 /// direct calls to an `fn`.
2952 /// NB: that includes virtual calls, which are represented by "direct calls"
2953 /// to an `InstanceDef::Virtual` instance (of `<dyn Trait as Trait>::fn`).
2955 fn fn_abi_of_instance(
2957 instance: ty::Instance<'tcx>,
2958 extra_args: &'tcx ty::List<Ty<'tcx>>,
2959 ) -> Self::FnAbiOfResult {
2960 // FIXME(eddyb) get a better `span` here.
2961 let span = self.layout_tcx_at_span();
2962 let tcx = self.tcx().at(span);
2965 tcx.fn_abi_of_instance(self.param_env().and((instance, extra_args))).map_err(|err| {
2966 // HACK(eddyb) at least for definitions of/calls to `Instance`s,
2967 // we can get some kind of span even if one wasn't provided.
2968 // However, we don't do this early in order to avoid calling
2969 // `def_span` unconditionally (which may have a perf penalty).
2970 let span = if !span.is_dummy() { span } else { tcx.def_span(instance.def_id()) };
2971 self.handle_fn_abi_err(err, span, FnAbiRequest::OfInstance { instance, extra_args })
// Blanket impl: any context implementing `FnAbiOfHelpers` gets `FnAbiOf` for free.
2977 impl<'tcx, C: FnAbiOfHelpers<'tcx>> FnAbiOf<'tcx> for C {}
// Query provider: computes the `FnAbi` for an `fn` pointer signature. Function
// pointers have no codegen attrs, hence `CodegenFnAttrFlags::empty()`.
// NOTE(review): extraction dropped interior lines; code left byte-identical.
2979 fn fn_abi_of_fn_ptr<'tcx>(
2981 query: ty::ParamEnvAnd<'tcx, (ty::PolyFnSig<'tcx>, &'tcx ty::List<Ty<'tcx>>)>,
2982 ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
2983 let (param_env, (sig, extra_args)) = query.into_parts();
2985 LayoutCx { tcx, param_env }.fn_abi_new_uncached(
2989 CodegenFnAttrFlags::empty(),
// Query provider: computes the `FnAbi` for a concrete `Instance`, adding the
// implicit `#[track_caller]` location argument when required, and forcing a
// thin `self` pointer for virtual (vtable) calls.
// NOTE(review): extraction dropped interior lines; code left byte-identical.
2994 fn fn_abi_of_instance<'tcx>(
2996 query: ty::ParamEnvAnd<'tcx, (ty::Instance<'tcx>, &'tcx ty::List<Ty<'tcx>>)>,
2997 ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
2998 let (param_env, (instance, extra_args)) = query.into_parts();
3000 let sig = instance.fn_sig_for_fn_abi(tcx, param_env);
3002 let caller_location = if instance.def.requires_caller_location(tcx) {
3003 Some(tcx.caller_location_ty())
3008 let attrs = tcx.codegen_fn_attrs(instance.def_id()).flags;
3010 LayoutCx { tcx, param_env }.fn_abi_new_uncached(
3015 matches!(instance.def, ty::InstanceDef::Virtual(..)),
// Builds a fresh `FnAbi` from a signature: normalizes the signature, resolves
// the calling convention, untuples "rust-call" arguments, computes each
// argument's `ArgAbi` (with Rust-specific scalar attributes), and finally runs
// ABI-specific adjustment.
// NOTE(review): extraction dropped interior lines (non-contiguous original
// numbering); code left byte-identical.
3019 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
3020 // FIXME(eddyb) perhaps group the signature/type-containing (or all of them?)
3021 // arguments of this method, into a separate `struct`.
3022 fn fn_abi_new_uncached(
3024 sig: ty::PolyFnSig<'tcx>,
3025 extra_args: &[Ty<'tcx>],
3026 caller_location: Option<Ty<'tcx>>,
3027 codegen_fn_attr_flags: CodegenFnAttrFlags,
3028 // FIXME(eddyb) replace this with something typed, like an `enum`.
3029 force_thin_self_ptr: bool,
3030 ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
3031 debug!("fn_abi_new_uncached({:?}, {:?})", sig, extra_args);
3033 let sig = self.tcx.normalize_erasing_late_bound_regions(self.param_env, sig);
3035 let conv = conv_from_spec_abi(self.tcx(), sig.abi);
3037 let mut inputs = sig.inputs();
// "rust-call" functions take their arguments as a trailing tuple; split that
// tuple back out into individual extra arguments.
3038 let extra_args = if sig.abi == RustCall {
3039 assert!(!sig.c_variadic && extra_args.is_empty());
3041 if let Some(input) = sig.inputs().last() {
3042 if let ty::Tuple(tupled_arguments) = input.kind() {
3043 inputs = &sig.inputs()[0..sig.inputs().len() - 1];
3047 "argument to function with \"rust-call\" ABI \
3053 "argument to function with \"rust-call\" ABI \
3058 assert!(sig.c_variadic || extra_args.is_empty());
// Targets on which zero-sized arguments are NOT ignored (see comment below).
3062 let target = &self.tcx.sess.target;
3063 let target_env_gnu_like = matches!(&target.env[..], "gnu" | "musl" | "uclibc");
3064 let win_x64_gnu = target.os == "windows" && target.arch == "x86_64" && target.env == "gnu";
3065 let linux_s390x_gnu_like =
3066 target.os == "linux" && target.arch == "s390x" && target_env_gnu_like;
3067 let linux_sparc64_gnu_like =
3068 target.os == "linux" && target.arch == "sparc64" && target_env_gnu_like;
3069 let linux_powerpc_gnu_like =
3070 target.os == "linux" && target.arch == "powerpc" && target_env_gnu_like;
3072 let rust_abi = matches!(sig.abi, RustIntrinsic | PlatformIntrinsic | Rust | RustCall);
3074 // Handle safe Rust thin and fat pointers.
3075 let adjust_for_rust_scalar = |attrs: &mut ArgAttributes,
3077 layout: TyAndLayout<'tcx>,
3080 // Booleans are always a noundef i1 that needs to be zero-extended.
3081 if scalar.is_bool() {
3082 attrs.ext(ArgExtension::Zext);
3083 attrs.set(ArgAttribute::NoUndef);
3087 // Scalars which have invalid values cannot be undef.
3088 if !scalar.is_always_valid(self) {
3089 attrs.set(ArgAttribute::NoUndef);
3092 // Only pointer types handled below.
3093 let Scalar::Initialized { value: Pointer, valid_range} = scalar else { return };
3095 if !valid_range.contains(0) {
3096 attrs.set(ArgAttribute::NonNull);
3099 if let Some(pointee) = layout.pointee_info_at(self, offset) {
3100 if let Some(kind) = pointee.safe {
3101 attrs.pointee_align = Some(pointee.align);
3103 // `Box` (`UniqueBorrowed`) are not necessarily dereferenceable
3104 // for the entire duration of the function as they can be deallocated
3105 // at any time. Set their valid size to 0.
3106 attrs.pointee_size = match kind {
3107 PointerKind::UniqueOwned => Size::ZERO,
3111 // `Box`, `&T`, and `&mut T` cannot be undef.
3112 // Note that this only applies to the value of the pointer itself;
3113 // this attribute doesn't make it UB for the pointed-to data to be undef.
3114 attrs.set(ArgAttribute::NoUndef);
3116 // `Box` pointer parameters never alias because ownership is transferred
3117 // `&mut` pointer parameters never alias other parameters,
3118 // or mutable global data
3120 // `&T` where `T` contains no `UnsafeCell<U>` is immutable,
3121 // and can be marked as both `readonly` and `noalias`, as
3122 // LLVM's definition of `noalias` is based solely on memory
3123 // dependencies rather than pointer equality
3125 // Due to past miscompiles in LLVM, we apply a separate NoAliasMutRef attribute
3126 // for UniqueBorrowed arguments, so that the codegen backend can decide whether
3127 // or not to actually emit the attribute. It can also be controlled with the
3128 // `-Zmutable-noalias` debugging option.
3129 let no_alias = match kind {
3130 PointerKind::Shared | PointerKind::UniqueBorrowed => false,
3131 PointerKind::UniqueOwned => true,
3132 PointerKind::Frozen => !is_return,
3135 attrs.set(ArgAttribute::NoAlias);
3138 if kind == PointerKind::Frozen && !is_return {
3139 attrs.set(ArgAttribute::ReadOnly);
3142 if kind == PointerKind::UniqueBorrowed && !is_return {
3143 attrs.set(ArgAttribute::NoAliasMutRef);
// Computes the `ArgAbi` for one argument (or the return, when `arg_idx` is `None`).
3149 let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| -> Result<_, FnAbiError<'tcx>> {
3150 let is_return = arg_idx.is_none();
3152 let layout = self.layout_of(ty)?;
3153 let layout = if force_thin_self_ptr && arg_idx == Some(0) {
3154 // Don't pass the vtable, it's not an argument of the virtual fn.
3155 // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
3156 // or `&/&mut dyn Trait` because this is special-cased elsewhere in codegen
3157 make_thin_self_ptr(self, layout)
3162 let mut arg = ArgAbi::new(self, layout, |layout, scalar, offset| {
3163 let mut attrs = ArgAttributes::new();
3164 adjust_for_rust_scalar(&mut attrs, scalar, *layout, offset, is_return);
3168 if arg.layout.is_zst() {
3169 // For some forsaken reason, x86_64-pc-windows-gnu
3170 // doesn't ignore zero-sized struct arguments.
3171 // The same is true for {s390x,sparc64,powerpc}-unknown-linux-{gnu,musl,uclibc}.
3175 && !linux_s390x_gnu_like
3176 && !linux_sparc64_gnu_like
3177 && !linux_powerpc_gnu_like)
3179 arg.mode = PassMode::Ignore;
3186 let mut fn_abi = FnAbi {
3187 ret: arg_of(sig.output(), None)?,
3191 .chain(extra_args.iter().copied())
3192 .chain(caller_location)
3194 .map(|(i, ty)| arg_of(ty, Some(i)))
3195 .collect::<Result<_, _>>()?,
3196 c_variadic: sig.c_variadic,
3197 fixed_count: inputs.len(),
3199 can_unwind: fn_can_unwind(self.tcx(), codegen_fn_attr_flags, sig.abi),
3201 self.fn_abi_adjust_for_abi(&mut fn_abi, sig.abi)?;
3202 debug!("fn_abi_new_uncached = {:?}", fn_abi);
3203 Ok(self.tcx.arena.alloc(fn_abi))
// Post-processes a freshly built `FnAbi` for the given ABI: Rust-family ABIs
// get per-argument fixups (SIMD and large aggregates become indirect, small
// aggregates are cast to integers); other ABIs are delegated to the
// target-specific `adjust_for_foreign_abi`; `Unadjusted` is left alone.
// NOTE(review): extraction dropped interior lines; code left byte-identical.
3206 fn fn_abi_adjust_for_abi(
3208 fn_abi: &mut FnAbi<'tcx, Ty<'tcx>>,
3210 ) -> Result<(), FnAbiError<'tcx>> {
3211 if abi == SpecAbi::Unadjusted {
3215 if abi == SpecAbi::Rust
3216 || abi == SpecAbi::RustCall
3217 || abi == SpecAbi::RustIntrinsic
3218 || abi == SpecAbi::PlatformIntrinsic
3220 let fixup = |arg: &mut ArgAbi<'tcx, Ty<'tcx>>| {
3221 if arg.is_ignore() {
3225 match arg.layout.abi {
3226 Abi::Aggregate { .. } => {}
3228 // This is a fun case! The gist of what this is doing is
3229 // that we want callers and callees to always agree on the
3230 // ABI of how they pass SIMD arguments. If we were to *not*
3231 // make these arguments indirect then they'd be immediates
3232 // in LLVM, which means that they'd use whatever the
3233 // appropriate ABI is for the callee and the caller. That
3234 // means, for example, if the caller doesn't have AVX
3235 // enabled but the callee does, then passing an AVX argument
3236 // across this boundary would cause corrupt data to show up.
3238 // This problem is fixed by unconditionally passing SIMD
3239 // arguments through memory between callers and callees
3240 // which should get them all to agree on ABI regardless of
3241 // target feature sets. Some more information about this
3242 // issue can be found in #44367.
3244 // Note that the platform intrinsic ABI is exempt here as
3245 // that's how we connect up to LLVM and it's unstable
3246 // anyway, we control all calls to it in libstd.
3248 if abi != SpecAbi::PlatformIntrinsic
3249 && self.tcx.sess.target.simd_types_indirect =>
3251 arg.make_indirect();
3258 let size = arg.layout.size;
3259 if arg.layout.is_unsized() || size > Pointer.size(self) {
3260 arg.make_indirect();
3262 // We want to pass small aggregates as immediates, but using
3263 // a LLVM aggregate type for this leads to bad optimizations,
3264 // so we pick an appropriately sized integer type instead.
3265 arg.cast_to(Reg { kind: RegKind::Integer, size });
3268 fixup(&mut fn_abi.ret);
3269 for arg in &mut fn_abi.args {
3273 fn_abi.adjust_for_foreign_abi(self, abi)?;
3280 fn make_thin_self_ptr<'tcx>(
3281 cx: &(impl HasTyCtxt<'tcx> + HasParamEnv<'tcx>),
3282 layout: TyAndLayout<'tcx>,
3283 ) -> TyAndLayout<'tcx> {
3285 let fat_pointer_ty = if layout.is_unsized() {
3286 // unsized `self` is passed as a pointer to `self`
3287 // FIXME (mikeyhew) change this to use &own if it is ever added to the language
3288 tcx.mk_mut_ptr(layout.ty)
3291 Abi::ScalarPair(..) => (),
3292 _ => bug!("receiver type has unsupported layout: {:?}", layout),
3295 // In the case of Rc<Self>, we need to explicitly pass a *mut RcBox<Self>
3296 // with a Scalar (not ScalarPair) ABI. This is a hack that is understood
3297 // elsewhere in the compiler as a method on a `dyn Trait`.
3298 // To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes until we
3299 // get a built-in pointer type
3300 let mut fat_pointer_layout = layout;
3301 'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr()
3302 && !fat_pointer_layout.ty.is_region_ptr()
3304 for i in 0..fat_pointer_layout.fields.count() {
3305 let field_layout = fat_pointer_layout.field(cx, i);
3307 if !field_layout.is_zst() {
3308 fat_pointer_layout = field_layout;
3309 continue 'descend_newtypes;
3313 bug!("receiver has no non-zero-sized fields {:?}", fat_pointer_layout);
3316 fat_pointer_layout.ty
3319 // we now have a type like `*mut RcBox<dyn Trait>`
3320 // change its layout to that of `*mut ()`, a thin pointer, but keep the same type
3321 // this is understood as a special case elsewhere in the compiler
3322 let unit_ptr_ty = tcx.mk_mut_ptr(tcx.mk_unit());
3327 // NOTE(eddyb) using an empty `ParamEnv`, and `unwrap`-ing the `Result`
3328 // should always work because the type is always `*mut ()`.
3329 ..tcx.layout_of(ty::ParamEnv::reveal_all().and(unit_ptr_ty)).unwrap()