1 use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
2 use crate::mir::{GeneratorLayout, GeneratorSavedLocal};
3 use crate::ty::normalize_erasing_regions::NormalizationError;
4 use crate::ty::subst::Subst;
5 use crate::ty::{self, subst::SubstsRef, ReprOptions, Ty, TyCtxt, TypeFoldable};
7 use rustc_attr as attr;
9 use rustc_hir::def_id::DefId;
10 use rustc_hir::lang_items::LangItem;
11 use rustc_index::bit_set::BitSet;
12 use rustc_index::vec::{Idx, IndexVec};
13 use rustc_session::{config::OptLevel, DataTypeKind, FieldInfo, SizeKind, VariantInfo};
14 use rustc_span::symbol::Symbol;
15 use rustc_span::{Span, DUMMY_SP};
16 use rustc_target::abi::call::{
17 ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, Conv, FnAbi, PassMode, Reg, RegKind,
19 use rustc_target::abi::*;
20 use rustc_target::spec::{abi::Abi as SpecAbi, HasTargetSpec, PanicStrategy, Target};
25 use std::num::NonZeroUsize;
28 use rand::{seq::SliceRandom, SeedableRng};
29 use rand_xoshiro::Xoshiro128StarStar;
// Registers this module's query providers (`layout_of`, `fn_abi_of_fn_ptr`,
// `fn_abi_of_instance`) into the global provider table, keeping all other
// providers via struct-update syntax.
// NOTE(review): the embedded line numbering jumps 31 -> 33, so the
// `*providers =` assignment line and the closing brace are elided from this view.
31 pub fn provide(providers: &mut ty::query::Providers) {
33 ty::query::Providers { layout_of, fn_abi_of_fn_ptr, fn_abi_of_instance, ..*providers };
// Extension trait adding rustc-specific conversions to `rustc_target`'s
// abstract `Integer` width type.
// NOTE(review): additional trait items (e.g. the `repr_discr` declaration,
// judging by the impl below) and the closing brace are elided from this view.
36 pub trait IntegerExt {
// Concrete primitive `Ty` for this width and the given signedness.
37 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx>;
// Width for an `attr::IntType` (`#[repr(i*/u*)]` attribute integer).
38 fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
// Width for a language-level signed integer type.
39 fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer;
// Width for a language-level unsigned integer type.
40 fn from_uint_ty<C: HasDataLayout>(cx: &C, uty: ty::UintTy) -> Integer;
50 impl IntegerExt for Integer {
// Map (width, signedness) to the interned primitive type in the `TyCtxt`.
// The match is exhaustive over all five widths in both signednesses.
52 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx> {
53 match (*self, signed) {
54 (I8, false) => tcx.types.u8,
55 (I16, false) => tcx.types.u16,
56 (I32, false) => tcx.types.u32,
57 (I64, false) => tcx.types.u64,
58 (I128, false) => tcx.types.u128,
59 (I8, true) => tcx.types.i8,
60 (I16, true) => tcx.types.i16,
61 (I32, true) => tcx.types.i32,
62 (I64, true) => tcx.types.i64,
63 (I128, true) => tcx.types.i128,
67 /// Gets the Integer type from an attr::IntType.
68 fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
69 let dl = cx.data_layout();
// Signed and unsigned attribute variants of the same width share an arm;
// `isize`/`usize` resolve to the target's pointer-sized integer.
72 attr::SignedInt(ast::IntTy::I8) | attr::UnsignedInt(ast::UintTy::U8) => I8,
73 attr::SignedInt(ast::IntTy::I16) | attr::UnsignedInt(ast::UintTy::U16) => I16,
74 attr::SignedInt(ast::IntTy::I32) | attr::UnsignedInt(ast::UintTy::U32) => I32,
75 attr::SignedInt(ast::IntTy::I64) | attr::UnsignedInt(ast::UintTy::U64) => I64,
76 attr::SignedInt(ast::IntTy::I128) | attr::UnsignedInt(ast::UintTy::U128) => I128,
77 attr::SignedInt(ast::IntTy::Isize) | attr::UnsignedInt(ast::UintTy::Usize) => {
78 dl.ptr_sized_integer()
// Width for a language `ty::IntTy`.
// NOTE(review): the `I8` arm (original lines 84-85) is elided from this view.
83 fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer {
86 ty::IntTy::I16 => I16,
87 ty::IntTy::I32 => I32,
88 ty::IntTy::I64 => I64,
89 ty::IntTy::I128 => I128,
90 ty::IntTy::Isize => cx.data_layout().ptr_sized_integer(),
// Width for a language `ty::UintTy`.
// NOTE(review): the `U8` arm (original lines 94-95) is elided from this view.
93 fn from_uint_ty<C: HasDataLayout>(cx: &C, ity: ty::UintTy) -> Integer {
96 ty::UintTy::U16 => I16,
97 ty::UintTy::U32 => I32,
98 ty::UintTy::U64 => I64,
99 ty::UintTy::U128 => I128,
100 ty::UintTy::Usize => cx.data_layout().ptr_sized_integer(),
104 /// Finds the appropriate Integer type and signedness for the given
105 /// signed discriminant range and `#[repr]` attribute.
106 /// N.B.: `u128` values above `i128::MAX` will be treated as signed, but
107 /// that shouldn't affect anything, other than maybe debuginfo.
// NOTE(review): the `repr_discr` signature lines (original 108-113, taking at
// least `tcx`, `ty`, `repr`, `min`, `max` judging by the call at the end of
// this file's `layout_of_uncached`) are elided from this view.
114 ) -> (Integer, bool) {
115 // Theoretically, negative values could be larger in unsigned representation
116 // than the unsigned representation of the signed minimum. However, if there
117 // are any negative values, the only valid unsigned representation is u128
118 // which can fit all i128 values, so the result remains unaffected.
119 let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
120 let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));
// An explicit `#[repr(int)]` hint pins the discriminant type; it is a bug
// (reported via the message below) if the hinted type cannot hold the range.
122 if let Some(ity) = repr.int {
123 let discr = Integer::from_attr(&tcx, ity);
124 let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
127 "Integer::repr_discr: `#[repr]` hint too small for \
128 discriminant range of enum `{}",
132 return (discr, ity.is_signed());
// Without a hint: repr(C) enums use the target's minimum C enum size,
// repr(Rust) enums shrink to the smallest fitting integer.
135 let at_least = if repr.c() {
136 // This is usually I32, however it can be different on some platforms,
137 // notably hexagon and arm-none/thumb-none
138 tcx.data_layout().c_enum_min_size
140 // repr(Rust) enums try to be as small as possible
144 // If there are no negative values, we can use the unsigned fit.
146 (cmp::max(unsigned_fit, at_least), false)
148 (cmp::max(signed_fit, at_least), true)
// Extension trait adding rustc type conversions to `rustc_target`'s
// `Primitive` (int/float/pointer) classification.
153 pub trait PrimitiveExt {
// The primitive's own `Ty` (e.g. `f32` for `F32`).
154 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
// An *integer* `Ty` of matching size (see doc comment on the impl below).
155 fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
158 impl PrimitiveExt for Primitive {
// Primitive -> concrete rustc type; pointers become `*mut ()` since no
// richer pointee information is available at this level.
160 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
162 Int(i, signed) => i.to_ty(tcx, signed),
163 F32 => tcx.types.f32,
164 F64 => tcx.types.f64,
165 Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
169 /// Return an *integer* type matching this primitive.
170 /// Useful in particular when dealing with enum discriminants.
// Pointers map to `usize` (same size); floats intentionally `bug!` since
// they have no integer counterpart in this mapping.
172 fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
174 Int(i, signed) => i.to_ty(tcx, signed),
175 Pointer => tcx.types.usize,
176 F32 | F64 => bug!("floats do not have an int type"),
181 /// The first half of a fat pointer.
183 /// - For a trait object, this is the address of the box.
184 /// - For a slice, this is the base address.
185 pub const FAT_PTR_ADDR: usize = 0;
187 /// The second half of a fat pointer.
189 /// - For a trait object, this is the address of the vtable.
190 /// - For a slice, this is the length.
191 pub const FAT_PTR_EXTRA: usize = 1;
193 /// The maximum supported number of lanes in a SIMD vector.
195 /// This value is selected based on backend support:
196 /// * LLVM does not appear to have a vector width limit.
197 /// * Cranelift stores the base-2 log of the lane count in a 4 bit integer.
// `1 << 0xF` = 2^15 = 32768: the largest lane count whose base-2 log (15)
// fits in Cranelift's 4-bit field.
198 pub const MAX_SIMD_LANES: u64 = 1 << 0xF;
// Reasons layout computation can fail for a type.
// NOTE(review): the `Unknown(Ty)` variant (referenced by the `Display` impl
// below) is elided from this view, along with the closing brace.
200 #[derive(Copy, Clone, Debug, HashStable, TyEncodable, TyDecodable)]
201 pub enum LayoutError<'tcx> {
// The type's size exceeds what the target architecture can represent.
203 SizeOverflow(Ty<'tcx>),
// The type could not be normalized, so no layout can be computed for it.
204 NormalizationFailure(Ty<'tcx>, NormalizationError<'tcx>),
// Human-readable rendering of each layout failure, used in diagnostics.
207 impl<'tcx> fmt::Display for LayoutError<'tcx> {
208 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
210 LayoutError::Unknown(ty) => write!(f, "the type `{}` has an unknown layout", ty),
211 LayoutError::SizeOverflow(ty) => {
212 write!(f, "values of the type `{}` are too big for the current architecture", ty)
214 LayoutError::NormalizationFailure(t, e) => write!(
216 "unable to determine layout for `{}` because `{}` cannot be normalized",
218 e.get_type_for_failure()
// Query implementation for `tcx.layout_of`: computes the layout of a type
// within a `ParamEnv`, guarded by the recursion limit.
// NOTE(review): the `fn layout_of(...)` signature line itself (between
// original lines 224 and 227) is elided from this view.
224 #[instrument(skip(tcx, query), level = "debug")]
227 query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
228 ) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
229 ty::tls::with_related_context(tcx, move |icx| {
230 let (param_env, ty) = query.into_parts();
// Deeply nested types (e.g. via recursive generics) abort compilation
// rather than overflowing the stack.
233 if !tcx.recursion_limit().value_within_limit(icx.layout_depth) {
234 tcx.sess.fatal(&format!("overflow representing the type `{}`", ty));
237 // Update the ImplicitCtxt to increase the layout_depth
238 let icx = ty::tls::ImplicitCtxt { layout_depth: icx.layout_depth + 1, ..icx.clone() };
240 ty::tls::enter_context(&icx, |_| {
241 let param_env = param_env.with_reveal_all_normalized(tcx);
242 let unnormalized_ty = ty;
244 // FIXME: We might want to have two different versions of `layout_of`:
245 // One that can be called after typecheck has completed and can use
246 // `normalize_erasing_regions` here and another one that can be called
247 // before typecheck has completed and uses `try_normalize_erasing_regions`.
248 let ty = match tcx.try_normalize_erasing_regions(param_env, ty) {
250 Err(normalization_error) => {
251 return Err(LayoutError::NormalizationFailure(ty, normalization_error));
// If normalization changed the type, re-enter the query with the
// normalized type so the result is cached under both keys.
255 if ty != unnormalized_ty {
256 // Ensure this layout is also cached for the normalized type.
257 return tcx.layout_of(param_env.and(ty));
260 let cx = LayoutCx { tcx, param_env };
262 let layout = cx.layout_of_uncached(ty)?;
263 let layout = TyAndLayout { ty, layout };
265 cx.record_layout_for_printing(layout);
267 // Type-level uninhabitedness should always imply ABI uninhabitedness.
268 if tcx.conservative_is_privately_uninhabited(param_env.and(ty)) {
269 assert!(layout.abi.is_uninhabited());
// Context bundle for layout computation: a backing context `C` (elided field,
// presumably `tcx: C` — the impl below uses `self.tcx`) plus the `ParamEnv`
// in which types are normalized.
277 pub struct LayoutCx<'tcx, C> {
279 pub param_env: ty::ParamEnv<'tcx>,
// NOTE(review): the `enum StructKind {` header line (original 283) and the
// first two variant names (`AlwaysSized`, `MaybeUnsized` — used throughout
// `univariant_uninterned` below) are elided; only their doc comments and the
// `Prefixed` variant survive in this view.
282 #[derive(Copy, Clone, Debug)]
284 /// A tuple, closure, or univariant which cannot be coerced to unsized.
286 /// A univariant, the last field of which may be coerced to unsized.
288 /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
289 Prefixed(Size, Align),
292 // Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
293 // This is used to go between `memory_index` (source field order to memory order)
294 // and `inverse_memory_index` (memory order to source field order).
295 // See also `FieldsShape::Arbitrary::memory_index` for more details.
296 // FIXME(eddyb) build a better abstraction for permutations, if possible.
297 fn invert_mapping(map: &[u32]) -> Vec<u32> {
298 let mut inverse = vec![0; map.len()];
299 for i in 0..map.len() {
// For each source index `i`, record it at the slot its value points to.
300 inverse[map[i] as usize] = i as u32;
// NOTE(review): the loop's closing brace, the `inverse` return expression and
// the function's closing brace (original lines 301-303) are elided here.
305 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
// Builds the canonical `ScalarPair` layout for two scalars `a` then `b`:
// `a` at offset 0, `b` at `a.size` rounded up to `b`'s ABI alignment, with
// the aggregate alignment being the max of both (and the target aggregate
// alignment), and total size padded to that alignment.
306 fn scalar_pair(&self, a: Scalar, b: Scalar) -> LayoutS<'tcx> {
307 let dl = self.data_layout();
308 let b_align = b.align(dl);
309 let align = a.align(dl).max(b_align).max(dl.aggregate_align);
310 let b_offset = a.size(dl).align_to(b_align.abi);
311 let size = (b_offset + b.size(dl)).align_to(align.abi);
313 // HACK(nox): We iter on `b` and then `a` because `max_by_key`
314 // returns the last maximum.
315 let largest_niche = Niche::from_scalar(dl, b_offset, b)
317 .chain(Niche::from_scalar(dl, Size::ZERO, a))
318 .max_by_key(|niche| niche.available(dl));
// Resulting layout: two fields in source order at offsets 0 and b_offset.
321 variants: Variants::Single { index: VariantIdx::new(0) },
322 fields: FieldsShape::Arbitrary {
323 offsets: vec![Size::ZERO, b_offset],
324 memory_index: vec![0, 1],
326 abi: Abi::ScalarPair(a, b),
// Lays out a single-variant aggregate (struct, tuple, closure capture set,
// or one enum variant): decides field order (optimized, randomized, or
// declaration order), assigns offsets/alignment, tracks the largest niche,
// and tries to upgrade the ABI to Scalar/ScalarPair where it matches.
// NOTE(review): many interior lines are elided in this extraction (embedded
// numbering is non-contiguous), including the `ty`/`repr`/`kind` parameter
// lines of the signature and various closing braces.
333 fn univariant_uninterned(
336 fields: &[TyAndLayout<'_>],
339 ) -> Result<LayoutS<'tcx>, LayoutError<'tcx>> {
340 let dl = self.data_layout();
341 let pack = repr.pack;
// `packed` and `aligned` are mutually exclusive; typeck should have
// rejected this already, so only delay a bug here.
342 if pack.is_some() && repr.align.is_some() {
343 self.tcx.sess.delay_span_bug(DUMMY_SP, "struct cannot be packed and aligned");
344 return Err(LayoutError::Unknown(ty));
347 let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };
349 let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();
// Field reordering is allowed unless e.g. `#[repr(C)]`/`#[repr(packed)]`
// style constraints inhibit it.
351 let optimize = !repr.inhibit_struct_field_reordering_opt();
// A maybe-unsized tail field must stay last, so exclude it from reordering.
354 if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
355 let optimizing = &mut inverse_memory_index[..end];
356 let field_align = |f: &TyAndLayout<'_>| {
357 if let Some(pack) = pack { f.align.abi.min(pack) } else { f.align.abi }
360 // If `-Z randomize-layout` was enabled for the type definition we can shuffle
361 // the field ordering to try and catch some code making assumptions about layouts
362 // we don't guarantee
363 if repr.can_randomize_type_layout() {
364 // `ReprOptions.layout_seed` is a deterministic seed that we can use to
365 // randomize field ordering with
366 let mut rng = Xoshiro128StarStar::seed_from_u64(repr.field_shuffle_seed);
368 // Shuffle the ordering of the fields
369 optimizing.shuffle(&mut rng);
371 // Otherwise we just leave things alone and actually optimize the type's fields
374 StructKind::AlwaysSized | StructKind::MaybeUnsized => {
375 optimizing.sort_by_key(|&x| {
376 // Place ZSTs first to avoid "interesting offsets",
377 // especially with only one or two non-ZST fields.
378 let f = &fields[x as usize];
379 (!f.is_zst(), cmp::Reverse(field_align(f)))
383 StructKind::Prefixed(..) => {
384 // Sort in ascending alignment so that the layout stays optimal
385 // regardless of the prefix
386 optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
390 // FIXME(Kixiron): We can always shuffle fields within a given alignment class
391 // regardless of the status of `-Z randomize-layout`
395 // inverse_memory_index holds field indices by increasing memory offset.
396 // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
397 // We now write field offsets to the corresponding offset slot;
398 // field 5 with offset 0 puts 0 in offsets[5].
399 // At the bottom of this function, we invert `inverse_memory_index` to
400 // produce `memory_index` (see `invert_mapping`).
402 let mut sized = true;
403 let mut offsets = vec![Size::ZERO; fields.len()];
404 let mut offset = Size::ZERO;
405 let mut largest_niche = None;
406 let mut largest_niche_available = 0;
// A `Prefixed` kind reserves space up front (e.g. for an enum tag).
408 if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
410 if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
411 align = align.max(AbiAndPrefAlign::new(prefix_align));
412 offset = prefix_size.align_to(prefix_align);
// Main placement loop: walk fields in memory order, aligning and advancing
// `offset`, and tracking the best (largest) niche seen so far.
415 for &i in &inverse_memory_index {
416 let field = fields[i as usize];
418 self.tcx.sess.delay_span_bug(
421 "univariant: field #{} of `{}` comes after unsized field",
428 if field.is_unsized() {
432 // Invariant: offset < dl.obj_size_bound() <= 1<<61
433 let field_align = if let Some(pack) = pack {
434 field.align.min(AbiAndPrefAlign::new(pack))
438 offset = offset.align_to(field_align.abi);
439 align = align.max(field_align);
441 debug!("univariant offset: {:?} field: {:#?}", offset, field);
442 offsets[i as usize] = offset;
444 if !repr.hide_niche() {
445 if let Some(mut niche) = field.largest_niche {
446 let available = niche.available(dl);
447 if available > largest_niche_available {
448 largest_niche_available = available;
// Rebase the field-relative niche offset to the struct.
449 niche.offset += offset;
450 largest_niche = Some(niche);
// Overflow here means the struct exceeds the target's object size bound.
455 offset = offset.checked_add(field.size, dl).ok_or(LayoutError::SizeOverflow(ty))?;
458 if let Some(repr_align) = repr.align {
459 align = align.max(AbiAndPrefAlign::new(repr_align));
462 debug!("univariant min_size: {:?}", offset);
463 let min_size = offset;
465 // As stated above, inverse_memory_index holds field indices by increasing offset.
466 // This makes it an already-sorted view of the offsets vec.
467 // To invert it, consider:
468 // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
469 // Field 5 would be the first element, so memory_index is i:
470 // Note: if we didn't optimize, it's already right.
473 if optimize { invert_mapping(&inverse_memory_index) } else { inverse_memory_index };
475 let size = min_size.align_to(align.abi);
476 let mut abi = Abi::Aggregate { sized };
478 // Unpack newtype ABIs and find scalar pairs.
479 if sized && size.bytes() > 0 {
480 // All other fields must be ZSTs.
481 let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());
483 match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
484 // We have exactly one non-ZST field.
485 (Some((i, field)), None, None) => {
486 // Field fills the struct and it has a scalar or scalar pair ABI.
487 if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size
490 // For plain scalars, or vectors of them, we can't unpack
491 // newtypes for `#[repr(C)]`, as that affects C ABIs.
492 Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
495 // But scalar pairs are Rust-specific and get
496 // treated as aggregates by C ABIs anyway.
497 Abi::ScalarPair(..) => {
505 // Two non-ZST fields, and they're both scalars.
506 (Some((i, a)), Some((j, b)), None) => {
507 match (a.abi, b.abi) {
508 (Abi::Scalar(a), Abi::Scalar(b)) => {
509 // Order by the memory placement, not source order.
510 let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
// Verify our computed offsets agree with the canonical
// scalar-pair layout before claiming ScalarPair ABI.
515 let pair = self.scalar_pair(a, b);
516 let pair_offsets = match pair.fields {
517 FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
518 assert_eq!(memory_index, &[0, 1]);
523 if offsets[i] == pair_offsets[0]
524 && offsets[j] == pair_offsets[1]
525 && align == pair.align
528 // We can use `ScalarPair` only when it matches our
529 // already computed layout (including `#[repr(C)]`).
// Any uninhabited field makes the whole aggregate uninhabited.
541 if fields.iter().any(|f| f.abi.is_uninhabited()) {
542 abi = Abi::Uninhabited;
546 variants: Variants::Single { index: VariantIdx::new(0) },
547 fields: FieldsShape::Arbitrary { offsets, memory_index },
// Computes a type's layout from scratch (no query cache), dispatching on the
// type kind: primitives, pointers, arrays/slices/str, fn items, trait
// objects, generators, closures, tuples, SIMD ADTs, and general ADTs
// (unions, structs, enums with niche-filling and tagged layouts).
// NOTE(review): this extraction elides many interior lines (the embedded
// numbering is non-contiguous) and the function continues past the end of
// this view; the comments below annotate only what is visible.
555 fn layout_of_uncached(&self, ty: Ty<'tcx>) -> Result<Layout<'tcx>, LayoutError<'tcx>> {
557 let param_env = self.param_env;
558 let dl = self.data_layout();
// Helper: a scalar covering its primitive's full value range.
559 let scalar_unit = |value: Primitive| {
560 let size = value.size(dl);
561 assert!(size.bits() <= 128);
562 Scalar::Initialized { value, valid_range: WrappingRange::full(size) }
565 |value: Primitive| tcx.intern_layout(LayoutS::scalar(self, scalar_unit(value)));
// Helper: interned single-variant aggregate layout.
567 let univariant = |fields: &[TyAndLayout<'_>], repr: &ReprOptions, kind| {
568 Ok(tcx.intern_layout(self.univariant_uninterned(ty, fields, repr, kind)?))
570 debug_assert!(!ty.has_infer_types_or_consts());
572 Ok(match *ty.kind() {
// `bool`/`char` are u8/u32 scalars with restricted valid ranges; these
// ranges are what enable niche optimizations around them.
574 ty::Bool => tcx.intern_layout(LayoutS::scalar(
576 Scalar::Initialized {
577 value: Int(I8, false),
578 valid_range: WrappingRange { start: 0, end: 1 },
581 ty::Char => tcx.intern_layout(LayoutS::scalar(
583 Scalar::Initialized {
584 value: Int(I32, false),
585 valid_range: WrappingRange { start: 0, end: 0x10FFFF },
588 ty::Int(ity) => scalar(Int(Integer::from_int_ty(dl, ity), true)),
589 ty::Uint(ity) => scalar(Int(Integer::from_uint_ty(dl, ity), false)),
590 ty::Float(fty) => scalar(match fty {
591 ty::FloatTy::F32 => F32,
592 ty::FloatTy::F64 => F64,
// Non-null pointer scalar (valid range starts at 1).
595 let mut ptr = scalar_unit(Pointer);
596 ptr.valid_range_mut().start = 1;
597 tcx.intern_layout(LayoutS::scalar(self, ptr))
601 ty::Never => tcx.intern_layout(LayoutS {
602 variants: Variants::Single { index: VariantIdx::new(0) },
603 fields: FieldsShape::Primitive,
604 abi: Abi::Uninhabited,
610 // Potentially-wide pointers.
611 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
612 let mut data_ptr = scalar_unit(Pointer);
// References are non-null; raw pointers may be null.
613 if !ty.is_unsafe_ptr() {
614 data_ptr.valid_range_mut().start = 1;
617 let pointee = tcx.normalize_erasing_regions(param_env, pointee);
// Sized pointee: thin pointer, just the data scalar.
618 if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
619 return Ok(tcx.intern_layout(LayoutS::scalar(self, data_ptr)));
// Unsized pointee: metadata depends on the tail (slice len / vtable ptr).
622 let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
623 let metadata = match unsized_part.kind() {
625 return Ok(tcx.intern_layout(LayoutS::scalar(self, data_ptr)));
627 ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)),
629 let mut vtable = scalar_unit(Pointer);
630 vtable.valid_range_mut().start = 1;
633 _ => return Err(LayoutError::Unknown(unsized_part)),
636 // Effectively a (ptr, meta) tuple.
637 tcx.intern_layout(self.scalar_pair(data_ptr, metadata))
640 // Arrays and slices.
641 ty::Array(element, mut count) => {
642 if count.has_projections() {
643 count = tcx.normalize_erasing_regions(param_env, count);
644 if count.has_projections() {
645 return Err(LayoutError::Unknown(ty));
649 let count = count.try_eval_usize(tcx, param_env).ok_or(LayoutError::Unknown(ty))?;
650 let element = self.layout_of(element)?;
652 element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;
655 if count != 0 && tcx.conservative_is_privately_uninhabited(param_env.and(ty)) {
658 Abi::Aggregate { sized: true }
// A zero-length array has no elements, hence no niche to exploit.
661 let largest_niche = if count != 0 { element.largest_niche } else { None };
663 tcx.intern_layout(LayoutS {
664 variants: Variants::Single { index: VariantIdx::new(0) },
665 fields: FieldsShape::Array { stride: element.size, count },
668 align: element.align,
672 ty::Slice(element) => {
673 let element = self.layout_of(element)?;
674 tcx.intern_layout(LayoutS {
675 variants: Variants::Single { index: VariantIdx::new(0) },
676 fields: FieldsShape::Array { stride: element.size, count: 0 },
677 abi: Abi::Aggregate { sized: false },
679 align: element.align,
683 ty::Str => tcx.intern_layout(LayoutS {
684 variants: Variants::Single { index: VariantIdx::new(0) },
685 fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
686 abi: Abi::Aggregate { sized: false },
// Fn items are zero-sized; trait objects / foreign types are opaque and
// unsized, so take an empty univariant layout and mark it unsized.
693 ty::FnDef(..) => univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?,
694 ty::Dynamic(..) | ty::Foreign(..) => {
695 let mut unit = self.univariant_uninterned(
698 &ReprOptions::default(),
699 StructKind::AlwaysSized,
702 Abi::Aggregate { ref mut sized } => *sized = false,
705 tcx.intern_layout(unit)
708 ty::Generator(def_id, substs, _) => self.generator_layout(ty, def_id, substs)?,
// Closures lay out like a struct of their captured upvars.
710 ty::Closure(_, ref substs) => {
711 let tys = substs.as_closure().upvar_tys();
713 &tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
714 &ReprOptions::default(),
715 StructKind::AlwaysSized,
// (Tuple arm, header elided:) the last tuple element may be unsized.
721 if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };
724 &tys.iter().map(|k| self.layout_of(k)).collect::<Result<Vec<_>, _>>()?,
725 &ReprOptions::default(),
730 // SIMD vector types.
731 ty::Adt(def, substs) if def.repr().simd() => {
732 if !def.is_struct() {
733 // Should have yielded E0517 by now.
734 tcx.sess.delay_span_bug(
736 "#[repr(simd)] was applied to an ADT that is not a struct",
738 return Err(LayoutError::Unknown(ty));
741 // Supported SIMD vectors are homogeneous ADTs with at least one field:
743 // * #[repr(simd)] struct S(T, T, T, T);
744 // * #[repr(simd)] struct S { x: T, y: T, z: T, w: T }
745 // * #[repr(simd)] struct S([T; 4])
747 // where T is a primitive scalar (integer/float/pointer).
749 // SIMD vectors with zero fields are not supported.
750 // (should be caught by typeck)
751 if def.non_enum_variant().fields.is_empty() {
752 tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
755 // Type of the first ADT field:
756 let f0_ty = def.non_enum_variant().fields[0].ty(tcx, substs);
758 // Heterogeneous SIMD vectors are not supported:
759 // (should be caught by typeck)
760 for fi in &def.non_enum_variant().fields {
761 if fi.ty(tcx, substs) != f0_ty {
762 tcx.sess.fatal(&format!("monomorphising heterogeneous SIMD type `{}`", ty));
766 // The element type and number of elements of the SIMD vector
767 // are obtained from:
769 // * the element type and length of the single array field, if
770 // the first field is of array type, or
772 // * the homogenous field type and the number of fields.
773 let (e_ty, e_len, is_array) = if let ty::Array(e_ty, _) = f0_ty.kind() {
774 // First ADT field is an array:
776 // SIMD vectors with multiple array fields are not supported:
777 // (should be caught by typeck)
778 if def.non_enum_variant().fields.len() != 1 {
779 tcx.sess.fatal(&format!(
780 "monomorphising SIMD type `{}` with more than one array field",
785 // Extract the number of elements from the layout of the array field:
786 let FieldsShape::Array { count, .. } = self.layout_of(f0_ty)?.layout.fields() else {
787 return Err(LayoutError::Unknown(ty));
790 (*e_ty, *count, true)
792 // First ADT field is not an array:
793 (f0_ty, def.non_enum_variant().fields.len() as _, false)
796 // SIMD vectors of zero length are not supported.
797 // Additionally, lengths are capped at 2^16 as a fixed maximum backends must
800 // Can't be caught in typeck if the array length is generic.
802 tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
803 } else if e_len > MAX_SIMD_LANES {
804 tcx.sess.fatal(&format!(
805 "monomorphising SIMD type `{}` of length greater than {}",
810 // Compute the ABI of the element type:
811 let e_ly = self.layout_of(e_ty)?;
812 let Abi::Scalar(e_abi) = e_ly.abi else {
813 // This error isn't caught in typeck, e.g., if
814 // the element type of the vector is generic.
815 tcx.sess.fatal(&format!(
816 "monomorphising SIMD type `{}` with a non-primitive-scalar \
817 (integer/float/pointer) element type `{}`",
822 // Compute the size and alignment of the vector:
823 let size = e_ly.size.checked_mul(e_len, dl).ok_or(LayoutError::SizeOverflow(ty))?;
824 let align = dl.vector_align(size);
825 let size = size.align_to(align.abi);
827 // Compute the placement of the vector fields:
828 let fields = if is_array {
829 FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] }
831 FieldsShape::Array { stride: e_ly.size, count: e_len }
834 tcx.intern_layout(LayoutS {
835 variants: Variants::Single { index: VariantIdx::new(0) },
837 abi: Abi::Vector { element: e_abi, count: e_len },
838 largest_niche: e_ly.largest_niche,
// General ADTs: unions first, then structs / univariant enums, then
// full enums (niche-filling and tagged layouts).
845 ty::Adt(def, substs) => {
846 // Cache the field layouts.
853 .map(|field| self.layout_of(field.ty(tcx, substs)))
854 .collect::<Result<Vec<_>, _>>()
856 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
// (Union arm, header elided:) `packed` + `aligned` is rejected, same
// as for structs in `univariant_uninterned`.
859 if def.repr().pack.is_some() && def.repr().align.is_some() {
860 self.tcx.sess.delay_span_bug(
861 tcx.def_span(def.did()),
862 "union cannot be packed and aligned",
864 return Err(LayoutError::Unknown(ty));
868 if def.repr().pack.is_some() { dl.i8_align } else { dl.aggregate_align };
870 if let Some(repr_align) = def.repr().align {
871 align = align.max(AbiAndPrefAlign::new(repr_align));
874 let optimize = !def.repr().inhibit_union_abi_opt();
875 let mut size = Size::ZERO;
876 let mut abi = Abi::Aggregate { sized: true };
877 let index = VariantIdx::new(0);
// Union layout: size is the max of field sizes; alignment the max
// of field alignments (clamped by `packed` below).
878 for field in &variants[index] {
879 assert!(!field.is_unsized());
880 align = align.max(field.align);
882 // If all non-ZST fields have the same ABI, forward this ABI
883 if optimize && !field.is_zst() {
884 // Discard valid range information and allow undef
885 let field_abi = match field.abi {
886 Abi::Scalar(x) => Abi::Scalar(x.to_union()),
887 Abi::ScalarPair(x, y) => {
888 Abi::ScalarPair(x.to_union(), y.to_union())
890 Abi::Vector { element: x, count } => {
891 Abi::Vector { element: x.to_union(), count }
893 Abi::Uninhabited | Abi::Aggregate { .. } => {
894 Abi::Aggregate { sized: true }
898 if size == Size::ZERO {
899 // first non ZST: initialize 'abi'
901 } else if abi != field_abi {
902 // different fields have different ABI: reset to Aggregate
903 abi = Abi::Aggregate { sized: true };
907 size = cmp::max(size, field.size);
910 if let Some(pack) = def.repr().pack {
911 align = align.min(AbiAndPrefAlign::new(pack));
914 return Ok(tcx.intern_layout(LayoutS {
915 variants: Variants::Single { index },
916 fields: FieldsShape::Union(
917 NonZeroUsize::new(variants[index].len())
918 .ok_or(LayoutError::Unknown(ty))?,
923 size: size.align_to(align.abi),
927 // A variant is absent if it's uninhabited and only has ZST fields.
928 // Present uninhabited variants only require space for their fields,
929 // but *not* an encoding of the discriminant (e.g., a tag value).
930 // See issue #49298 for more details on the need to leave space
931 // for non-ZST uninhabited data (mostly partial initialization).
932 let absent = |fields: &[TyAndLayout<'_>]| {
933 let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
934 let is_zst = fields.iter().all(|f| f.is_zst());
935 uninhabited && is_zst
937 let (present_first, present_second) = {
938 let mut present_variants = variants
940 .filter_map(|(i, v)| if absent(v) { None } else { Some(i) });
941 (present_variants.next(), present_variants.next())
943 let present_first = match present_first {
944 Some(present_first) => present_first,
945 // Uninhabited because it has no variants, or only absent ones.
946 None if def.is_enum() => {
947 return Ok(tcx.layout_of(param_env.and(tcx.types.never))?.layout);
949 // If it's a struct, still compute a layout so that we can still compute the
951 None => VariantIdx::new(0),
954 let is_struct = !def.is_enum() ||
955 // Only one variant is present.
956 (present_second.is_none() &&
957 // Representation optimizations are allowed.
958 !def.repr().inhibit_enum_layout_opt());
960 // Struct, or univariant enum equivalent to a struct.
961 // (Typechecking will reject discriminant-sizing attrs.)
963 let v = present_first;
964 let kind = if def.is_enum() || variants[v].is_empty() {
965 StructKind::AlwaysSized
967 let param_env = tcx.param_env(def.did());
968 let last_field = def.variant(v).fields.last().unwrap();
970 tcx.type_of(last_field.did).is_sized(tcx.at(DUMMY_SP), param_env);
972 StructKind::MaybeUnsized
974 StructKind::AlwaysSized
978 let mut st = self.univariant_uninterned(ty, &variants[v], &def.repr(), kind)?;
979 st.variants = Variants::Single { index: v };
// Apply `#[rustc_layout_scalar_valid_range_*]` (e.g. NonNull/NonZero).
980 let (start, end) = self.tcx.layout_scalar_valid_range(def.did());
982 Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
983 // the asserts ensure that we are not using the
984 // `#[rustc_layout_scalar_valid_range(n)]`
985 // attribute to widen the range of anything as that would probably
986 // result in UB somewhere
987 // FIXME(eddyb) the asserts are probably not needed,
988 // as larger validity ranges would result in missed
989 // optimizations, *not* wrongly assuming the inner
990 // value is valid. e.g. unions enlarge validity ranges,
991 // because the values may be uninitialized.
992 if let Bound::Included(start) = start {
993 // FIXME(eddyb) this might be incorrect - it doesn't
994 // account for wrap-around (end < start) ranges.
995 let valid_range = scalar.valid_range_mut();
996 assert!(valid_range.start <= start);
997 valid_range.start = start;
999 if let Bound::Included(end) = end {
1000 // FIXME(eddyb) this might be incorrect - it doesn't
1001 // account for wrap-around (end < start) ranges.
1002 let valid_range = scalar.valid_range_mut();
1003 assert!(valid_range.end >= end);
1004 valid_range.end = end;
1007 // Update `largest_niche` if we have introduced a larger niche.
1008 let niche = if def.repr().hide_niche() {
1011 Niche::from_scalar(dl, Size::ZERO, *scalar)
1013 if let Some(niche) = niche {
1014 match st.largest_niche {
1015 Some(largest_niche) => {
1016 // Replace the existing niche even if they're equal,
1017 // because this one is at a lower offset.
1018 if largest_niche.available(dl) <= niche.available(dl) {
1019 st.largest_niche = Some(niche);
1022 None => st.largest_niche = Some(niche),
1027 start == Bound::Unbounded && end == Bound::Unbounded,
1028 "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
1034 return Ok(tcx.intern_layout(st));
1037 // At this point, we have handled all unions and
1038 // structs. (We have also handled univariant enums
1039 // that allow representation optimization.)
1040 assert!(def.is_enum());
1042 // The current code for niche-filling relies on variant indices
1043 // instead of actual discriminants, so dataful enums with
1044 // explicit discriminants (RFC #2363) would misbehave.
1045 let no_explicit_discriminants = def
1048 .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i.as_u32()));
1050 let mut niche_filling_layout = None;
1052 // Niche-filling enum optimization.
1053 if !def.repr().inhibit_enum_layout_opt() && no_explicit_discriminants {
1054 let mut dataful_variant = None;
// Empty-on-purpose start range; extended as ZST variants are found.
1055 let mut niche_variants = VariantIdx::MAX..=VariantIdx::new(0);
1057 // Find one non-ZST variant.
1058 'variants: for (v, fields) in variants.iter_enumerated() {
1064 if dataful_variant.is_none() {
1065 dataful_variant = Some(v);
// More than one non-ZST variant: niche-filling does not apply.
1068 dataful_variant = None;
1073 niche_variants = *niche_variants.start().min(&v)..=v;
1076 if niche_variants.start() > niche_variants.end() {
1077 dataful_variant = None;
1080 if let Some(i) = dataful_variant {
1081 let count = (niche_variants.end().as_u32()
1082 - niche_variants.start().as_u32()
1085 // Find the field with the largest niche
1086 let niche_candidate = variants[i]
1089 .filter_map(|(j, field)| Some((j, field.largest_niche?)))
1090 .max_by_key(|(_, niche)| niche.available(dl));
1092 if let Some((field_index, niche, (niche_start, niche_scalar))) =
1093 niche_candidate.and_then(|(field_index, niche)| {
1094 Some((field_index, niche, niche.reserve(self, count)?))
1097 let mut align = dl.aggregate_align;
1101 let mut st = self.univariant_uninterned(
1105 StructKind::AlwaysSized,
1107 st.variants = Variants::Single { index: j };
1109 align = align.max(st.align);
1111 Ok(tcx.intern_layout(st))
1113 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
// Niche position within the whole enum = field offset in the
// dataful variant + niche offset within that field.
1115 let offset = st[i].fields().offset(field_index) + niche.offset;
1116 let size = st[i].size();
1118 let abi = if st.iter().all(|v| v.abi().is_uninhabited()) {
1122 Abi::Scalar(_) => Abi::Scalar(niche_scalar),
1123 Abi::ScalarPair(first, second) => {
1124 // Only the niche is guaranteed to be initialised,
1125 // so use union layout for the other primitive.
1126 if offset.bytes() == 0 {
1127 Abi::ScalarPair(niche_scalar, second.to_union())
1129 Abi::ScalarPair(first.to_union(), niche_scalar)
1132 _ => Abi::Aggregate { sized: true },
1136 let largest_niche = Niche::from_scalar(dl, offset, niche_scalar);
1138 niche_filling_layout = Some(LayoutS {
1139 variants: Variants::Multiple {
1141 tag_encoding: TagEncoding::Niche {
1149 fields: FieldsShape::Arbitrary {
1150 offsets: vec![offset],
1151 memory_index: vec![0],
// Tagged layout path: compute the discriminant value range over all
// inhabited variants to pick the smallest viable tag integer.
1162 let (mut min, mut max) = (i128::MAX, i128::MIN);
1163 let discr_type = def.repr().discr_type();
1164 let bits = Integer::from_attr(self, discr_type).size().bits();
1165 for (i, discr) in def.discriminants(tcx) {
1166 if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
1169 let mut x = discr.val as i128;
1170 if discr_type.is_signed() {
1171 // sign extend the raw representation to be an i128
1172 x = (x << (128 - bits)) >> (128 - bits);
1181 // We might have no inhabited variants, so pretend there's at least one.
1182 if (min, max) == (i128::MAX, i128::MIN) {
1186 assert!(min <= max, "discriminant range is {}...{}", min, max);
1187 let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr(), min, max);
1189 let mut align = dl.aggregate_align;
1190 let mut size = Size::ZERO;
1192 // We're interested in the smallest alignment, so start large.
1193 let mut start_align = Align::from_bytes(256).unwrap();
1194 assert_eq!(Integer::for_align(dl, start_align), None);
1196 // repr(C) on an enum tells us to make a (tag, union) layout,
1197 // so we need to grow the prefix alignment to be at least
1198 // the alignment of the union. (This value is used both for
1199 // determining the alignment of the overall enum, and the
1200 // determining the alignment of the payload after the tag.)
1201 let mut prefix_align = min_ity.align(dl).abi;
1203 for fields in &variants {
1204 for field in fields {
1205 prefix_align = prefix_align.max(field.align.abi);
1210 // Create the set of structs that represent each variant.
1211 let mut layout_variants = variants
1213 .map(|(i, field_layouts)| {
1214 let mut st = self.univariant_uninterned(
1218 StructKind::Prefixed(min_ity.size(), prefix_align),
1220 st.variants = Variants::Single { index: i };
1221 // Find the first field we can't move later
1222 // to make room for a larger discriminant.
1224 st.fields.index_by_increasing_offset().map(|j| field_layouts[j])
1226 if !field.is_zst() || field.align.abi.bytes() != 1 {
1227 start_align = start_align.min(field.align.abi);
1231 size = cmp::max(size, st.size);
1232 align = align.max(st.align);
1235 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1237 // Align the maximum variant size to the largest alignment.
1238 size = size.align_to(align.abi);
1240 if size.bytes() >= dl.obj_size_bound() {
1241 return Err(LayoutError::SizeOverflow(ty));
1244 let typeck_ity = Integer::from_attr(dl, def.repr().discr_type());
1245 if typeck_ity < min_ity {
1246 // It is a bug if Layout decided on a greater discriminant size than typeck for
1247 // some reason at this point (based on values discriminant can take on). Mostly
1248 // because this discriminant will be loaded, and then stored into variable of
1249 // type calculated by typeck. Consider such case (a bug): typeck decided on
1250 // byte-sized discriminant, but layout thinks we need a 16-bit to store all
1251 // discriminant values. That would be a bug, because then, in codegen, in order
1252 // to store this 16-bit discriminant into 8-bit sized temporary some of the
1253 // space necessary to represent would have to be discarded (or layout is wrong
1254 // on thinking it needs 16 bits)
1256 "layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
1260 // However, it is fine to make discr type however large (as an optimisation)
1261 // after this point – we’ll just truncate the value we load in codegen.
1264 // Check to see if we should use a different type for the
1265 // discriminant. We can safely use a type with the same size
1266 // as the alignment of the first field of each variant.
1267 // We increase the size of the discriminant to avoid LLVM copying
1268 // padding when it doesn't need to. This normally causes unaligned
1269 // load/stores and excessive memcpy/memset operations. By using a
1270 // bigger integer size, LLVM can be sure about its contents and
1271 // won't be so conservative.
1273 // Use the initial field alignment
1274 let mut ity = if def.repr().c() || def.repr().int.is_some() {
1277 Integer::for_align(dl, start_align).unwrap_or(min_ity)
1280 // If the alignment is not larger than the chosen discriminant size,
1281 // don't use the alignment as the final size.
1285 // Patch up the variants' first few fields.
1286 let old_ity_size = min_ity.size();
1287 let new_ity_size = ity.size();
1288 for variant in &mut layout_variants {
1289 match variant.fields {
1290 FieldsShape::Arbitrary { ref mut offsets, .. } => {
1292 if *i <= old_ity_size {
1293 assert_eq!(*i, old_ity_size);
1297 // We might be making the struct larger.
1298 if variant.size <= old_ity_size {
1299 variant.size = new_ity_size;
1307 let tag_mask = ity.size().unsigned_int_max();
1308 let tag = Scalar::Initialized {
1309 value: Int(ity, signed),
1310 valid_range: WrappingRange {
1311 start: (min as u128 & tag_mask),
1312 end: (max as u128 & tag_mask),
1315 let mut abi = Abi::Aggregate { sized: true };
1317 // Without latter check aligned enums with custom discriminant values
1318 // Would result in ICE see the issue #92464 for more info
1319 if tag.size(dl) == size || variants.iter().all(|layout| layout.is_empty()) {
1320 abi = Abi::Scalar(tag);
1322 // Try to use a ScalarPair for all tagged enums.
1323 let mut common_prim = None;
1324 let mut common_prim_initialized_in_all_variants = true;
1325 for (field_layouts, layout_variant) in iter::zip(&variants, &layout_variants) {
1326 let FieldsShape::Arbitrary { ref offsets, .. } = layout_variant.fields else {
1330 iter::zip(field_layouts, offsets).filter(|p| !p.0.is_zst());
1331 let (field, offset) = match (fields.next(), fields.next()) {
1333 common_prim_initialized_in_all_variants = false;
1336 (Some(pair), None) => pair,
1342 let prim = match field.abi {
1343 Abi::Scalar(scalar) => {
1344 common_prim_initialized_in_all_variants &=
1345 matches!(scalar, Scalar::Initialized { .. });
1353 if let Some(pair) = common_prim {
1354 // This is pretty conservative. We could go fancier
1355 // by conflating things like i32 and u32, or even
1356 // realising that (u8, u8) could just cohabit with
1358 if pair != (prim, offset) {
1363 common_prim = Some((prim, offset));
1366 if let Some((prim, offset)) = common_prim {
1367 let prim_scalar = if common_prim_initialized_in_all_variants {
1370 // Common prim might be uninit.
1371 Scalar::Union { value: prim }
1373 let pair = self.scalar_pair(tag, prim_scalar);
1374 let pair_offsets = match pair.fields {
1375 FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
1376 assert_eq!(memory_index, &[0, 1]);
1381 if pair_offsets[0] == Size::ZERO
1382 && pair_offsets[1] == *offset
1383 && align == pair.align
1384 && size == pair.size
1386 // We can use `ScalarPair` only when it matches our
1387 // already computed layout (including `#[repr(C)]`).
1393 if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
1394 abi = Abi::Uninhabited;
1397 let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag);
1399 let layout_variants =
1400 layout_variants.into_iter().map(|v| tcx.intern_layout(v)).collect();
1402 let tagged_layout = LayoutS {
1403 variants: Variants::Multiple {
1405 tag_encoding: TagEncoding::Direct,
1407 variants: layout_variants,
1409 fields: FieldsShape::Arbitrary {
1410 offsets: vec![Size::ZERO],
1411 memory_index: vec![0],
1419 let best_layout = match (tagged_layout, niche_filling_layout) {
1420 (tagged_layout, Some(niche_filling_layout)) => {
1421 // Pick the smaller layout; otherwise,
1422 // pick the layout with the larger niche; otherwise,
1423 // pick tagged as it has simpler codegen.
1424 cmp::min_by_key(tagged_layout, niche_filling_layout, |layout| {
1425 let niche_size = layout.largest_niche.map_or(0, |n| n.available(dl));
1426 (layout.size, cmp::Reverse(niche_size))
1429 (tagged_layout, None) => tagged_layout,
1432 tcx.intern_layout(best_layout)
1435 // Types with no meaningful known layout.
1436 ty::Projection(_) | ty::Opaque(..) => {
1437 // NOTE(eddyb) `layout_of` query should've normalized these away,
1438 // if that was possible, so there's no reason to try again here.
1439 return Err(LayoutError::Unknown(ty));
1442 ty::Placeholder(..) | ty::GeneratorWitness(..) | ty::Infer(_) => {
1443 bug!("Layout::compute: unexpected type `{}`", ty)
1446 ty::Bound(..) | ty::Param(_) | ty::Error(_) => {
1447 return Err(LayoutError::Unknown(ty));
1453 /// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
1454 #[derive(Clone, Debug, PartialEq)]
1455 enum SavedLocalEligibility {
// The local is a field of exactly one variant, so its storage may be
// overlapped with locals assigned to other variants.
1457 Assigned(VariantIdx),
1458 // FIXME: Use newtype_index so we aren't wasting bytes
// The local must not overlap with anything; once prefix ordering has been
// decided, `Some(idx)` is its position within the promoted "prefix"
// (see `generator_saved_local_eligibility`, which fills this in last).
1459 Ineligible(Option<u32>),
1462 // When laying out generators, we divide our saved local fields into two
1463 // categories: overlap-eligible and overlap-ineligible.
1465 // Those fields which are ineligible for overlap go in a "prefix" at the
1466 // beginning of the layout, and always have space reserved for them.
1468 // Overlap-eligible fields are only assigned to one variant, so we lay
1469 // those fields out for each variant and put them right after the
1472 // Finally, in the layout details, we point to the fields from the
1473 // variants they are assigned to. It is possible for some fields to be
1474 // included in multiple variants. No field ever "moves around" in the
1475 // layout; its offset is always the same.
1477 // Also included in the layout are the upvars and the discriminant.
1478 // These are included as fields on the "outer" layout; they are not part
1480 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
1481 /// Compute the eligibility and assignment of each local.
///
/// Returns the set of saved locals that are ineligible for overlap (these
/// get "promoted" into the generator's prefix) together with a per-local
/// `SavedLocalEligibility` assignment. Every ineligible local ends up as
/// `Ineligible(Some(idx))`, where `idx` is its position in the prefix.
1482 fn generator_saved_local_eligibility(
1484 info: &GeneratorLayout<'tcx>,
1485 ) -> (BitSet<GeneratorSavedLocal>, IndexVec<GeneratorSavedLocal, SavedLocalEligibility>) {
1486 use SavedLocalEligibility::*;
1488 let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> =
1489 IndexVec::from_elem_n(Unassigned, info.field_tys.len())/* one entry per saved local */;
1491 // The saved locals not eligible for overlap. These will get
1492 // "promoted" to the prefix of our generator.
1493 let mut ineligible_locals = BitSet::new_empty(info.field_tys.len());
1495 // Figure out which of our saved locals are fields in only
1496 // one variant. The rest are deemed ineligible for overlap.
1497 for (variant_index, fields) in info.variant_fields.iter_enumerated() {
1498 for local in fields {
1499 match assignments[*local] {
1501 assignments[*local] = Assigned(variant_index);
1504 // We've already seen this local at another suspension
1505 // point, so it is no longer a candidate.
1507 "removing local {:?} in >1 variant ({:?}, {:?})",
1512 ineligible_locals.insert(*local);
1513 assignments[*local] = Ineligible(None);
1520 // Next, check every pair of eligible locals to see if they
1522 for local_a in info.storage_conflicts.rows() {
1523 let conflicts_a = info.storage_conflicts.count(local_a);
1524 if ineligible_locals.contains(local_a) {
1528 for local_b in info.storage_conflicts.iter(local_a) {
1529 // local_a and local_b are storage live at the same time, therefore they
1530 // cannot overlap in the generator layout. The only way to guarantee
1531 // this is if they are in the same variant, or one is ineligible
1532 // (which means it is stored in every variant).
1533 if ineligible_locals.contains(local_b)
1534 || assignments[local_a] == assignments[local_b]
1539 // If they conflict, we will choose one to make ineligible.
1540 // This is not always optimal; it's just a greedy heuristic that
1541 // seems to produce good results most of the time.
// We remove whichever local conflicts with *more* other locals, which
// tends to resolve many future conflicts at once.
1542 let conflicts_b = info.storage_conflicts.count(local_b);
1543 let (remove, other) =
1544 if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
1545 ineligible_locals.insert(remove);
1546 assignments[remove] = Ineligible(None);
1547 trace!("removing local {:?} due to conflict with {:?}", remove, other);
1551 // Count the number of variants in use. If only one of them, then it is
1552 // impossible to overlap any locals in our layout. In this case it's
1553 // always better to make the remaining locals ineligible, so we can
1554 // lay them out with the other locals in the prefix and eliminate
1555 // unnecessary padding bytes.
1557 let mut used_variants = BitSet::new_empty(info.variant_fields.len());
1558 for assignment in &assignments {
1559 if let Assigned(idx) = assignment {
1560 used_variants.insert(*idx);
1563 if used_variants.count() < 2 {
1564 for assignment in assignments.iter_mut() {
1565 *assignment = Ineligible(None);
1567 ineligible_locals.insert_all();
1571 // Write down the order of our locals that will be promoted to the prefix.
1573 for (idx, local) in ineligible_locals.iter().enumerate() {
1574 assignments[local] = Ineligible(Some(idx as u32));
1577 debug!("generator saved local assignments: {:?}", assignments);
1579 (ineligible_locals, assignments)
1582 /// Compute the full generator layout.
///
/// Builds a tagged multi-variant layout for the generator behind `def_id`:
/// a common prefix (upvars, discriminant tag, and all "promoted"
/// overlap-ineligible saved locals), followed by per-variant layouts for
/// the overlap-eligible locals of each suspension state.
///
/// Returns `Err(LayoutError::Unknown)` if the MIR `generator_layout` is
/// not available, and propagates any field-layout errors via `?`.
1583 fn generator_layout(
1586 def_id: hir::def_id::DefId,
1587 substs: SubstsRef<'tcx>,
1588 ) -> Result<Layout<'tcx>, LayoutError<'tcx>> {
1589 use SavedLocalEligibility::*;
1591 let subst_field = |ty: Ty<'tcx>| ty.subst(tcx, substs);
1593 let Some(info) = tcx.generator_layout(def_id) else {
1594 return Err(LayoutError::Unknown(ty));
1596 let (ineligible_locals, assignments) = self.generator_saved_local_eligibility(&info);
1598 // Build a prefix layout, including "promoting" all ineligible
1599 // locals as part of the prefix. We compute the layout of all of
1600 // these fields at once to get optimal packing.
1601 let tag_index = substs.as_generator().prefix_tys().count();
1603 // `info.variant_fields` already accounts for the reserved variants, so no need to add them.
1604 let max_discr = (info.variant_fields.len() - 1) as u128;
1605 let discr_int = Integer::fit_unsigned(max_discr);
1606 let discr_int_ty = discr_int.to_ty(tcx, false);
// The tag is always fully initialized and restricted to the valid
// discriminant range.
1607 let tag = Scalar::Initialized {
1608 value: Primitive::Int(discr_int, false),
1609 valid_range: WrappingRange { start: 0, end: max_discr },
1611 let tag_layout = self.tcx.intern_layout(LayoutS::scalar(self, tag));
1612 let tag_layout = TyAndLayout { ty: discr_int_ty, layout: tag_layout };
// Promoted locals are wrapped in `MaybeUninit`: a promoted slot is shared
// by states that may not have that local live, so it must not be required
// to hold a valid value at all times.
1614 let promoted_layouts = ineligible_locals
1616 .map(|local| subst_field(info.field_tys[local]))
1617 .map(|ty| tcx.mk_maybe_uninit(ty))
1618 .map(|ty| self.layout_of(ty));
// Prefix = upvars ++ [tag] ++ promoted locals (in that order, so the tag
// sits at field `tag_index`).
1619 let prefix_layouts = substs
1622 .map(|ty| self.layout_of(ty))
1623 .chain(iter::once(Ok(tag_layout)))
1624 .chain(promoted_layouts)
1625 .collect::<Result<Vec<_>, _>>()?;
1626 let prefix = self.univariant_uninterned(
1629 &ReprOptions::default(),
1630 StructKind::AlwaysSized,
1633 let (prefix_size, prefix_align) = (prefix.size, prefix.align);
1635 // Split the prefix layout into the "outer" fields (upvars and
1636 // discriminant) and the "promoted" fields. Promoted fields will
1637 // get included in each variant that requested them in
1639 debug!("prefix = {:#?}", prefix);
1640 let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
1641 FieldsShape::Arbitrary { mut offsets, memory_index } => {
1642 let mut inverse_memory_index = invert_mapping(&memory_index);
1644 // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
1645 // "outer" and "promoted" fields respectively.
1646 let b_start = (tag_index + 1) as u32;
1647 let offsets_b = offsets.split_off(b_start as usize);
1648 let offsets_a = offsets;
1650 // Disentangle the "a" and "b" components of `inverse_memory_index`
1651 // by preserving the order but keeping only one disjoint "half" each.
1652 // FIXME(eddyb) build a better abstraction for permutations, if possible.
1653 let inverse_memory_index_b: Vec<_> =
1654 inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
1655 inverse_memory_index.retain(|&i| i < b_start);
1656 let inverse_memory_index_a = inverse_memory_index;
1658 // Since `inverse_memory_index_{a,b}` each only refer to their
1659 // respective fields, they can be safely inverted
1660 let memory_index_a = invert_mapping(&inverse_memory_index_a);
1661 let memory_index_b = invert_mapping(&inverse_memory_index_b);
1664 FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
1665 (outer_fields, offsets_b, memory_index_b)
1670 let mut size = prefix.size;
1671 let mut align = prefix.align;
1675 .map(|(index, variant_fields)| {
1676 // Only include overlap-eligible fields when we compute our variant layout.
1677 let variant_only_tys = variant_fields
1679 .filter(|local| match assignments[**local] {
1680 Unassigned => bug!(),
1681 Assigned(v) if v == index => true,
1682 Assigned(_) => bug!("assignment does not match variant"),
1683 Ineligible(_) => false,
1685 .map(|local| subst_field(info.field_tys[*local]));
// Lay the variant-only fields out *after* the shared prefix, so they
// never collide with outer or promoted fields.
1687 let mut variant = self.univariant_uninterned(
1690 .map(|ty| self.layout_of(ty))
1691 .collect::<Result<Vec<_>, _>>()?,
1692 &ReprOptions::default(),
1693 StructKind::Prefixed(prefix_size, prefix_align.abi),
1695 variant.variants = Variants::Single { index };
1697 let FieldsShape::Arbitrary { offsets, memory_index } = variant.fields else {
1701 // Now, stitch the promoted and variant-only fields back together in
1702 // the order they are mentioned by our GeneratorLayout.
1703 // Because we only use some subset (that can differ between variants)
1704 // of the promoted fields, we can't just pick those elements of the
1705 // `promoted_memory_index` (as we'd end up with gaps).
1706 // So instead, we build an "inverse memory_index", as if all of the
1707 // promoted fields were being used, but leave the elements not in the
1708 // subset as `INVALID_FIELD_IDX`, which we can filter out later to
1709 // obtain a valid (bijective) mapping.
1710 const INVALID_FIELD_IDX: u32 = !0;
1711 let mut combined_inverse_memory_index =
1712 vec![INVALID_FIELD_IDX; promoted_memory_index.len() + memory_index.len()];
1713 let mut offsets_and_memory_index = iter::zip(offsets, memory_index);
1714 let combined_offsets = variant_fields
1718 let (offset, memory_index) = match assignments[*local] {
1719 Unassigned => bug!(),
1721 let (offset, memory_index) =
1722 offsets_and_memory_index.next().unwrap();
// Variant-only fields are ordered after all promoted fields.
1723 (offset, promoted_memory_index.len() as u32 + memory_index)
1725 Ineligible(field_idx) => {
1726 let field_idx = field_idx.unwrap() as usize;
1727 (promoted_offsets[field_idx], promoted_memory_index[field_idx])
1730 combined_inverse_memory_index[memory_index as usize] = i as u32;
1735 // Remove the unused slots and invert the mapping to obtain the
1736 // combined `memory_index` (also see previous comment).
1737 combined_inverse_memory_index.retain(|&i| i != INVALID_FIELD_IDX);
1738 let combined_memory_index = invert_mapping(&combined_inverse_memory_index);
1740 variant.fields = FieldsShape::Arbitrary {
1741 offsets: combined_offsets,
1742 memory_index: combined_memory_index,
1745 size = size.max(variant.size);
1746 align = align.max(variant.align);
1747 Ok(tcx.intern_layout(variant))
1749 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1751 size = size.align_to(align.abi);
// A generator is uninhabited if its prefix is, or if every state is.
1754 if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi().is_uninhabited()) {
1757 Abi::Aggregate { sized: true }
1760 let layout = tcx.intern_layout(LayoutS {
1761 variants: Variants::Multiple {
1763 tag_encoding: TagEncoding::Direct,
1764 tag_field: tag_index,
1767 fields: outer_fields,
1769 largest_niche: prefix.largest_niche,
1773 debug!("generator layout ({:?}): {:#?}", ty, layout);
1777 /// This is invoked by the `layout_of` query to record the final
1778 /// layout of each type.
///
/// Cheap fast-path check; the actual record construction lives in
/// `record_layout_for_printing_outlined`.
1780 fn record_layout_for_printing(&self, layout: TyAndLayout<'tcx>) {
1781 // If we are running with `-Zprint-type-sizes`, maybe record layouts
1782 // for dumping later.
1783 if self.tcx.sess.opts.debugging_opts.print_type_sizes {
1784 self.record_layout_for_printing_outlined(layout)
/// Slow path of `record_layout_for_printing`: builds the
/// `-Zprint-type-sizes` record (per-variant and per-field sizes, offsets
/// and alignments) for `layout` and hands it to `code_stats`.
1788 fn record_layout_for_printing_outlined(&self, layout: TyAndLayout<'tcx>) {
1789 // Ignore layouts that are done with non-empty environments or
1790 // non-monomorphic layouts, as the user only wants to see the stuff
1791 // resulting from the final codegen session.
1792 if layout.ty.has_param_types_or_consts() || !self.param_env.caller_bounds().is_empty() {
1796 // (delay format until we actually need it)
1797 let record = |kind, packed, opt_discr_size, variants| {
1798 let type_desc = format!("{:?}", layout.ty);
1799 self.tcx.sess.code_stats.record_type_size(
// Only ADTs get full variant/field breakdowns; closures are recorded
// bare, everything else is skipped.
1810 let adt_def = match *layout.ty.kind() {
1811 ty::Adt(ref adt_def, _) => {
1812 debug!("print-type-size t: `{:?}` process adt", layout.ty);
1816 ty::Closure(..) => {
1817 debug!("print-type-size t: `{:?}` record closure", layout.ty);
1818 record(DataTypeKind::Closure, false, None, vec![]);
1823 debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
1828 let adt_kind = adt_def.adt_kind();
1829 let adt_packed = adt_def.repr().pack.is_some();
// Builds a `VariantInfo` by walking the named fields of one variant
// layout; `min_size` tracks the furthest field end seen.
1831 let build_variant_info = |n: Option<Symbol>, flds: &[Symbol], layout: TyAndLayout<'tcx>| {
1832 let mut min_size = Size::ZERO;
1833 let field_info: Vec<_> = flds
1837 let field_layout = layout.field(self, i);
1838 let offset = layout.fields.offset(i);
1839 let field_end = offset + field_layout.size;
1840 if min_size < field_end {
1841 min_size = field_end;
1844 name: name.to_string(),
1845 offset: offset.bytes(),
1846 size: field_layout.size.bytes(),
1847 align: field_layout.align.abi.bytes(),
1853 name: n.map(|n| n.to_string()),
1854 kind: if layout.is_unsized() { SizeKind::Min } else { SizeKind::Exact },
1855 align: layout.align.abi.bytes(),
// Fall back to the whole-layout size when no field extended past 0.
1856 size: if min_size.bytes() == 0 { layout.size.bytes() } else { min_size.bytes() },
1861 match layout.variants {
1862 Variants::Single { index } => {
1863 if !adt_def.variants().is_empty() && layout.fields != FieldsShape::Primitive {
1865 "print-type-size `{:#?}` variant {}",
1867 adt_def.variant(index).name
1869 let variant_def = &adt_def.variant(index);
1870 let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
1875 vec![build_variant_info(Some(variant_def.name), &fields, layout)],
1878 // (This case arises for *empty* enums; so give it
1880 record(adt_kind.into(), adt_packed, None, vec![]);
1884 Variants::Multiple { tag, ref tag_encoding, .. } => {
1886 "print-type-size `{:#?}` adt general variants def {}",
1888 adt_def.variants().len()
1890 let variant_infos: Vec<_> = adt_def
1893 .map(|(i, variant_def)| {
1894 let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
1896 Some(variant_def.name),
1898 layout.for_variant(self, i),
// Only a directly-encoded tag occupies space of its own; niche
// encodings borrow their bits from a field.
1905 match tag_encoding {
1906 TagEncoding::Direct => Some(tag.size(self)),
1916 /// Type size "skeleton", i.e., the only information determining a type's size.
1917 /// While this is conservative, (aside from constant sizes, only pointers,
1918 /// newtypes thereof and null pointer optimized enums are allowed), it is
1919 /// enough to statically check common use cases of transmute.
1920 #[derive(Copy, Clone, Debug)]
1921 pub enum SizeSkeleton<'tcx> {
1922 /// Any statically computable Layout.
/// (Produced as `Known(size)` whenever `layout_of` succeeds — see
/// `SizeSkeleton::compute`.)
1925 /// A potentially-fat pointer.
1927 /// If true, this pointer is never null.
1929 /// The type which determines the unsized metadata, if any,
1930 /// of this pointer. Either a type parameter or a projection
1931 /// depending on one, with regions erased.
1936 impl<'tcx> SizeSkeleton<'tcx> {
// `compute` tries a full `layout_of` first; only when that fails (e.g. the
// type still mentions parameters) does it fall back to the structural
// pointer/newtype analysis below.
1940 param_env: ty::ParamEnv<'tcx>,
1941 ) -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
1942 debug_assert!(!ty.has_infer_types_or_consts());
1944 // First try computing a static layout.
1945 let err = match tcx.layout_of(param_env.and(ty)) {
1947 return Ok(SizeSkeleton::Known(layout.size));
1953 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
// Only raw pointers may be null; references are always non-null.
1954 let non_zero = !ty.is_unsafe_ptr();
1955 let tail = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
1957 ty::Param(_) | ty::Projection(_) => {
1958 debug_assert!(tail.has_param_types_or_consts());
1959 Ok(SizeSkeleton::Pointer { non_zero, tail: tcx.erase_regions(tail) })
1962 "SizeSkeleton::compute({}): layout errored ({}), yet \
1963 tail `{}` is not a type parameter or a projection",
1971 ty::Adt(def, substs) => {
1972 // Only newtypes and enums w/ nullable pointer optimization.
1973 if def.is_union() || def.variants().is_empty() || def.variants().len() > 2 {
1977 // Get a zero-sized variant or a pointer newtype.
1978 let zero_or_ptr_variant = |i| {
1979 let i = VariantIdx::new(i);
1981 def.variant(i).fields.iter().map(|field| {
1982 SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env)
// A qualifying variant has at most one pointer field and otherwise
// only zero-sized fields.
1985 for field in fields {
1988 SizeSkeleton::Known(size) => {
1989 if size.bytes() > 0 {
1993 SizeSkeleton::Pointer { .. } => {
2004 let v0 = zero_or_ptr_variant(0)?;
// Newtype: a single variant wrapping exactly one pointer.
2006 if def.variants().len() == 1 {
2007 if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
2008 return Ok(SizeSkeleton::Pointer {
// `#[rustc_layout_scalar_valid_range_*]` can also exclude zero,
// making the wrapped pointer non-null (e.g. range starting above 0).
2010 || match tcx.layout_scalar_valid_range(def.did()) {
2011 (Bound::Included(start), Bound::Unbounded) => start > 0,
2012 (Bound::Included(start), Bound::Included(end)) => {
2013 0 < start && start < end
2024 let v1 = zero_or_ptr_variant(1)?;
2025 // Nullable pointer enum optimization.
// One variant is a non-null pointer, the other is zero-sized: the
// enum is pointer-sized, with null standing in for the empty variant.
2027 (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None)
2028 | (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
2029 Ok(SizeSkeleton::Pointer { non_zero: false, tail })
2035 ty::Projection(_) | ty::Opaque(..) => {
2036 let normalized = tcx.normalize_erasing_regions(param_env, ty);
// Avoid infinite recursion when normalization makes no progress.
2037 if ty == normalized {
2040 SizeSkeleton::compute(normalized, tcx, param_env)
/// Whether two skeletons are known to describe equal sizes: `Known` sizes
/// compare exactly, pointer skeletons compare via their `tail` types.
2048 pub fn same_size(self, other: SizeSkeleton<'_>) -> bool {
2049 match (self, other) {
2050 (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
2051 (SizeSkeleton::Pointer { tail: a, .. }, SizeSkeleton::Pointer { tail: b, .. }) => {
// Access to the global type context; implies data-layout access.
2059 pub trait HasTyCtxt<'tcx>: HasDataLayout {
2060 fn tcx(&self) -> TyCtxt<'tcx>;
// The parameter environment a context answers layout queries under.
2063 pub trait HasParamEnv<'tcx> {
2064 fn param_env(&self) -> ty::ParamEnv<'tcx>;
// `TyCtxt` can serve directly as a layout context: it supplies the target
// data layout, the target spec, and (trivially) itself.
2067 impl<'tcx> HasDataLayout for TyCtxt<'tcx> {
2069 fn data_layout(&self) -> &TargetDataLayout {
2074 impl<'tcx> HasTargetSpec for TyCtxt<'tcx> {
2075 fn target_spec(&self) -> &Target {
2080 impl<'tcx> HasTyCtxt<'tcx> for TyCtxt<'tcx> {
2082 fn tcx(&self) -> TyCtxt<'tcx> {
// The span-carrying `TyCtxtAt` mirrors the `TyCtxt` impls above.
2087 impl<'tcx> HasDataLayout for ty::query::TyCtxtAt<'tcx> {
2089 fn data_layout(&self) -> &TargetDataLayout {
2094 impl<'tcx> HasTargetSpec for ty::query::TyCtxtAt<'tcx> {
2095 fn target_spec(&self) -> &Target {
2100 impl<'tcx> HasTyCtxt<'tcx> for ty::query::TyCtxtAt<'tcx> {
2102 fn tcx(&self) -> TyCtxt<'tcx> {
// `LayoutCx` delegates all context-trait requirements to its inner `tcx`
// field (and its own `param_env`).
2107 impl<'tcx, C> HasParamEnv<'tcx> for LayoutCx<'tcx, C> {
2108 fn param_env(&self) -> ty::ParamEnv<'tcx> {
2113 impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
2114 fn data_layout(&self) -> &TargetDataLayout {
2115 self.tcx.data_layout()
2119 impl<'tcx, T: HasTargetSpec> HasTargetSpec for LayoutCx<'tcx, T> {
2120 fn target_spec(&self) -> &Target {
2121 self.tcx.target_spec()
2125 impl<'tcx, T: HasTyCtxt<'tcx>> HasTyCtxt<'tcx> for LayoutCx<'tcx, T> {
2126 fn tcx(&self) -> TyCtxt<'tcx> {
/// Abstraction over "either `T` itself, or `Result<T, E>`", letting
/// `layout_of`-style APIs return plain values or results uniformly
/// (see `LayoutOfHelpers::LayoutOfResult`).
2131 pub trait MaybeResult<T> {
2134 fn from(x: Result<T, Self::Error>) -> Self;
2135 fn to_result(self) -> Result<T, Self::Error>;
// Infallible case: note the irrefutable `Ok(x)` pattern — the associated
// `Error` type here is presumably uninhabited (declaration elided; confirm
// against the full source).
2138 impl<T> MaybeResult<T> for T {
2141 fn from(Ok(x): Result<T, Self::Error>) -> Self {
2144 fn to_result(self) -> Result<T, Self::Error> {
// Fallible case: `Result` passes through unchanged in both directions.
2149 impl<T, E> MaybeResult<T> for Result<T, E> {
2152 fn from(x: Result<T, Self::Error>) -> Self {
2155 fn to_result(self) -> Result<T, Self::Error> {
// A layout paired with the `Ty` it describes: this crate's instantiation of
// the generic `rustc_target::abi::TyAndLayout`.
2160 pub type TyAndLayout<'tcx> = rustc_target::abi::TyAndLayout<'tcx, Ty<'tcx>>;
2162 /// Trait for contexts that want to be able to compute layouts of types.
2163 /// This automatically gives access to `LayoutOf`, through a blanket `impl`.
2164 pub trait LayoutOfHelpers<'tcx>: HasDataLayout + HasTyCtxt<'tcx> + HasParamEnv<'tcx> {
2165 /// The `TyAndLayout`-wrapping type (or `TyAndLayout` itself), which will be
2166 /// returned from `layout_of` (see also `handle_layout_err`).
2167 type LayoutOfResult: MaybeResult<TyAndLayout<'tcx>>;
2169 /// `Span` to use for `tcx.at(span)`, from `layout_of`.
/// (Consulted by `spanned_layout_of` when its `span` argument is a dummy.)
2170 // FIXME(eddyb) perhaps make this mandatory to get contexts to track it better?
2172 fn layout_tcx_at_span(&self) -> Span {
2176 /// Helper used for `layout_of`, to adapt `tcx.layout_of(...)` into a
2177 /// `Self::LayoutOfResult` (which does not need to be a `Result<...>`).
2179 /// Most `impl`s, which propagate `LayoutError`s, should simply return `err`,
2180 /// but this hook allows e.g. codegen to return only `TyAndLayout` from its
2181 /// `cx.layout_of(...)`, without any `Result<...>` around it to deal with
2182 /// (and any `LayoutError`s are turned into fatal errors or ICEs).
2183 fn handle_layout_err(
2185 err: LayoutError<'tcx>,
2188 ) -> <Self::LayoutOfResult as MaybeResult<TyAndLayout<'tcx>>>::Error;
2191 /// Blanket extension trait for contexts that can compute layouts of types.
2192 pub trait LayoutOf<'tcx>: LayoutOfHelpers<'tcx> {
2193 /// Computes the layout of a type. Note that this implicitly
2194 /// executes in "reveal all" mode, and will normalize the input type.
/// Convenience wrapper: delegates to `spanned_layout_of` with a dummy span.
2196 fn layout_of(&self, ty: Ty<'tcx>) -> Self::LayoutOfResult {
2197 self.spanned_layout_of(ty, DUMMY_SP)
2200 /// Computes the layout of a type, at `span`. Note that this implicitly
2201 /// executes in "reveal all" mode, and will normalize the input type.
2202 // FIXME(eddyb) avoid passing information like this, and instead add more
2203 // `TyCtxt::at`-like APIs to be able to do e.g. `cx.at(span).layout_of(ty)`.
2205 fn spanned_layout_of(&self, ty: Ty<'tcx>, span: Span) -> Self::LayoutOfResult {
// Dummy spans fall back to whatever span the context tracks.
2206 let span = if !span.is_dummy() { span } else { self.layout_tcx_at_span() };
2207 let tcx = self.tcx().at(span);
// Errors are routed through `handle_layout_err` so non-`Result`
// contexts can turn them into fatal errors instead.
2210 tcx.layout_of(self.param_env().and(ty))
2211 .map_err(|err| self.handle_layout_err(err, span, ty)),
// Every `LayoutOfHelpers` context automatically gets the `layout_of`
// convenience methods.
2216 impl<'tcx, C: LayoutOfHelpers<'tcx>> LayoutOf<'tcx> for C {}
// `LayoutCx` over a plain `TyCtxt` surfaces layout failures directly as
// `LayoutError` (see the `Result` `LayoutOfResult` below).
2218 impl<'tcx> LayoutOfHelpers<'tcx> for LayoutCx<'tcx, TyCtxt<'tcx>> {
2219 type LayoutOfResult = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
2222 fn handle_layout_err(&self, err: LayoutError<'tcx>, _: Span, _: Ty<'tcx>) -> LayoutError<'tcx> {
// Same as the `TyCtxt` impl above, but the span-carrying `TyCtxtAt` variant
// additionally overrides `layout_tcx_at_span` to report its tracked span.
2227 impl<'tcx> LayoutOfHelpers<'tcx> for LayoutCx<'tcx, ty::query::TyCtxtAt<'tcx>> {
2228 type LayoutOfResult = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
2231 fn layout_tcx_at_span(&self) -> Span {
2236 fn handle_layout_err(&self, err: LayoutError<'tcx>, _: Span, _: Ty<'tcx>) -> LayoutError<'tcx> {
// `TyAbiInterface` teaches the target-ABI layer how to navigate `TyAndLayout`:
// selecting a variant's layout, projecting a field's layout, and computing
// pointee (alignment/noalias) info at an offset.
// NOTE(review): many intermediate lines are absent from this excerpt (the
// embedded numbering jumps, e.g. 2256 -> 2261); treat bodies as fragments.
2241 impl<'tcx, C> TyAbiInterface<'tcx, C> for Ty<'tcx>
2243 C: HasTyCtxt<'tcx> + HasParamEnv<'tcx>,
// Returns the layout of one variant of `this` (e.g. for enum codegen).
2245 fn ty_and_layout_for_variant(
2246 this: TyAndLayout<'tcx>,
2248 variant_index: VariantIdx,
2249 ) -> TyAndLayout<'tcx> {
2250 let layout = match this.variants {
2251 Variants::Single { index }
2252 // If all variants but one are uninhabited, the variant layout is the enum layout.
2253 if index == variant_index &&
2254 // Don't confuse variants of uninhabited enums with the enum itself.
2255 // For more details see https://github.com/rust-lang/rust/issues/69763.
2256 this.fields != FieldsShape::Primitive =>
// Single-variant case where the requested variant is NOT the represented one:
// synthesize an uninhabited layout for it (fields counted from the ADT def).
2261 Variants::Single { index } => {
2263 let param_env = cx.param_env();
2265 // Deny calling for_variant more than once for non-Single enums.
2266 if let Ok(original_layout) = tcx.layout_of(param_env.and(this.ty)) {
2267 assert_eq!(original_layout.variants, Variants::Single { index });
2270 let fields = match this.ty.kind() {
2271 ty::Adt(def, _) if def.variants().is_empty() =>
2272 bug!("for_variant called on zero-variant enum"),
2273 ty::Adt(def, _) => def.variant(variant_index).fields.len(),
2276 tcx.intern_layout(LayoutS {
2277 variants: Variants::Single { index: variant_index },
2278 fields: match NonZeroUsize::new(fields) {
2279 Some(fields) => FieldsShape::Union(fields),
2280 None => FieldsShape::Arbitrary { offsets: vec![], memory_index: vec![] },
2282 abi: Abi::Uninhabited,
2283 largest_niche: None,
2284 align: tcx.data_layout.i8_align,
// Multi-variant enums store per-variant layouts; just index into them.
2289 Variants::Multiple { ref variants, .. } => variants[variant_index],
2292 assert_eq!(*layout.variants(), Variants::Single { index: variant_index });
2294 TyAndLayout { ty: this.ty, layout }
// Returns the layout of field `i` of `this`, dispatching on the type kind.
2297 fn ty_and_layout_field(this: TyAndLayout<'tcx>, cx: &C, i: usize) -> TyAndLayout<'tcx> {
// Internal result: either a type still needing `layout_of`, or a ready layout.
2298 enum TyMaybeWithLayout<'tcx> {
2300 TyAndLayout(TyAndLayout<'tcx>),
2303 fn field_ty_or_layout<'tcx>(
2304 this: TyAndLayout<'tcx>,
2305 cx: &(impl HasTyCtxt<'tcx> + HasParamEnv<'tcx>),
2307 ) -> TyMaybeWithLayout<'tcx> {
// Builds a synthetic layout for an enum's tag scalar.
2309 let tag_layout = |tag: Scalar| -> TyAndLayout<'tcx> {
2311 layout: tcx.intern_layout(LayoutS::scalar(cx, tag)),
2312 ty: tag.primitive().to_ty(tcx),
2316 match *this.ty.kind() {
2325 | ty::GeneratorWitness(..)
2327 | ty::Dynamic(..) => bug!("TyAndLayout::field({:?}): not applicable", this),
2329 // Potentially-fat pointers.
2330 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
2331 assert!(i < this.fields.count());
2333 // Reuse the fat `*T` type as its own thin pointer data field.
2334 // This provides information about, e.g., DST struct pointees
2335 // (which may have no non-DST form), and will work as long
2336 // as the `Abi` or `FieldsShape` is checked by users.
2338 let nil = tcx.mk_unit();
2339 let unit_ptr_ty = if this.ty.is_unsafe_ptr() {
2342 tcx.mk_mut_ref(tcx.lifetimes.re_static, nil)
2345 // NOTE(eddyb) using an empty `ParamEnv`, and `unwrap`-ing
2346 // the `Result` should always work because the type is
2347 // always either `*mut ()` or `&'static mut ()`.
2348 return TyMaybeWithLayout::TyAndLayout(TyAndLayout {
2350 ..tcx.layout_of(ty::ParamEnv::reveal_all().and(unit_ptr_ty)).unwrap()
// The metadata half of a fat pointer depends on the unsized tail.
2354 match tcx.struct_tail_erasing_lifetimes(pointee, cx.param_env()).kind() {
2355 ty::Slice(_) | ty::Str => TyMaybeWithLayout::Ty(tcx.types.usize),
2356 ty::Dynamic(_, _) => {
2357 TyMaybeWithLayout::Ty(tcx.mk_imm_ref(
2358 tcx.lifetimes.re_static,
2359 tcx.mk_array(tcx.types.usize, 3),
2361 /* FIXME: use actual fn pointers
2362 Warning: naively computing the number of entries in the
2363 vtable by counting the methods on the trait + methods on
2364 all parent traits does not work, because some methods can
2365 be not object safe and thus excluded from the vtable.
2366 Increase this counter if you tried to implement this but
2367 failed to do it without duplicating a lot of code from
2368 other places in the compiler: 2
2370 tcx.mk_array(tcx.types.usize, 3),
2371 tcx.mk_array(Option<fn()>),
2375 _ => bug!("TyAndLayout::field({:?}): not applicable", this),
2379 // Arrays and slices.
2380 ty::Array(element, _) | ty::Slice(element) => TyMaybeWithLayout::Ty(element),
2381 ty::Str => TyMaybeWithLayout::Ty(tcx.types.u8),
2383 // Tuples, generators and closures.
2384 ty::Closure(_, ref substs) => field_ty_or_layout(
2385 TyAndLayout { ty: substs.as_closure().tupled_upvars_ty(), ..this },
2390 ty::Generator(def_id, ref substs, _) => match this.variants {
2391 Variants::Single { index } => TyMaybeWithLayout::Ty(
2394 .state_tys(def_id, tcx)
2395 .nth(index.as_usize())
2400 Variants::Multiple { tag, tag_field, .. } => {
2402 return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
2404 TyMaybeWithLayout::Ty(substs.as_generator().prefix_tys().nth(i).unwrap())
2408 ty::Tuple(tys) => TyMaybeWithLayout::Ty(tys[i]),
2411 ty::Adt(def, substs) => {
2412 match this.variants {
2413 Variants::Single { index } => {
2414 TyMaybeWithLayout::Ty(def.variant(index).fields[i].ty(tcx, substs))
2417 // Discriminant field for enums (where applicable).
2418 Variants::Multiple { tag, .. } => {
2420 return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
2427 | ty::Placeholder(..)
2431 | ty::Error(_) => bug!("TyAndLayout::field: unexpected type `{}`", this.ty),
// Resolve a bare field type through `layout_of`; a failure here is a
// compiler invariant violation, hence the loud bug message.
2435 match field_ty_or_layout(this, cx, i) {
2436 TyMaybeWithLayout::Ty(field_ty) => {
2437 cx.tcx().layout_of(cx.param_env().and(field_ty)).unwrap_or_else(|e| {
2439 "failed to get layout for `{}`: {},\n\
2440 despite it being a field (#{}) of an existing layout: {:#?}",
2448 TyMaybeWithLayout::TyAndLayout(field_layout) => field_layout,
// Computes `PointeeInfo` (size/align/safety kind) for a pointer found at
// byte `offset` inside `this`, used to emit attributes like noalias.
2452 fn ty_and_layout_pointee_info_at(
2453 this: TyAndLayout<'tcx>,
2456 ) -> Option<PointeeInfo> {
2458 let param_env = cx.param_env();
2460 let addr_space_of_ty = |ty: Ty<'tcx>| {
2461 if ty.is_fn() { cx.data_layout().instruction_address_space } else { AddressSpace::DATA }
2464 let pointee_info = match *this.ty.kind() {
2465 ty::RawPtr(mt) if offset.bytes() == 0 => {
2466 tcx.layout_of(param_env.and(mt.ty)).ok().map(|layout| PointeeInfo {
2468 align: layout.align.abi,
2470 address_space: addr_space_of_ty(mt.ty),
2473 ty::FnPtr(fn_sig) if offset.bytes() == 0 => {
2474 tcx.layout_of(param_env.and(tcx.mk_fn_ptr(fn_sig))).ok().map(|layout| PointeeInfo {
2476 align: layout.align.abi,
2478 address_space: cx.data_layout().instruction_address_space,
2481 ty::Ref(_, ty, mt) if offset.bytes() == 0 => {
2482 let address_space = addr_space_of_ty(ty);
2483 let kind = if tcx.sess.opts.optimize == OptLevel::No {
2484 // Use conservative pointer kind if not optimizing. This saves us the
2485 // Freeze/Unpin queries, and can save time in the codegen backend (noalias
2486 // attributes in LLVM have compile-time cost even in unoptimized builds).
2490 hir::Mutability::Not => {
2491 if ty.is_freeze(tcx.at(DUMMY_SP), cx.param_env()) {
2497 hir::Mutability::Mut => {
2498 // References to self-referential structures should not be considered
2499 // noalias, as another pointer to the structure can be obtained, that
2500 // is not based-on the original reference. We consider all !Unpin
2501 // types to be potentially self-referential here.
2502 if ty.is_unpin(tcx.at(DUMMY_SP), cx.param_env()) {
2503 PointerKind::UniqueBorrowed
2511 tcx.layout_of(param_env.and(ty)).ok().map(|layout| PointeeInfo {
2513 align: layout.align.abi,
// Non-pointer case: recurse into the field containing `offset`, but only
// through the "dataful" variant of a niche-encoded enum (see below).
2520 let mut data_variant = match this.variants {
2521 // Within the discriminant field, only the niche itself is
2522 // always initialized, so we only check for a pointer at its
2525 // If the niche is a pointer, it's either valid (according
2526 // to its type), or null (which the niche field's scalar
2527 // validity range encodes). This allows using
2528 // `dereferenceable_or_null` for e.g., `Option<&T>`, and
2529 // this will continue to work as long as we don't start
2530 // using more niches than just null (e.g., the first page of
2531 // the address space, or unaligned pointers).
2532 Variants::Multiple {
2533 tag_encoding: TagEncoding::Niche { dataful_variant, .. },
2536 } if this.fields.offset(tag_field) == offset => {
2537 Some(this.for_variant(cx, dataful_variant))
2542 if let Some(variant) = data_variant {
2543 // We're not interested in any unions.
2544 if let FieldsShape::Union(_) = variant.fields {
2545 data_variant = None;
2549 let mut result = None;
2551 if let Some(variant) = data_variant {
// Find the field whose byte range fully contains [offset, offset + ptr size).
2552 let ptr_end = offset + Pointer.size(cx);
2553 for i in 0..variant.fields.count() {
2554 let field_start = variant.fields.offset(i);
2555 if field_start <= offset {
2556 let field = variant.field(cx, i);
2557 result = field.to_result().ok().and_then(|field| {
2558 if ptr_end <= field_start + field.size {
2559 // We found the right field, look inside it.
2561 field.pointee_info_at(cx, offset - field_start);
2567 if result.is_some() {
2574 // FIXME(eddyb) This should be for `ptr::Unique<T>`, not `Box<T>`.
2575 if let Some(ref mut pointee) = result {
2576 if let ty::Adt(def, _) = this.ty.kind() {
2577 if def.is_box() && offset.bytes() == 0 {
2578 pointee.safe = Some(PointerKind::UniqueOwned);
2588 "pointee_info_at (offset={:?}, type kind: {:?}) => {:?}",
// Simple type-kind predicates used by the ABI layer.
2597 fn is_adt(this: TyAndLayout<'tcx>) -> bool {
2598 matches!(this.ty.kind(), ty::Adt(..))
2601 fn is_never(this: TyAndLayout<'tcx>) -> bool {
2602 this.ty.kind() == &ty::Never
2605 fn is_tuple(this: TyAndLayout<'tcx>) -> bool {
2606 matches!(this.ty.kind(), ty::Tuple(..))
2609 fn is_unit(this: TyAndLayout<'tcx>) -> bool {
2610 matches!(this.ty.kind(), ty::Tuple(list) if list.len() == 0)
// Builds the signature used for ABI computation from an `Instance`,
// lowering closures and generators to their "called" form (explicit env
// parameter; `Pin<&mut Self>` + `GeneratorState` return for generators).
// NOTE(review): intermediate lines are missing from this excerpt
// (numbering jumps, e.g. 2645 -> 2651); bodies are fragments.
2614 impl<'tcx> ty::Instance<'tcx> {
2615 // NOTE(eddyb) this is private to avoid using it from outside of
2616 // `fn_abi_of_instance` - any other uses are either too high-level
2617 // for `Instance` (e.g. typeck would use `Ty::fn_sig` instead),
2618 // or should go through `FnAbi` instead, to avoid losing any
2619 // adjustments `fn_abi_of_instance` might be performing.
2620 fn fn_sig_for_fn_abi(
2623 param_env: ty::ParamEnv<'tcx>,
2624 ) -> ty::PolyFnSig<'tcx> {
2625 let ty = self.ty(tcx, param_env);
2628 // HACK(davidtwco,eddyb): This is a workaround for polymorphization considering
2629 // parameters unused if they show up in the signature, but not in the `mir::Body`
2630 // (i.e. due to being inside a projection that got normalized, see
2631 // `src/test/ui/polymorphization/normalized_sig_types.rs`), and codegen not keeping
2632 // track of a polymorphization `ParamEnv` to allow normalizing later.
2633 let mut sig = match *ty.kind() {
2634 ty::FnDef(def_id, substs) => tcx
2635 .normalize_erasing_regions(tcx.param_env(def_id), tcx.fn_sig(def_id))
2636 .subst(tcx, substs),
2637 _ => unreachable!(),
2640 if let ty::InstanceDef::VtableShim(..) = self.def {
2641 // Modify `fn(self, ...)` to `fn(self: *mut Self, ...)`.
2642 sig = sig.map_bound(|mut sig| {
2643 let mut inputs_and_output = sig.inputs_and_output.to_vec();
2644 inputs_and_output[0] = tcx.mk_mut_ptr(inputs_and_output[0]);
2645 sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);
// Closures: prepend the environment type (built with a fresh late-bound
// `BrEnv` region) to the closure's own inputs.
2651 ty::Closure(def_id, substs) => {
2652 let sig = substs.as_closure().sig();
2654 let bound_vars = tcx.mk_bound_variable_kinds(
2657 .chain(iter::once(ty::BoundVariableKind::Region(ty::BrEnv))),
2659 let br = ty::BoundRegion {
2660 var: ty::BoundVar::from_usize(bound_vars.len() - 1),
2661 kind: ty::BoundRegionKind::BrEnv,
2663 let env_region = ty::ReLateBound(ty::INNERMOST, br);
2664 let env_ty = tcx.closure_env_ty(def_id, substs, env_region).unwrap();
2666 let sig = sig.skip_binder();
2667 ty::Binder::bind_with_vars(
2669 iter::once(env_ty).chain(sig.inputs().iter().cloned()),
// Generators: env is `Pin<&mut Self>`; the return type is
// `GeneratorState<Yield, Return>` built from the generator signature.
2678 ty::Generator(_, substs, _) => {
2679 let sig = substs.as_generator().poly_sig();
2681 let bound_vars = tcx.mk_bound_variable_kinds(
2684 .chain(iter::once(ty::BoundVariableKind::Region(ty::BrEnv))),
2686 let br = ty::BoundRegion {
2687 var: ty::BoundVar::from_usize(bound_vars.len() - 1),
2688 kind: ty::BoundRegionKind::BrEnv,
2690 let env_region = ty::ReLateBound(ty::INNERMOST, br);
2691 let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty);
2693 let pin_did = tcx.require_lang_item(LangItem::Pin, None);
2694 let pin_adt_ref = tcx.adt_def(pin_did);
2695 let pin_substs = tcx.intern_substs(&[env_ty.into()]);
2696 let env_ty = tcx.mk_adt(pin_adt_ref, pin_substs);
2698 let sig = sig.skip_binder();
2699 let state_did = tcx.require_lang_item(LangItem::GeneratorState, None);
2700 let state_adt_ref = tcx.adt_def(state_did);
2701 let state_substs = tcx.intern_substs(&[sig.yield_ty.into(), sig.return_ty.into()]);
2702 let ret_ty = tcx.mk_adt(state_adt_ref, state_substs);
2703 ty::Binder::bind_with_vars(
2705 [env_ty, sig.resume_ty].iter(),
2708 hir::Unsafety::Normal,
2709 rustc_target::spec::abi::Abi::Rust,
2714 _ => bug!("unexpected type {:?} in Instance::fn_sig", ty),
2719 /// Calculates whether a function's ABI can unwind or not.
2721 /// This takes two primary parameters:
2723 /// * `codegen_fn_attr_flags` - these are flags calculated as part of the
2724 /// codegen attrs for a defined function. For function pointers this set of
2725 /// flags is the empty set. This is only applicable for Rust-defined
2726 /// functions, and generally isn't needed except for small optimizations where
2727 /// we try to say a function which otherwise might look like it could unwind
2728 /// doesn't actually unwind (such as for intrinsics and such).
2730 /// * `abi` - this is the ABI that the function is defined with. This is the
2731 /// primary factor for determining whether a function can unwind or not.
2733 /// Note that in this case unwinding is not necessarily panicking in Rust. Rust
2734 /// panics are implemented with unwinds on most platform (when
2735 /// `-Cpanic=unwind`), but this also accounts for `-Cpanic=abort` build modes.
2736 /// Notably unwinding is disallowed for more non-Rust ABIs unless it's
2737 /// specifically in the name (e.g. `"C-unwind"`). Unwinding within each ABI is
2738 /// defined for each ABI individually, but it always corresponds to some form of
2739 /// stack-based unwinding (the exact mechanism of which varies
2740 /// platform-by-platform).
2742 /// Rust functions are classified whether or not they can unwind based on the
2743 /// active "panic strategy". In other words Rust functions are considered to
2744 /// unwind in `-Cpanic=unwind` mode and cannot unwind in `-Cpanic=abort` mode.
2745 /// Note that Rust supports intermingling panic=abort and panic=unwind code, but
2746 /// only if the final panic mode is panic=abort. In this scenario any code
2747 /// previously compiled assuming that a function can unwind is still correct, it
2748 /// just never happens to actually unwind at runtime.
2750 /// This function's answer to whether or not a function can unwind is quite
2751 /// impactful throughout the compiler. This affects things like:
2753 /// * Calling a function which can't unwind means codegen simply ignores any
2754 /// associated unwinding cleanup.
2755 /// * Calling a function which can unwind from a function which can't unwind
2756 /// causes the `abort_unwinding_calls` MIR pass to insert a landing pad that
2757 /// aborts the process.
2758 /// * This affects whether functions have the LLVM `nounwind` attribute, which
2759 /// affects various optimizations and codegen.
2761 /// FIXME: this is actually buggy with respect to Rust functions. Rust functions
2762 /// compiled with `-Cpanic=unwind` and referenced from another crate compiled
2763 /// with `-Cpanic=abort` will look like they can't unwind when in fact they
2764 /// might (from a foreign exception or similar).
// NOTE(review): lines are missing from this excerpt (e.g. the early `return`s
// after the attribute checks and the `match abi` header); the fragments below
// show only the visible structure.
2766 pub fn fn_can_unwind<'tcx>(tcx: TyCtxt<'tcx>, fn_def_id: Option<DefId>, abi: SpecAbi) -> bool {
2767 if let Some(did) = fn_def_id {
2768 // Special attribute for functions which can't unwind.
2769 if tcx.codegen_fn_attrs(did).flags.contains(CodegenFnAttrFlags::NEVER_UNWIND) {
2773 // With -Z panic-in-drop=abort, drop_in_place never unwinds.
2775 // This is not part of `codegen_fn_attrs` as it can differ between crates
2776 // and therefore cannot be computed in core.
2777 if tcx.sess.opts.debugging_opts.panic_in_drop == PanicStrategy::Abort {
2778 if Some(did) == tcx.lang_items().drop_in_place_fn() {
2784 // Otherwise if this isn't special then unwinding is generally determined by
2785 // the ABI of the itself. ABIs like `C` have variants which also
2786 // specifically allow unwinding (`C-unwind`), but not all platform-specific
2787 // ABIs have such an option. Otherwise the only other thing here is Rust
2788 // itself, and those ABIs are determined by the panic strategy configured
2789 // for this compilation.
2791 // Unfortunately at this time there's also another caveat. Rust [RFC
2792 // 2945][rfc] has been accepted and is in the process of being implemented
2793 // and stabilized. In this interim state we need to deal with historical
2794 // rustc behavior as well as plan for future rustc behavior.
2796 // Historically functions declared with `extern "C"` were marked at the
2797 // codegen layer as `nounwind`. This happened regardless of `panic=unwind`
2798 // or not. This is UB for functions in `panic=unwind` mode that then
2799 // actually panic and unwind. Note that this behavior is true for both
2800 // externally declared functions as well as Rust-defined function.
2802 // To fix this UB rustc would like to change in the future to catch unwinds
2803 // from function calls that may unwind within a Rust-defined `extern "C"`
2804 // function and forcibly abort the process, thereby respecting the
2805 // `nounwind` attribute emitted for `extern "C"`. This behavior change isn't
2806 // ready to roll out, so determining whether or not the `C` family of ABIs
2807 // unwinds is conditional not only on their definition but also whether the
2808 // `#![feature(c_unwind)]` feature gate is active.
2810 // Note that this means that unlike historical compilers rustc now, by
2811 // default, unconditionally thinks that the `C` ABI may unwind. This will
2812 // prevent some optimization opportunities, however, so we try to scope this
2813 // change and only assume that `C` unwinds with `panic=unwind` (as opposed
2814 // to `panic=abort`).
2816 // Eventually the check against `c_unwind` here will ideally get removed and
2817 // this'll be a little cleaner as it'll be a straightforward check of the
2820 // [rfc]: https://github.com/rust-lang/rfcs/blob/master/text/2945-c-unwind-abi.md
// The `-unwind` ABI family: unwinds when explicitly requested, or (pre
// `c_unwind` stabilization) whenever the panic strategy is `unwind`.
2826 | Stdcall { unwind }
2827 | Fastcall { unwind }
2828 | Vectorcall { unwind }
2829 | Thiscall { unwind }
2832 | SysV64 { unwind } => {
2834 || (!tcx.features().c_unwind && tcx.sess.panic_strategy() == PanicStrategy::Unwind)
// ABIs with no unwinding support at all.
2842 | AvrNonBlockingInterrupt
2843 | CCmseNonSecureCall
2847 | Unadjusted => false,
// Rust ABIs unwind exactly when compiling with `-Cpanic=unwind`.
2848 Rust | RustCall => tcx.sess.panic_strategy() == PanicStrategy::Unwind,
// Maps a source-level `extern` ABI (after target adjustment) to the calling
// convention used by codegen. `System`/`EfiApi` must already have been
// resolved to a concrete ABI by `adjust_abi`, hence the `bug!`s.
// NOTE(review): a few lines are absent from this excerpt (e.g. between 2856
// and 2858, and the tail after 2881); confirm against the upstream file.
2853 pub fn conv_from_spec_abi(tcx: TyCtxt<'_>, abi: SpecAbi) -> Conv {
2854 use rustc_target::spec::abi::Abi::*;
2855 match tcx.sess.target.adjust_abi(abi) {
2856 RustIntrinsic | PlatformIntrinsic | Rust | RustCall => Conv::Rust,
2858 // It's the ABI's job to select this, not ours.
2859 System { .. } => bug!("system abi should be selected elsewhere"),
2860 EfiApi => bug!("eficall abi should be selected elsewhere"),
2862 Stdcall { .. } => Conv::X86Stdcall,
2863 Fastcall { .. } => Conv::X86Fastcall,
2864 Vectorcall { .. } => Conv::X86VectorCall,
2865 Thiscall { .. } => Conv::X86ThisCall,
2866 C { .. } => Conv::C,
2867 Unadjusted => Conv::C,
2868 Win64 { .. } => Conv::X86_64Win64,
2869 SysV64 { .. } => Conv::X86_64SysV,
2870 Aapcs { .. } => Conv::ArmAapcs,
2871 CCmseNonSecureCall => Conv::CCmseNonSecureCall,
2872 PtxKernel => Conv::PtxKernel,
2873 Msp430Interrupt => Conv::Msp430Intr,
2874 X86Interrupt => Conv::X86Intr,
2875 AmdGpuKernel => Conv::AmdGpuKernel,
2876 AvrInterrupt => Conv::AvrInterrupt,
2877 AvrNonBlockingInterrupt => Conv::AvrNonBlockingInterrupt,
2880 // These API constants ought to be more specific...
2881 Cdecl { .. } => Conv::C,
2885 /// Error produced by attempting to compute or adjust a `FnAbi`.
2886 #[derive(Copy, Clone, Debug, HashStable)]
2887 pub enum FnAbiError<'tcx> {
2888 /// Error produced by a `layout_of` call, while computing `FnAbi` initially.
2889 Layout(LayoutError<'tcx>),
2891 /// Error produced by attempting to adjust a `FnAbi`, for a "foreign" ABI.
2892 AdjustForForeignAbi(call::AdjustForForeignAbiError),
// Conversions so `?` can lift both underlying error kinds into `FnAbiError`.
// NOTE(review): the `From<LayoutError>` body (presumably `Self::Layout(err)`)
// is missing from this excerpt.
2895 impl<'tcx> From<LayoutError<'tcx>> for FnAbiError<'tcx> {
2896 fn from(err: LayoutError<'tcx>) -> Self {
2901 impl From<call::AdjustForForeignAbiError> for FnAbiError<'_> {
2902 fn from(err: call::AdjustForForeignAbiError) -> Self {
2903 Self::AdjustForForeignAbi(err)
// `Display` simply delegates to whichever inner error is stored.
2907 impl<'tcx> fmt::Display for FnAbiError<'tcx> {
2908 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2910 Self::Layout(err) => err.fmt(f),
2911 Self::AdjustForForeignAbi(err) => err.fmt(f),
2916 // FIXME(eddyb) maybe use something like this for an unified `fn_abi_of`, not
2917 // just for error handling.
// Describes which `fn_abi_of_*` request failed, for error reporting.
2919 pub enum FnAbiRequest<'tcx> {
2920 OfFnPtr { sig: ty::PolyFnSig<'tcx>, extra_args: &'tcx ty::List<Ty<'tcx>> },
2921 OfInstance { instance: ty::Instance<'tcx>, extra_args: &'tcx ty::List<Ty<'tcx>> },
2924 /// Trait for contexts that want to be able to compute `FnAbi`s.
2925 /// This automatically gives access to `FnAbiOf`, through a blanket `impl`.
2926 pub trait FnAbiOfHelpers<'tcx>: LayoutOfHelpers<'tcx> {
2927 /// The `&FnAbi`-wrapping type (or `&FnAbi` itself), which will be
2928 /// returned from `fn_abi_of_*` (see also `handle_fn_abi_err`).
2929 type FnAbiOfResult: MaybeResult<&'tcx FnAbi<'tcx, Ty<'tcx>>>;
2931 /// Helper used for `fn_abi_of_*`, to adapt `tcx.fn_abi_of_*(...)` into a
2932 /// `Self::FnAbiOfResult` (which does not need to be a `Result<...>`).
2934 /// Most `impl`s, which propagate `FnAbiError`s, should simply return `err`,
2935 /// but this hook allows e.g. codegen to return only `&FnAbi` from its
2936 /// `cx.fn_abi_of_*(...)`, without any `Result<...>` around it to deal with
2937 /// (and any `FnAbiError`s are turned into fatal errors or ICEs).
2938 fn handle_fn_abi_err(
2940 err: FnAbiError<'tcx>,
2942 fn_abi_request: FnAbiRequest<'tcx>,
2943 ) -> <Self::FnAbiOfResult as MaybeResult<&'tcx FnAbi<'tcx, Ty<'tcx>>>>::Error;
2946 /// Blanket extension trait for contexts that can compute `FnAbi`s.
2947 pub trait FnAbiOf<'tcx>: FnAbiOfHelpers<'tcx> {
2948 /// Compute a `FnAbi` suitable for indirect calls, i.e. to `fn` pointers.
2950 /// NB: this doesn't handle virtual calls - those should use `fn_abi_of_instance`
2951 /// instead, where the instance is an `InstanceDef::Virtual`.
2953 fn fn_abi_of_fn_ptr(
2955 sig: ty::PolyFnSig<'tcx>,
2956 extra_args: &'tcx ty::List<Ty<'tcx>>,
2957 ) -> Self::FnAbiOfResult {
2958 // FIXME(eddyb) get a better `span` here.
2959 let span = self.layout_tcx_at_span();
2960 let tcx = self.tcx().at(span);
2962 MaybeResult::from(tcx.fn_abi_of_fn_ptr(self.param_env().and((sig, extra_args))).map_err(
2963 |err| self.handle_fn_abi_err(err, span, FnAbiRequest::OfFnPtr { sig, extra_args }),
2967 /// Compute a `FnAbi` suitable for declaring/defining an `fn` instance, and for
2968 /// direct calls to an `fn`.
2970 /// NB: that includes virtual calls, which are represented by "direct calls"
2971 /// to an `InstanceDef::Virtual` instance (of `<dyn Trait as Trait>::fn`).
2973 fn fn_abi_of_instance(
2975 instance: ty::Instance<'tcx>,
2976 extra_args: &'tcx ty::List<Ty<'tcx>>,
2977 ) -> Self::FnAbiOfResult {
2978 // FIXME(eddyb) get a better `span` here.
2979 let span = self.layout_tcx_at_span();
2980 let tcx = self.tcx().at(span);
2983 tcx.fn_abi_of_instance(self.param_env().and((instance, extra_args))).map_err(|err| {
2984 // HACK(eddyb) at least for definitions of/calls to `Instance`s,
2985 // we can get some kind of span even if one wasn't provided.
2986 // However, we don't do this early in order to avoid calling
2987 // `def_span` unconditionally (which may have a perf penalty).
2988 let span = if !span.is_dummy() { span } else { tcx.def_span(instance.def_id()) };
2989 self.handle_fn_abi_err(err, span, FnAbiRequest::OfInstance { instance, extra_args })
// Blanket impl: any `FnAbiOfHelpers` context automatically gets `FnAbiOf`.
2995 impl<'tcx, C: FnAbiOfHelpers<'tcx>> FnAbiOf<'tcx> for C {}
// Query provider: `FnAbi` for an `fn` pointer signature. Delegates to
// `LayoutCx::fn_abi_new_uncached` with no caller-location/def-id/virtual info.
// NOTE(review): the `tcx` parameter lines are missing from this excerpt.
2997 fn fn_abi_of_fn_ptr<'tcx>(
2999 query: ty::ParamEnvAnd<'tcx, (ty::PolyFnSig<'tcx>, &'tcx ty::List<Ty<'tcx>>)>,
3000 ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
3001 let (param_env, (sig, extra_args)) = query.into_parts();
3003 LayoutCx { tcx, param_env }.fn_abi_new_uncached(sig, extra_args, None, None, false)
// Query provider: `FnAbi` for a concrete `Instance`, including the implicit
// `#[track_caller]` location argument and the virtual-call flag.
3006 fn fn_abi_of_instance<'tcx>(
3008 query: ty::ParamEnvAnd<'tcx, (ty::Instance<'tcx>, &'tcx ty::List<Ty<'tcx>>)>,
3009 ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
3010 let (param_env, (instance, extra_args)) = query.into_parts();
3012 let sig = instance.fn_sig_for_fn_abi(tcx, param_env);
3014 let caller_location = if instance.def.requires_caller_location(tcx) {
3015 Some(tcx.caller_location_ty())
3020 LayoutCx { tcx, param_env }.fn_abi_new_uncached(
3024 Some(instance.def_id()),
3025 matches!(instance.def, ty::InstanceDef::Virtual(..)),
// `FnAbi` construction: computes per-argument passing modes/attributes from
// the normalized signature, then applies Rust-specific and foreign-ABI fixups.
// NOTE(review): this excerpt is missing many intermediate lines (embedded
// numbering jumps, e.g. 3053 -> 3057); bodies below are fragments.
3029 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
3030 // FIXME(eddyb) perhaps group the signature/type-containing (or all of them?)
3031 // arguments of this method, into a separate `struct`.
3032 fn fn_abi_new_uncached(
3034 sig: ty::PolyFnSig<'tcx>,
3035 extra_args: &[Ty<'tcx>],
3036 caller_location: Option<Ty<'tcx>>,
3037 fn_def_id: Option<DefId>,
3038 // FIXME(eddyb) replace this with something typed, like an `enum`.
3039 force_thin_self_ptr: bool,
3040 ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
3041 debug!("fn_abi_new_uncached({:?}, {:?})", sig, extra_args);
3043 let sig = self.tcx.normalize_erasing_late_bound_regions(self.param_env, sig);
3045 let conv = conv_from_spec_abi(self.tcx(), sig.abi);
// For "rust-call" the final tuple argument is untupled into `extra_args`.
3047 let mut inputs = sig.inputs();
3048 let extra_args = if sig.abi == RustCall {
3049 assert!(!sig.c_variadic && extra_args.is_empty());
3051 if let Some(input) = sig.inputs().last() {
3052 if let ty::Tuple(tupled_arguments) = input.kind() {
3053 inputs = &sig.inputs()[0..sig.inputs().len() - 1];
3057 "argument to function with \"rust-call\" ABI \
3063 "argument to function with \"rust-call\" ABI \
3068 assert!(sig.c_variadic || extra_args.is_empty());
// Target quirks: these platforms do NOT ignore zero-sized arguments,
// so the ZST-skipping below is disabled for them.
3072 let target = &self.tcx.sess.target;
3073 let target_env_gnu_like = matches!(&target.env[..], "gnu" | "musl" | "uclibc");
3074 let win_x64_gnu = target.os == "windows" && target.arch == "x86_64" && target.env == "gnu";
3075 let linux_s390x_gnu_like =
3076 target.os == "linux" && target.arch == "s390x" && target_env_gnu_like;
3077 let linux_sparc64_gnu_like =
3078 target.os == "linux" && target.arch == "sparc64" && target_env_gnu_like;
3079 let linux_powerpc_gnu_like =
3080 target.os == "linux" && target.arch == "powerpc" && target_env_gnu_like;
3082 let rust_abi = matches!(sig.abi, RustIntrinsic | PlatformIntrinsic | Rust | RustCall);
3084 // Handle safe Rust thin and fat pointers.
3085 let adjust_for_rust_scalar = |attrs: &mut ArgAttributes,
3087 layout: TyAndLayout<'tcx>,
3090 // Booleans are always a noundef i1 that needs to be zero-extended.
3091 if scalar.is_bool() {
3092 attrs.ext(ArgExtension::Zext);
3093 attrs.set(ArgAttribute::NoUndef);
3097 // Scalars which have invalid values cannot be undef.
3098 if !scalar.is_always_valid(self) {
3099 attrs.set(ArgAttribute::NoUndef);
3102 // Only pointer types handled below.
3103 let Scalar::Initialized { value: Pointer, valid_range} = scalar else { return };
3105 if !valid_range.contains(0) {
3106 attrs.set(ArgAttribute::NonNull);
3109 if let Some(pointee) = layout.pointee_info_at(self, offset) {
3110 if let Some(kind) = pointee.safe {
3111 attrs.pointee_align = Some(pointee.align);
3113 // `Box` (`UniqueBorrowed`) are not necessarily dereferenceable
3114 // for the entire duration of the function as they can be deallocated
3115 // at any time. Set their valid size to 0.
3116 attrs.pointee_size = match kind {
3117 PointerKind::UniqueOwned => Size::ZERO,
3121 // `Box`, `&T`, and `&mut T` cannot be undef.
3122 // Note that this only applies to the value of the pointer itself;
3123 // this attribute doesn't make it UB for the pointed-to data to be undef.
3124 attrs.set(ArgAttribute::NoUndef);
3126 // `Box` pointer parameters never alias because ownership is transferred
3127 // `&mut` pointer parameters never alias other parameters,
3128 // or mutable global data
3130 // `&T` where `T` contains no `UnsafeCell<U>` is immutable,
3131 // and can be marked as both `readonly` and `noalias`, as
3132 // LLVM's definition of `noalias` is based solely on memory
3133 // dependencies rather than pointer equality
3135 // Due to past miscompiles in LLVM, we apply a separate NoAliasMutRef attribute
3136 // for UniqueBorrowed arguments, so that the codegen backend can decide whether
3137 // or not to actually emit the attribute. It can also be controlled with the
3138 // `-Zmutable-noalias` debugging option.
3139 let no_alias = match kind {
3140 PointerKind::Shared | PointerKind::UniqueBorrowed => false,
3141 PointerKind::UniqueOwned => true,
3142 PointerKind::Frozen => !is_return,
3145 attrs.set(ArgAttribute::NoAlias);
3148 if kind == PointerKind::Frozen && !is_return {
3149 attrs.set(ArgAttribute::ReadOnly);
3152 if kind == PointerKind::UniqueBorrowed && !is_return {
3153 attrs.set(ArgAttribute::NoAliasMutRef);
// Builds the `ArgAbi` for one input (`arg_idx = Some(i)`) or the return
// value (`arg_idx = None`).
3159 let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| -> Result<_, FnAbiError<'tcx>> {
3160 let is_return = arg_idx.is_none();
3162 let layout = self.layout_of(ty)?;
3163 let layout = if force_thin_self_ptr && arg_idx == Some(0) {
3164 // Don't pass the vtable, it's not an argument of the virtual fn.
3165 // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
3166 // or `&/&mut dyn Trait` because this is special-cased elsewhere in codegen
3167 make_thin_self_ptr(self, layout)
3172 let mut arg = ArgAbi::new(self, layout, |layout, scalar, offset| {
3173 let mut attrs = ArgAttributes::new();
3174 adjust_for_rust_scalar(&mut attrs, scalar, *layout, offset, is_return);
3178 if arg.layout.is_zst() {
3179 // For some forsaken reason, x86_64-pc-windows-gnu
3180 // doesn't ignore zero-sized struct arguments.
3181 // The same is true for {s390x,sparc64,powerpc}-unknown-linux-{gnu,musl,uclibc}.
3185 && !linux_s390x_gnu_like
3186 && !linux_sparc64_gnu_like
3187 && !linux_powerpc_gnu_like)
3189 arg.mode = PassMode::Ignore;
// Assemble the full `FnAbi`: return, declared inputs, untupled extra
// args, and the optional implicit caller-location argument.
3196 let mut fn_abi = FnAbi {
3197 ret: arg_of(sig.output(), None)?,
3201 .chain(extra_args.iter().copied())
3202 .chain(caller_location)
3204 .map(|(i, ty)| arg_of(ty, Some(i)))
3205 .collect::<Result<_, _>>()?,
3206 c_variadic: sig.c_variadic,
3207 fixed_count: inputs.len(),
3209 can_unwind: fn_can_unwind(self.tcx(), fn_def_id, sig.abi),
3211 self.fn_abi_adjust_for_abi(&mut fn_abi, sig.abi)?;
3212 debug!("fn_abi_new_uncached = {:?}", fn_abi);
3213 Ok(self.tcx.arena.alloc(fn_abi))
// Post-processing: Rust ABIs get aggregate/SIMD fixups here; all other
// ABIs are delegated to the target-specific `adjust_for_foreign_abi`.
3216 fn fn_abi_adjust_for_abi(
3218 fn_abi: &mut FnAbi<'tcx, Ty<'tcx>>,
3220 ) -> Result<(), FnAbiError<'tcx>> {
3221 if abi == SpecAbi::Unadjusted {
3225 if abi == SpecAbi::Rust
3226 || abi == SpecAbi::RustCall
3227 || abi == SpecAbi::RustIntrinsic
3228 || abi == SpecAbi::PlatformIntrinsic
3230 let fixup = |arg: &mut ArgAbi<'tcx, Ty<'tcx>>| {
3231 if arg.is_ignore() {
3235 match arg.layout.abi {
3236 Abi::Aggregate { .. } => {}
3238 // This is a fun case! The gist of what this is doing is
3239 // that we want callers and callees to always agree on the
3240 // ABI of how they pass SIMD arguments. If we were to *not*
3241 // make these arguments indirect then they'd be immediates
3242 // in LLVM, which means that they'd used whatever the
3243 // appropriate ABI is for the callee and the caller. That
3244 // means, for example, if the caller doesn't have AVX
3245 // enabled but the callee does, then passing an AVX argument
3246 // across this boundary would cause corrupt data to show up.
3248 // This problem is fixed by unconditionally passing SIMD
3249 // arguments through memory between callers and callees
3250 // which should get them all to agree on ABI regardless of
3251 // target feature sets. Some more information about this
3252 // issue can be found in #44367.
3254 // Note that the platform intrinsic ABI is exempt here as
3255 // that's how we connect up to LLVM and it's unstable
3256 // anyway, we control all calls to it in libstd.
3258 if abi != SpecAbi::PlatformIntrinsic
3259 && self.tcx.sess.target.simd_types_indirect =>
3261 arg.make_indirect();
// Aggregates: pass indirectly if unsized or larger than a pointer,
// otherwise cast to a same-sized integer register.
3268 let size = arg.layout.size;
3269 if arg.layout.is_unsized() || size > Pointer.size(self) {
3270 arg.make_indirect();
3272 // We want to pass small aggregates as immediates, but using
3273 // a LLVM aggregate type for this leads to bad optimizations,
3274 // so we pick an appropriately sized integer type instead.
3275 arg.cast_to(Reg { kind: RegKind::Integer, size });
3278 fixup(&mut fn_abi.ret);
3279 for arg in &mut fn_abi.args {
3283 fn_abi.adjust_for_foreign_abi(self, abi)?;
3290 fn make_thin_self_ptr<'tcx>(
3291 cx: &(impl HasTyCtxt<'tcx> + HasParamEnv<'tcx>),
3292 layout: TyAndLayout<'tcx>,
3293 ) -> TyAndLayout<'tcx> {
3295 let fat_pointer_ty = if layout.is_unsized() {
3296 // unsized `self` is passed as a pointer to `self`
3297 // FIXME (mikeyhew) change this to use &own if it is ever added to the language
3298 tcx.mk_mut_ptr(layout.ty)
3301 Abi::ScalarPair(..) => (),
3302 _ => bug!("receiver type has unsupported layout: {:?}", layout),
3305 // In the case of Rc<Self>, we need to explicitly pass a *mut RcBox<Self>
3306 // with a Scalar (not ScalarPair) ABI. This is a hack that is understood
3307 // elsewhere in the compiler as a method on a `dyn Trait`.
3308 // To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes until we
3309 // get a built-in pointer type
3310 let mut fat_pointer_layout = layout;
3311 'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr()
3312 && !fat_pointer_layout.ty.is_region_ptr()
3314 for i in 0..fat_pointer_layout.fields.count() {
3315 let field_layout = fat_pointer_layout.field(cx, i);
3317 if !field_layout.is_zst() {
3318 fat_pointer_layout = field_layout;
3319 continue 'descend_newtypes;
3323 bug!("receiver has no non-zero-sized fields {:?}", fat_pointer_layout);
3326 fat_pointer_layout.ty
3329 // we now have a type like `*mut RcBox<dyn Trait>`
3330 // change its layout to that of `*mut ()`, a thin pointer, but keep the same type
3331 // this is understood as a special case elsewhere in the compiler
3332 let unit_ptr_ty = tcx.mk_mut_ptr(tcx.mk_unit());
3337 // NOTE(eddyb) using an empty `ParamEnv`, and `unwrap`-ing the `Result`
3338 // should always work because the type is always `*mut ()`.
3339 ..tcx.layout_of(ty::ParamEnv::reveal_all().and(unit_ptr_ty)).unwrap()