1 use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
2 use crate::mir::{GeneratorLayout, GeneratorSavedLocal};
3 use crate::ty::normalize_erasing_regions::NormalizationError;
4 use crate::ty::subst::Subst;
5 use crate::ty::{self, subst::SubstsRef, ReprOptions, Ty, TyCtxt, TypeFoldable};
7 use rustc_attr as attr;
9 use rustc_hir::lang_items::LangItem;
10 use rustc_index::bit_set::BitSet;
11 use rustc_index::vec::{Idx, IndexVec};
12 use rustc_session::{config::OptLevel, DataTypeKind, FieldInfo, SizeKind, VariantInfo};
13 use rustc_span::symbol::Symbol;
14 use rustc_span::{Span, DUMMY_SP};
15 use rustc_target::abi::call::{
16 ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, Conv, FnAbi, PassMode, Reg, RegKind,
18 use rustc_target::abi::*;
19 use rustc_target::spec::{abi::Abi as SpecAbi, HasTargetSpec, PanicStrategy, Target};
24 use std::num::NonZeroUsize;
27 use rand::{seq::SliceRandom, SeedableRng};
28 use rand_xoshiro::Xoshiro128StarStar;
// Registers this module's layout/ABI queries (`layout_of`, `fn_abi_of_fn_ptr`,
// `fn_abi_of_instance`) in the global query provider table, keeping all other
// providers unchanged via struct-update (`..*providers`) syntax.
// NOTE(review): this extract looks truncated — the assignment target
// (presumably `*providers = ...`) and the closing brace are not visible here.
30 pub fn provide(providers: &mut ty::query::Providers) {
32 ty::query::Providers { layout_of, fn_abi_of_fn_ptr, fn_abi_of_instance, ..*providers };
// Extension trait bridging rustc's integer type representations
// (`attr::IntType`, `ty::IntTy`, `ty::UintTy`) and the target-layout
// `Integer` abstraction, plus `Integer` -> `Ty` lowering.
// NOTE(review): the extract appears truncated — a `repr_discr`-style method
// is implemented below (L62-L86) but its declaration is not visible here.
35 pub trait IntegerExt {
// Lower this `Integer` width to the corresponding primitive `Ty`,
// signed or unsigned depending on `signed`.
36 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx>;
// Map a `#[repr(...)]` attribute integer type to a layout `Integer`.
37 fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
// Map a signed primitive integer type to a layout `Integer`.
38 fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer;
// Map an unsigned primitive integer type to a layout `Integer`.
39 fn from_uint_ty<C: HasDataLayout>(cx: &C, uty: ty::UintTy) -> Integer;
49 impl IntegerExt for Integer {
// Exhaustive mapping from (width, signedness) to the interned primitive type.
51 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx> {
52 match (*self, signed) {
53 (I8, false) => tcx.types.u8,
54 (I16, false) => tcx.types.u16,
55 (I32, false) => tcx.types.u32,
56 (I64, false) => tcx.types.u64,
57 (I128, false) => tcx.types.u128,
58 (I8, true) => tcx.types.i8,
59 (I16, true) => tcx.types.i16,
60 (I32, true) => tcx.types.i32,
61 (I64, true) => tcx.types.i64,
62 (I128, true) => tcx.types.i128,
66 /// Gets the Integer type from an attr::IntType.
67 fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
// The data layout is only needed for the pointer-sized isize/usize case.
68 let dl = cx.data_layout();
71 attr::SignedInt(ast::IntTy::I8) | attr::UnsignedInt(ast::UintTy::U8) => I8,
72 attr::SignedInt(ast::IntTy::I16) | attr::UnsignedInt(ast::UintTy::U16) => I16,
73 attr::SignedInt(ast::IntTy::I32) | attr::UnsignedInt(ast::UintTy::U32) => I32,
74 attr::SignedInt(ast::IntTy::I64) | attr::UnsignedInt(ast::UintTy::U64) => I64,
75 attr::SignedInt(ast::IntTy::I128) | attr::UnsignedInt(ast::UintTy::U128) => I128,
// isize/usize width depends on the target's pointer size.
76 attr::SignedInt(ast::IntTy::Isize) | attr::UnsignedInt(ast::UintTy::Usize) => {
77 dl.ptr_sized_integer()
// NOTE(review): the `I8` arm of this match is not visible in this extract;
// presumably dropped by the extraction, not absent upstream — verify.
82 fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer {
85 ty::IntTy::I16 => I16,
86 ty::IntTy::I32 => I32,
87 ty::IntTy::I64 => I64,
88 ty::IntTy::I128 => I128,
89 ty::IntTy::Isize => cx.data_layout().ptr_sized_integer(),
// NOTE(review): the `U8` arm is likewise not visible in this extract.
92 fn from_uint_ty<C: HasDataLayout>(cx: &C, ity: ty::UintTy) -> Integer {
95 ty::UintTy::U16 => I16,
96 ty::UintTy::U32 => I32,
97 ty::UintTy::U64 => I64,
98 ty::UintTy::U128 => I128,
99 ty::UintTy::Usize => cx.data_layout().ptr_sized_integer(),
103 /// Finds the appropriate Integer type and signedness for the given
104 /// signed discriminant range and `#[repr]` attribute.
105 /// N.B.: `u128` values above `i128::MAX` will be treated as signed, but
106 /// that shouldn't affect anything, other than maybe debuginfo.
113 ) -> (Integer, bool) {
114 // Theoretically, negative values could be larger in unsigned representation
115 // than the unsigned representation of the signed minimum. However, if there
116 // are any negative values, the only valid unsigned representation is u128
117 // which can fit all i128 values, so the result remains unaffected.
118 let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
119 let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));
// An explicit `#[repr(<int>)]` hint forces both width and signedness; a hint
// too small for the discriminant range is a hard error (message below).
121 if let Some(ity) = repr.int {
122 let discr = Integer::from_attr(&tcx, ity);
123 let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
126 "Integer::repr_discr: `#[repr]` hint too small for \
127 discriminant range of enum `{}",
131 return (discr, ity.is_signed());
134 let at_least = if repr.c() {
135 // This is usually I32, however it can be different on some platforms,
136 // notably hexagon and arm-none/thumb-none
137 tcx.data_layout().c_enum_min_size
139 // repr(Rust) enums try to be as small as possible
143 // If there are no negative values, we can use the unsigned fit.
145 (cmp::max(unsigned_fit, at_least), false)
147 (cmp::max(signed_fit, at_least), true)
// Extension trait for lowering layout `Primitive`s back to rustc `Ty`s.
152 pub trait PrimitiveExt {
// Lower this primitive to its natural Rust type (pointers become `*mut ()`,
// per the impl below).
153 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
// Lower this primitive to an *integer* type of the same width; panics on
// floats (see the impl below).
154 fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
157 impl PrimitiveExt for Primitive {
159 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
161 Int(i, signed) => i.to_ty(tcx, signed),
162 F32 => tcx.types.f32,
163 F64 => tcx.types.f64,
// Pointers have no single "true" type; `*mut ()` is used as a stand-in.
164 Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
168 /// Return an *integer* type matching this primitive.
169 /// Useful in particular when dealing with enum discriminants.
171 fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
173 Int(i, signed) => i.to_ty(tcx, signed),
// A pointer-width discriminant is represented as `usize`.
174 Pointer => tcx.types.usize,
// Floats are not valid discriminant representations; this is a compiler bug.
175 F32 | F64 => bug!("floats do not have an int type"),
180 /// The first half of a fat pointer.
182 /// - For a trait object, this is the address of the box.
183 /// - For a slice, this is the base address.
184 pub const FAT_PTR_ADDR: usize = 0;
186 /// The second half of a fat pointer.
188 /// - For a trait object, this is the address of the vtable.
189 /// - For a slice, this is the length.
190 pub const FAT_PTR_EXTRA: usize = 1;
192 /// The maximum supported number of lanes in a SIMD vector.
194 /// This value is selected based on backend support:
195 /// * LLVM does not appear to have a vector width limit.
196 /// * Cranelift stores the base-2 log of the lane count in a 4 bit integer.
// 1 << 0xF == 2^15 = 32768 lanes: the largest lane count whose base-2 log
// (15) fits in Cranelift's 4-bit field.
197 pub const MAX_SIMD_LANES: u64 = 1 << 0xF;
// The ways layout computation can fail. `Copy` because errors are cheap and
// propagated freely through the layout queries.
// NOTE(review): a `Unknown(Ty)` variant is referenced by the `Display` impl
// below but its declaration line is not visible in this extract.
199 #[derive(Copy, Clone, Debug, HashStable, TyEncodable, TyDecodable)]
200 pub enum LayoutError<'tcx> {
// The type's size exceeds what the target architecture can represent.
202 SizeOverflow(Ty<'tcx>),
// The type could not be normalized, so no layout can be determined.
203 NormalizationFailure(Ty<'tcx>, NormalizationError<'tcx>),
// Human-readable rendering of layout errors, used in diagnostics.
206 impl<'tcx> fmt::Display for LayoutError<'tcx> {
207 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
209 LayoutError::Unknown(ty) => write!(f, "the type `{}` has an unknown layout", ty),
210 LayoutError::SizeOverflow(ty) => {
211 write!(f, "values of the type `{}` are too big for the current architecture", ty)
213 LayoutError::NormalizationFailure(t, e) => write!(
215 "unable to determine layout for `{}` because `{}` cannot be normalized",
217 e.get_type_for_failure()
// Provider for the `layout_of` query: computes (and caches, via the query
// system) the layout of a type in a given `ParamEnv`.
223 #[instrument(skip(tcx, query), level = "debug")]
226 query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
227 ) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
228 ty::tls::with_related_context(tcx, move |icx| {
229 let (param_env, ty) = query.into_parts();
// Guard against runaway recursion through nested layouts (e.g. infinitely
// sized types); this is a fatal error, not a recoverable LayoutError.
232 if !tcx.recursion_limit().value_within_limit(icx.layout_depth) {
233 tcx.sess.fatal(&format!("overflow representing the type `{}`", ty));
236 // Update the ImplicitCtxt to increase the layout_depth
237 let icx = ty::tls::ImplicitCtxt { layout_depth: icx.layout_depth + 1, ..icx.clone() };
239 ty::tls::enter_context(&icx, |_| {
240 let param_env = param_env.with_reveal_all_normalized(tcx);
241 let unnormalized_ty = ty;
243 // FIXME: We might want to have two different versions of `layout_of`:
244 // One that can be called after typecheck has completed and can use
245 // `normalize_erasing_regions` here and another one that can be called
246 // before typecheck has completed and uses `try_normalize_erasing_regions`.
247 let ty = match tcx.try_normalize_erasing_regions(param_env, ty) {
249 Err(normalization_error) => {
250 return Err(LayoutError::NormalizationFailure(ty, normalization_error));
// If normalization changed the type, defer to the query for the normalized
// form so both keys end up cached with the same layout.
254 if ty != unnormalized_ty {
255 // Ensure this layout is also cached for the normalized type.
256 return tcx.layout_of(param_env.and(ty));
259 let cx = LayoutCx { tcx, param_env };
261 let layout = cx.layout_of_uncached(ty)?;
262 let layout = TyAndLayout { ty, layout };
// Side effect: records the layout for `-Zprint-type-sizes`-style reporting.
264 cx.record_layout_for_printing(layout);
266 // Type-level uninhabitedness should always imply ABI uninhabitedness.
267 if tcx.conservative_is_privately_uninhabited(param_env.and(ty)) {
268 assert!(layout.abi.is_uninhabited());
// Context bundling everything needed to compute layouts: the (generic)
// compiler context `C` plus the parameter environment.
// NOTE(review): the `tcx: C` field line is not visible in this extract.
276 pub struct LayoutCx<'tcx, C> {
278 pub param_env: ty::ParamEnv<'tcx>,
// Classifies how a univariant struct-like layout may be used, which controls
// field-reordering decisions in `univariant_uninterned`.
// NOTE(review): the `pub enum StructKind {` header line is not visible in
// this extract; only the variants and their docs appear below.
281 #[derive(Copy, Clone, Debug)]
283 /// A tuple, closure, or univariant which cannot be coerced to unsized.
285 /// A univariant, the last field of which may be coerced to unsized.
287 /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
288 Prefixed(Size, Align),
// Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
// This is used to go between `memory_index` (source field order to memory order)
// and `inverse_memory_index` (memory order to source field order).
// See also `FieldsShape::Arbitrary::memory_index` for more details.
// FIXME(eddyb) build a better abstraction for permutations, if possible.
//
// The input must be a permutation of `0..map.len()`; out-of-range values
// would panic on the indexed write below.
fn invert_mapping(map: &[u32]) -> Vec<u32> {
    let mut inverse = vec![0; map.len()];
    for i in 0..map.len() {
        inverse[map[i] as usize] = i as u32;
    }
    inverse
}
304 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
// Builds the canonical `ScalarPair` layout for two scalars `a` then `b`:
// `a` at offset 0, `b` at `a`'s size rounded up to `b`'s alignment, total
// size rounded up to the pair's (aggregate-bumped) alignment.
305 fn scalar_pair(&self, a: Scalar, b: Scalar) -> Layout {
306 let dl = self.data_layout();
307 let b_align = b.value.align(dl);
308 let align = a.value.align(dl).max(b_align).max(dl.aggregate_align);
309 let b_offset = a.value.size(dl).align_to(b_align.abi);
310 let size = (b_offset + b.value.size(dl)).align_to(align.abi);
312 // HACK(nox): We iter on `b` and then `a` because `max_by_key`
313 // returns the last maximum.
314 let largest_niche = Niche::from_scalar(dl, b_offset, b)
316 .chain(Niche::from_scalar(dl, Size::ZERO, a))
317 .max_by_key(|niche| niche.available(dl));
320 variants: Variants::Single { index: VariantIdx::new(0) },
321 fields: FieldsShape::Arbitrary {
322 offsets: vec![Size::ZERO, b_offset],
323 memory_index: vec![0, 1],
325 abi: Abi::ScalarPair(a, b),
// Computes the (uninterned) layout of a single-variant aggregate: resolves
// field ordering (optimized, randomized, or source order), assigns offsets
// respecting packing/alignment, tracks the largest niche, and finally tries
// to "unpack" the aggregate into a Scalar/ScalarPair ABI when possible.
332 fn univariant_uninterned(
335 fields: &[TyAndLayout<'_>],
338 ) -> Result<Layout, LayoutError<'tcx>> {
339 let dl = self.data_layout();
340 let pack = repr.pack;
// `#[repr(packed)]` + `#[repr(align)]` together are rejected earlier;
// reaching here with both is a compiler bug, reported as a delayed ICE.
341 if pack.is_some() && repr.align.is_some() {
342 self.tcx.sess.delay_span_bug(DUMMY_SP, "struct cannot be packed and aligned");
343 return Err(LayoutError::Unknown(ty));
346 let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };
// Start with the identity ordering (memory order == source order).
348 let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();
350 let optimize = !repr.inhibit_struct_field_reordering_opt();
// A MaybeUnsized struct must keep its last field last, so only the prefix
// of the index vector participates in reordering.
353 if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
354 let optimizing = &mut inverse_memory_index[..end];
// Effective per-field alignment, capped by `#[repr(packed(N))]` if present.
355 let field_align = |f: &TyAndLayout<'_>| {
356 if let Some(pack) = pack { f.align.abi.min(pack) } else { f.align.abi }
359 // If `-Z randomize-layout` was enabled for the type definition we can shuffle
360 // the field ordering to try and catch some code making assumptions about layouts
361 // we don't guarantee
362 if repr.can_randomize_type_layout() {
363 // `ReprOptions.layout_seed` is a deterministic seed that we can use to
364 // randomize field ordering with
365 let mut rng = Xoshiro128StarStar::seed_from_u64(repr.field_shuffle_seed);
367 // Shuffle the ordering of the fields
368 optimizing.shuffle(&mut rng);
370 // Otherwise we just leave things alone and actually optimize the type's fields
373 StructKind::AlwaysSized | StructKind::MaybeUnsized => {
374 optimizing.sort_by_key(|&x| {
375 // Place ZSTs first to avoid "interesting offsets",
376 // especially with only one or two non-ZST fields.
377 let f = &fields[x as usize];
378 (!f.is_zst(), cmp::Reverse(field_align(f)))
382 StructKind::Prefixed(..) => {
383 // Sort in ascending alignment so that the layout stays optimal
384 // regardless of the prefix
385 optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
389 // FIXME(Kixiron): We can always shuffle fields within a given alignment class
390 // regardless of the status of `-Z randomize-layout`
394 // inverse_memory_index holds field indices by increasing memory offset.
395 // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
396 // We now write field offsets to the corresponding offset slot;
397 // field 5 with offset 0 puts 0 in offsets[5].
398 // At the bottom of this function, we invert `inverse_memory_index` to
399 // produce `memory_index` (see `invert_mapping`).
401 let mut sized = true;
402 let mut offsets = vec![Size::ZERO; fields.len()];
403 let mut offset = Size::ZERO;
404 let mut largest_niche = None;
405 let mut largest_niche_available = 0;
// A Prefixed layout reserves space up front (e.g. for an enum tag) before
// placing any fields.
407 if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
409 if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
410 align = align.max(AbiAndPrefAlign::new(prefix_align));
411 offset = prefix_size.align_to(prefix_align);
// Lay out fields in the chosen memory order.
414 for &i in &inverse_memory_index {
415 let field = fields[i as usize];
// A field after an unsized one is a compiler bug (delayed ICE below).
417 self.tcx.sess.delay_span_bug(
420 "univariant: field #{} of `{}` comes after unsized field",
427 if field.is_unsized() {
431 // Invariant: offset < dl.obj_size_bound() <= 1<<61
432 let field_align = if let Some(pack) = pack {
433 field.align.min(AbiAndPrefAlign::new(pack))
437 offset = offset.align_to(field_align.abi);
438 align = align.max(field_align);
440 debug!("univariant offset: {:?} field: {:#?}", offset, field);
441 offsets[i as usize] = offset;
// Track the largest niche across fields, adjusting its offset to be
// relative to the start of this struct.
443 if !repr.hide_niche() {
444 if let Some(mut niche) = field.largest_niche {
445 let available = niche.available(dl);
446 if available > largest_niche_available {
447 largest_niche_available = available;
448 niche.offset += offset;
449 largest_niche = Some(niche);
// Overflow here means the type is too large for the target.
454 offset = offset.checked_add(field.size, dl).ok_or(LayoutError::SizeOverflow(ty))?;
457 if let Some(repr_align) = repr.align {
458 align = align.max(AbiAndPrefAlign::new(repr_align));
461 debug!("univariant min_size: {:?}", offset);
462 let min_size = offset;
464 // As stated above, inverse_memory_index holds field indices by increasing offset.
465 // This makes it an already-sorted view of the offsets vec.
466 // To invert it, consider:
467 // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
468 // Field 5 would be the first element, so memory_index is i:
469 // Note: if we didn't optimize, it's already right.
472 if optimize { invert_mapping(&inverse_memory_index) } else { inverse_memory_index };
474 let size = min_size.align_to(align.abi);
475 let mut abi = Abi::Aggregate { sized };
477 // Unpack newtype ABIs and find scalar pairs.
478 if sized && size.bytes() > 0 {
479 // All other fields must be ZSTs.
480 let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());
482 match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
483 // We have exactly one non-ZST field.
484 (Some((i, field)), None, None) => {
485 // Field fills the struct and it has a scalar or scalar pair ABI.
486 if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size
489 // For plain scalars, or vectors of them, we can't unpack
490 // newtypes for `#[repr(C)]`, as that affects C ABIs.
491 Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
494 // But scalar pairs are Rust-specific and get
495 // treated as aggregates by C ABIs anyway.
496 Abi::ScalarPair(..) => {
504 // Two non-ZST fields, and they're both scalars.
506 Some((i, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(a), .. }, .. })),
507 Some((j, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(b), .. }, .. })),
510 // Order by the memory placement, not source order.
511 let ((i, a), (j, b)) =
512 if offsets[i] < offsets[j] { ((i, a), (j, b)) } else { ((j, b), (i, a)) };
// Only adopt the ScalarPair ABI if the canonical pair layout agrees
// exactly with the offsets/alignment we already computed.
513 let pair = self.scalar_pair(a, b);
514 let pair_offsets = match pair.fields {
515 FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
516 assert_eq!(memory_index, &[0, 1]);
521 if offsets[i] == pair_offsets[0]
522 && offsets[j] == pair_offsets[1]
523 && align == pair.align
526 // We can use `ScalarPair` only when it matches our
527 // already computed layout (including `#[repr(C)]`).
// Any uninhabited field makes the whole aggregate uninhabited.
536 if fields.iter().any(|f| f.abi.is_uninhabited()) {
537 abi = Abi::Uninhabited;
541 variants: Variants::Single { index: VariantIdx::new(0) },
542 fields: FieldsShape::Arbitrary { offsets, memory_index },
550 fn layout_of_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx Layout, LayoutError<'tcx>> {
552 let param_env = self.param_env;
553 let dl = self.data_layout();
554 let scalar_unit = |value: Primitive| {
555 let size = value.size(dl);
556 assert!(size.bits() <= 128);
557 Scalar { value, valid_range: WrappingRange { start: 0, end: size.unsigned_int_max() } }
559 let scalar = |value: Primitive| tcx.intern_layout(Layout::scalar(self, scalar_unit(value)));
561 let univariant = |fields: &[TyAndLayout<'_>], repr: &ReprOptions, kind| {
562 Ok(tcx.intern_layout(self.univariant_uninterned(ty, fields, repr, kind)?))
564 debug_assert!(!ty.has_infer_types_or_consts());
566 Ok(match *ty.kind() {
568 ty::Bool => tcx.intern_layout(Layout::scalar(
570 Scalar { value: Int(I8, false), valid_range: WrappingRange { start: 0, end: 1 } },
572 ty::Char => tcx.intern_layout(Layout::scalar(
575 value: Int(I32, false),
576 valid_range: WrappingRange { start: 0, end: 0x10FFFF },
579 ty::Int(ity) => scalar(Int(Integer::from_int_ty(dl, ity), true)),
580 ty::Uint(ity) => scalar(Int(Integer::from_uint_ty(dl, ity), false)),
581 ty::Float(fty) => scalar(match fty {
582 ty::FloatTy::F32 => F32,
583 ty::FloatTy::F64 => F64,
586 let mut ptr = scalar_unit(Pointer);
587 ptr.valid_range = ptr.valid_range.with_start(1);
588 tcx.intern_layout(Layout::scalar(self, ptr))
592 ty::Never => tcx.intern_layout(Layout {
593 variants: Variants::Single { index: VariantIdx::new(0) },
594 fields: FieldsShape::Primitive,
595 abi: Abi::Uninhabited,
601 // Potentially-wide pointers.
602 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
603 let mut data_ptr = scalar_unit(Pointer);
604 if !ty.is_unsafe_ptr() {
605 data_ptr.valid_range = data_ptr.valid_range.with_start(1);
608 let pointee = tcx.normalize_erasing_regions(param_env, pointee);
609 if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
610 return Ok(tcx.intern_layout(Layout::scalar(self, data_ptr)));
613 let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
614 let metadata = match unsized_part.kind() {
616 return Ok(tcx.intern_layout(Layout::scalar(self, data_ptr)));
618 ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)),
620 let mut vtable = scalar_unit(Pointer);
621 vtable.valid_range = vtable.valid_range.with_start(1);
624 _ => return Err(LayoutError::Unknown(unsized_part)),
627 // Effectively a (ptr, meta) tuple.
628 tcx.intern_layout(self.scalar_pair(data_ptr, metadata))
631 // Arrays and slices.
632 ty::Array(element, mut count) => {
633 if count.has_projections() {
634 count = tcx.normalize_erasing_regions(param_env, count);
635 if count.has_projections() {
636 return Err(LayoutError::Unknown(ty));
640 let count = count.try_eval_usize(tcx, param_env).ok_or(LayoutError::Unknown(ty))?;
641 let element = self.layout_of(element)?;
643 element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;
646 if count != 0 && tcx.conservative_is_privately_uninhabited(param_env.and(ty)) {
649 Abi::Aggregate { sized: true }
652 let largest_niche = if count != 0 { element.largest_niche } else { None };
654 tcx.intern_layout(Layout {
655 variants: Variants::Single { index: VariantIdx::new(0) },
656 fields: FieldsShape::Array { stride: element.size, count },
659 align: element.align,
663 ty::Slice(element) => {
664 let element = self.layout_of(element)?;
665 tcx.intern_layout(Layout {
666 variants: Variants::Single { index: VariantIdx::new(0) },
667 fields: FieldsShape::Array { stride: element.size, count: 0 },
668 abi: Abi::Aggregate { sized: false },
670 align: element.align,
674 ty::Str => tcx.intern_layout(Layout {
675 variants: Variants::Single { index: VariantIdx::new(0) },
676 fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
677 abi: Abi::Aggregate { sized: false },
684 ty::FnDef(..) => univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?,
685 ty::Dynamic(..) | ty::Foreign(..) => {
686 let mut unit = self.univariant_uninterned(
689 &ReprOptions::default(),
690 StructKind::AlwaysSized,
693 Abi::Aggregate { ref mut sized } => *sized = false,
696 tcx.intern_layout(unit)
699 ty::Generator(def_id, substs, _) => self.generator_layout(ty, def_id, substs)?,
701 ty::Closure(_, ref substs) => {
702 let tys = substs.as_closure().upvar_tys();
704 &tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
705 &ReprOptions::default(),
706 StructKind::AlwaysSized,
712 if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };
715 &tys.iter().map(|k| self.layout_of(k)).collect::<Result<Vec<_>, _>>()?,
716 &ReprOptions::default(),
721 // SIMD vector types.
722 ty::Adt(def, substs) if def.repr.simd() => {
723 if !def.is_struct() {
724 // Should have yielded E0517 by now.
725 tcx.sess.delay_span_bug(
727 "#[repr(simd)] was applied to an ADT that is not a struct",
729 return Err(LayoutError::Unknown(ty));
732 // Supported SIMD vectors are homogeneous ADTs with at least one field:
734 // * #[repr(simd)] struct S(T, T, T, T);
735 // * #[repr(simd)] struct S { x: T, y: T, z: T, w: T }
736 // * #[repr(simd)] struct S([T; 4])
738 // where T is a primitive scalar (integer/float/pointer).
740 // SIMD vectors with zero fields are not supported.
741 // (should be caught by typeck)
742 if def.non_enum_variant().fields.is_empty() {
743 tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
746 // Type of the first ADT field:
747 let f0_ty = def.non_enum_variant().fields[0].ty(tcx, substs);
749 // Heterogeneous SIMD vectors are not supported:
750 // (should be caught by typeck)
751 for fi in &def.non_enum_variant().fields {
752 if fi.ty(tcx, substs) != f0_ty {
753 tcx.sess.fatal(&format!("monomorphising heterogeneous SIMD type `{}`", ty));
757 // The element type and number of elements of the SIMD vector
758 // are obtained from:
760 // * the element type and length of the single array field, if
761 // the first field is of array type, or
763 // * the homogenous field type and the number of fields.
764 let (e_ty, e_len, is_array) = if let ty::Array(e_ty, _) = f0_ty.kind() {
765 // First ADT field is an array:
767 // SIMD vectors with multiple array fields are not supported:
768 // (should be caught by typeck)
769 if def.non_enum_variant().fields.len() != 1 {
770 tcx.sess.fatal(&format!(
771 "monomorphising SIMD type `{}` with more than one array field",
776 // Extract the number of elements from the layout of the array field:
778 layout: Layout { fields: FieldsShape::Array { count, .. }, .. },
780 }) = self.layout_of(f0_ty) else {
781 return Err(LayoutError::Unknown(ty));
784 (*e_ty, *count, true)
786 // First ADT field is not an array:
787 (f0_ty, def.non_enum_variant().fields.len() as _, false)
790 // SIMD vectors of zero length are not supported.
791 // Additionally, lengths are capped at 2^16 as a fixed maximum backends must
794 // Can't be caught in typeck if the array length is generic.
796 tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
797 } else if e_len > MAX_SIMD_LANES {
798 tcx.sess.fatal(&format!(
799 "monomorphising SIMD type `{}` of length greater than {}",
804 // Compute the ABI of the element type:
805 let e_ly = self.layout_of(e_ty)?;
806 let Abi::Scalar(e_abi) = e_ly.abi else {
807 // This error isn't caught in typeck, e.g., if
808 // the element type of the vector is generic.
809 tcx.sess.fatal(&format!(
810 "monomorphising SIMD type `{}` with a non-primitive-scalar \
811 (integer/float/pointer) element type `{}`",
816 // Compute the size and alignment of the vector:
817 let size = e_ly.size.checked_mul(e_len, dl).ok_or(LayoutError::SizeOverflow(ty))?;
818 let align = dl.vector_align(size);
819 let size = size.align_to(align.abi);
821 // Compute the placement of the vector fields:
822 let fields = if is_array {
823 FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] }
825 FieldsShape::Array { stride: e_ly.size, count: e_len }
828 tcx.intern_layout(Layout {
829 variants: Variants::Single { index: VariantIdx::new(0) },
831 abi: Abi::Vector { element: e_abi, count: e_len },
832 largest_niche: e_ly.largest_niche,
839 ty::Adt(def, substs) => {
840 // Cache the field layouts.
847 .map(|field| self.layout_of(field.ty(tcx, substs)))
848 .collect::<Result<Vec<_>, _>>()
850 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
853 if def.repr.pack.is_some() && def.repr.align.is_some() {
854 self.tcx.sess.delay_span_bug(
855 tcx.def_span(def.did),
856 "union cannot be packed and aligned",
858 return Err(LayoutError::Unknown(ty));
862 if def.repr.pack.is_some() { dl.i8_align } else { dl.aggregate_align };
864 if let Some(repr_align) = def.repr.align {
865 align = align.max(AbiAndPrefAlign::new(repr_align));
868 let optimize = !def.repr.inhibit_union_abi_opt();
869 let mut size = Size::ZERO;
870 let mut abi = Abi::Aggregate { sized: true };
871 let index = VariantIdx::new(0);
872 for field in &variants[index] {
873 assert!(!field.is_unsized());
874 align = align.max(field.align);
876 // If all non-ZST fields have the same ABI, forward this ABI
877 if optimize && !field.is_zst() {
878 // Normalize scalar_unit to the maximal valid range
879 let field_abi = match field.abi {
880 Abi::Scalar(x) => Abi::Scalar(scalar_unit(x.value)),
881 Abi::ScalarPair(x, y) => {
882 Abi::ScalarPair(scalar_unit(x.value), scalar_unit(y.value))
884 Abi::Vector { element: x, count } => {
885 Abi::Vector { element: scalar_unit(x.value), count }
887 Abi::Uninhabited | Abi::Aggregate { .. } => {
888 Abi::Aggregate { sized: true }
892 if size == Size::ZERO {
893 // first non ZST: initialize 'abi'
895 } else if abi != field_abi {
896 // different fields have different ABI: reset to Aggregate
897 abi = Abi::Aggregate { sized: true };
901 size = cmp::max(size, field.size);
904 if let Some(pack) = def.repr.pack {
905 align = align.min(AbiAndPrefAlign::new(pack));
908 return Ok(tcx.intern_layout(Layout {
909 variants: Variants::Single { index },
910 fields: FieldsShape::Union(
911 NonZeroUsize::new(variants[index].len())
912 .ok_or(LayoutError::Unknown(ty))?,
917 size: size.align_to(align.abi),
921 // A variant is absent if it's uninhabited and only has ZST fields.
922 // Present uninhabited variants only require space for their fields,
923 // but *not* an encoding of the discriminant (e.g., a tag value).
924 // See issue #49298 for more details on the need to leave space
925 // for non-ZST uninhabited data (mostly partial initialization).
926 let absent = |fields: &[TyAndLayout<'_>]| {
927 let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
928 let is_zst = fields.iter().all(|f| f.is_zst());
929 uninhabited && is_zst
931 let (present_first, present_second) = {
932 let mut present_variants = variants
934 .filter_map(|(i, v)| if absent(v) { None } else { Some(i) });
935 (present_variants.next(), present_variants.next())
937 let present_first = match present_first {
938 Some(present_first) => present_first,
939 // Uninhabited because it has no variants, or only absent ones.
940 None if def.is_enum() => {
941 return Ok(tcx.layout_of(param_env.and(tcx.types.never))?.layout);
943 // If it's a struct, still compute a layout so that we can still compute the
945 None => VariantIdx::new(0),
948 let is_struct = !def.is_enum() ||
949 // Only one variant is present.
950 (present_second.is_none() &&
951 // Representation optimizations are allowed.
952 !def.repr.inhibit_enum_layout_opt());
954 // Struct, or univariant enum equivalent to a struct.
955 // (Typechecking will reject discriminant-sizing attrs.)
957 let v = present_first;
958 let kind = if def.is_enum() || variants[v].is_empty() {
959 StructKind::AlwaysSized
961 let param_env = tcx.param_env(def.did);
962 let last_field = def.variants[v].fields.last().unwrap();
964 tcx.type_of(last_field.did).is_sized(tcx.at(DUMMY_SP), param_env);
966 StructKind::MaybeUnsized
968 StructKind::AlwaysSized
972 let mut st = self.univariant_uninterned(ty, &variants[v], &def.repr, kind)?;
973 st.variants = Variants::Single { index: v };
974 let (start, end) = self.tcx.layout_scalar_valid_range(def.did);
976 Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
977 // the asserts ensure that we are not using the
978 // `#[rustc_layout_scalar_valid_range(n)]`
979 // attribute to widen the range of anything as that would probably
980 // result in UB somewhere
981 // FIXME(eddyb) the asserts are probably not needed,
982 // as larger validity ranges would result in missed
983 // optimizations, *not* wrongly assuming the inner
984 // value is valid. e.g. unions enlarge validity ranges,
985 // because the values may be uninitialized.
986 if let Bound::Included(start) = start {
987 // FIXME(eddyb) this might be incorrect - it doesn't
988 // account for wrap-around (end < start) ranges.
989 assert!(scalar.valid_range.start <= start);
990 scalar.valid_range.start = start;
992 if let Bound::Included(end) = end {
993 // FIXME(eddyb) this might be incorrect - it doesn't
994 // account for wrap-around (end < start) ranges.
995 assert!(scalar.valid_range.end >= end);
996 scalar.valid_range.end = end;
999 // Update `largest_niche` if we have introduced a larger niche.
1000 let niche = if def.repr.hide_niche() {
1003 Niche::from_scalar(dl, Size::ZERO, *scalar)
1005 if let Some(niche) = niche {
1006 match st.largest_niche {
1007 Some(largest_niche) => {
1008 // Replace the existing niche even if they're equal,
1009 // because this one is at a lower offset.
1010 if largest_niche.available(dl) <= niche.available(dl) {
1011 st.largest_niche = Some(niche);
1014 None => st.largest_niche = Some(niche),
1019 start == Bound::Unbounded && end == Bound::Unbounded,
1020 "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
1026 return Ok(tcx.intern_layout(st));
1029 // At this point, we have handled all unions and
1030 // structs. (We have also handled univariant enums
1031 // that allow representation optimization.)
1032 assert!(def.is_enum());
1034 // The current code for niche-filling relies on variant indices
1035 // instead of actual discriminants, so dataful enums with
1036 // explicit discriminants (RFC #2363) would misbehave.
1037 let no_explicit_discriminants = def
1040 .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i.as_u32()));
1042 let mut niche_filling_layout = None;
1044 // Niche-filling enum optimization.
1045 if !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants {
1046 let mut dataful_variant = None;
1047 let mut niche_variants = VariantIdx::MAX..=VariantIdx::new(0);
1049 // Find one non-ZST variant.
1050 'variants: for (v, fields) in variants.iter_enumerated() {
1056 if dataful_variant.is_none() {
1057 dataful_variant = Some(v);
1060 dataful_variant = None;
1065 niche_variants = *niche_variants.start().min(&v)..=v;
1068 if niche_variants.start() > niche_variants.end() {
1069 dataful_variant = None;
1072 if let Some(i) = dataful_variant {
1073 let count = (niche_variants.end().as_u32()
1074 - niche_variants.start().as_u32()
1077 // Find the field with the largest niche
1078 let niche_candidate = variants[i]
1081 .filter_map(|(j, field)| Some((j, field.largest_niche?)))
1082 .max_by_key(|(_, niche)| niche.available(dl));
1084 if let Some((field_index, niche, (niche_start, niche_scalar))) =
1085 niche_candidate.and_then(|(field_index, niche)| {
1086 Some((field_index, niche, niche.reserve(self, count)?))
1089 let mut align = dl.aggregate_align;
1093 let mut st = self.univariant_uninterned(
1097 StructKind::AlwaysSized,
1099 st.variants = Variants::Single { index: j };
1101 align = align.max(st.align);
1105 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1107 let offset = st[i].fields.offset(field_index) + niche.offset;
1108 let size = st[i].size;
1110 let abi = if st.iter().all(|v| v.abi.is_uninhabited()) {
1114 Abi::Scalar(_) => Abi::Scalar(niche_scalar),
1115 Abi::ScalarPair(first, second) => {
1116 // We need to use scalar_unit to reset the
1117 // valid range to the maximal one for that
1118 // primitive, because only the niche is
1119 // guaranteed to be initialised, not the
1121 if offset.bytes() == 0 {
1122 Abi::ScalarPair(niche_scalar, scalar_unit(second.value))
1124 Abi::ScalarPair(scalar_unit(first.value), niche_scalar)
1127 _ => Abi::Aggregate { sized: true },
1131 let largest_niche = Niche::from_scalar(dl, offset, niche_scalar);
1133 niche_filling_layout = Some(Layout {
1134 variants: Variants::Multiple {
1136 tag_encoding: TagEncoding::Niche {
1144 fields: FieldsShape::Arbitrary {
1145 offsets: vec![offset],
1146 memory_index: vec![0],
1157 let (mut min, mut max) = (i128::MAX, i128::MIN);
1158 let discr_type = def.repr.discr_type();
1159 let bits = Integer::from_attr(self, discr_type).size().bits();
1160 for (i, discr) in def.discriminants(tcx) {
1161 if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
1164 let mut x = discr.val as i128;
1165 if discr_type.is_signed() {
1166 // sign extend the raw representation to be an i128
1167 x = (x << (128 - bits)) >> (128 - bits);
1176 // We might have no inhabited variants, so pretend there's at least one.
1177 if (min, max) == (i128::MAX, i128::MIN) {
1181 assert!(min <= max, "discriminant range is {}...{}", min, max);
1182 let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max);
1184 let mut align = dl.aggregate_align;
1185 let mut size = Size::ZERO;
1187 // We're interested in the smallest alignment, so start large.
1188 let mut start_align = Align::from_bytes(256).unwrap();
1189 assert_eq!(Integer::for_align(dl, start_align), None);
1191 // repr(C) on an enum tells us to make a (tag, union) layout,
1192 // so we need to grow the prefix alignment to be at least
1193 // the alignment of the union. (This value is used both for
1194 // determining the alignment of the overall enum, and the
1195 // determining the alignment of the payload after the tag.)
1196 let mut prefix_align = min_ity.align(dl).abi;
1198 for fields in &variants {
1199 for field in fields {
1200 prefix_align = prefix_align.max(field.align.abi);
1205 // Create the set of structs that represent each variant.
1206 let mut layout_variants = variants
1208 .map(|(i, field_layouts)| {
1209 let mut st = self.univariant_uninterned(
1213 StructKind::Prefixed(min_ity.size(), prefix_align),
1215 st.variants = Variants::Single { index: i };
1216 // Find the first field we can't move later
1217 // to make room for a larger discriminant.
1219 st.fields.index_by_increasing_offset().map(|j| field_layouts[j])
1221 if !field.is_zst() || field.align.abi.bytes() != 1 {
1222 start_align = start_align.min(field.align.abi);
1226 size = cmp::max(size, st.size);
1227 align = align.max(st.align);
1230 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1232 // Align the maximum variant size to the largest alignment.
1233 size = size.align_to(align.abi);
1235 if size.bytes() >= dl.obj_size_bound() {
1236 return Err(LayoutError::SizeOverflow(ty));
1239 let typeck_ity = Integer::from_attr(dl, def.repr.discr_type());
1240 if typeck_ity < min_ity {
1241 // It is a bug if Layout decided on a greater discriminant size than typeck for
1242 // some reason at this point (based on values discriminant can take on). Mostly
1243 // because this discriminant will be loaded, and then stored into variable of
1244 // type calculated by typeck. Consider such case (a bug): typeck decided on
1245 // byte-sized discriminant, but layout thinks we need a 16-bit to store all
1246 // discriminant values. That would be a bug, because then, in codegen, in order
1247 // to store this 16-bit discriminant into 8-bit sized temporary some of the
1248 // space necessary to represent would have to be discarded (or layout is wrong
1249 // on thinking it needs 16 bits)
1251 "layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
1255 // However, it is fine to make discr type however large (as an optimisation)
1256 // after this point – we’ll just truncate the value we load in codegen.
1259 // Check to see if we should use a different type for the
1260 // discriminant. We can safely use a type with the same size
1261 // as the alignment of the first field of each variant.
1262 // We increase the size of the discriminant to avoid LLVM copying
1263 // padding when it doesn't need to. This normally causes unaligned
1264 // load/stores and excessive memcpy/memset operations. By using a
1265 // bigger integer size, LLVM can be sure about its contents and
1266 // won't be so conservative.
1268 // Use the initial field alignment
1269 let mut ity = if def.repr.c() || def.repr.int.is_some() {
1272 Integer::for_align(dl, start_align).unwrap_or(min_ity)
1275 // If the alignment is not larger than the chosen discriminant size,
1276 // don't use the alignment as the final size.
1280 // Patch up the variants' first few fields.
1281 let old_ity_size = min_ity.size();
1282 let new_ity_size = ity.size();
1283 for variant in &mut layout_variants {
1284 match variant.fields {
1285 FieldsShape::Arbitrary { ref mut offsets, .. } => {
1287 if *i <= old_ity_size {
1288 assert_eq!(*i, old_ity_size);
1292 // We might be making the struct larger.
1293 if variant.size <= old_ity_size {
1294 variant.size = new_ity_size;
1302 let tag_mask = ity.size().unsigned_int_max();
1304 value: Int(ity, signed),
1305 valid_range: WrappingRange {
1306 start: (min as u128 & tag_mask),
1307 end: (max as u128 & tag_mask),
1310 let mut abi = Abi::Aggregate { sized: true };
1312 // Without the latter check, aligned enums with custom discriminant values
1313 // would result in an ICE; see issue #92464 for more info.
1314 if tag.value.size(dl) == size || variants.iter().all(|layout| layout.is_empty()) {
1315 abi = Abi::Scalar(tag);
1317 // Try to use a ScalarPair for all tagged enums.
1318 let mut common_prim = None;
1319 for (field_layouts, layout_variant) in iter::zip(&variants, &layout_variants) {
1320 let FieldsShape::Arbitrary { ref offsets, .. } = layout_variant.fields else {
1324 iter::zip(field_layouts, offsets).filter(|p| !p.0.is_zst());
1325 let (field, offset) = match (fields.next(), fields.next()) {
1326 (None, None) => continue,
1327 (Some(pair), None) => pair,
1333 let prim = match field.abi {
1334 Abi::Scalar(scalar) => scalar.value,
1340 if let Some(pair) = common_prim {
1341 // This is pretty conservative. We could go fancier
1342 // by conflating things like i32 and u32, or even
1343 // realising that (u8, u8) could just cohabit with
1345 if pair != (prim, offset) {
1350 common_prim = Some((prim, offset));
1353 if let Some((prim, offset)) = common_prim {
1354 let pair = self.scalar_pair(tag, scalar_unit(prim));
1355 let pair_offsets = match pair.fields {
1356 FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
1357 assert_eq!(memory_index, &[0, 1]);
1362 if pair_offsets[0] == Size::ZERO
1363 && pair_offsets[1] == *offset
1364 && align == pair.align
1365 && size == pair.size
1367 // We can use `ScalarPair` only when it matches our
1368 // already computed layout (including `#[repr(C)]`).
1374 if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
1375 abi = Abi::Uninhabited;
1378 let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag);
1380 let tagged_layout = Layout {
1381 variants: Variants::Multiple {
1383 tag_encoding: TagEncoding::Direct,
1385 variants: layout_variants,
1387 fields: FieldsShape::Arbitrary {
1388 offsets: vec![Size::ZERO],
1389 memory_index: vec![0],
1397 let best_layout = match (tagged_layout, niche_filling_layout) {
1398 (tagged_layout, Some(niche_filling_layout)) => {
1399 // Pick the smaller layout; otherwise,
1400 // pick the layout with the larger niche; otherwise,
1401 // pick tagged as it has simpler codegen.
1402 cmp::min_by_key(tagged_layout, niche_filling_layout, |layout| {
1403 let niche_size = layout.largest_niche.map_or(0, |n| n.available(dl));
1404 (layout.size, cmp::Reverse(niche_size))
1407 (tagged_layout, None) => tagged_layout,
1410 tcx.intern_layout(best_layout)
1413 // Types with no meaningful known layout.
1414 ty::Projection(_) | ty::Opaque(..) => {
1415 // NOTE(eddyb) `layout_of` query should've normalized these away,
1416 // if that was possible, so there's no reason to try again here.
1417 return Err(LayoutError::Unknown(ty));
1420 ty::Placeholder(..) | ty::GeneratorWitness(..) | ty::Infer(_) => {
1421 bug!("Layout::compute: unexpected type `{}`", ty)
1424 ty::Bound(..) | ty::Param(_) | ty::Error(_) => {
1425 return Err(LayoutError::Unknown(ty));
1431 /// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
1432 #[derive(Clone, Debug, PartialEq)]
1433 enum SavedLocalEligibility {
// NOTE(review): this listing elides an `Unassigned` variant (original line 1434);
// `generator_saved_local_eligibility` below initializes every local to it via
// `IndexVec::from_elem_n(Unassigned, ...)`.
/// The local lives only in this one generator variant, so it may share
/// storage with eligible locals of other variants.
1435 Assigned(VariantIdx),
1436 // FIXME: Use newtype_index so we aren't wasting bytes
/// The local must live in the generator's prefix; once prefix order is
/// decided, the `Option` holds its promoted-field index.
1437 Ineligible(Option<u32>),
1440 // When laying out generators, we divide our saved local fields into two
1441 // categories: overlap-eligible and overlap-ineligible.
1443 // Those fields which are ineligible for overlap go in a "prefix" at the
1444 // beginning of the layout, and always have space reserved for them.
1446 // Overlap-eligible fields are only assigned to one variant, so we lay
1447 // those fields out for each variant and put them right after the
1450 // Finally, in the layout details, we point to the fields from the
1451 // variants they are assigned to. It is possible for some fields to be
1452 // included in multiple variants. No field ever "moves around" in the
1453 // layout; its offset is always the same.
1455 // Also included in the layout are the upvars and the discriminant.
1456 // These are included as fields on the "outer" layout; they are not part
1458 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
1459 /// Compute the eligibility and assignment of each local.
/// Returns the set of locals forced into the generator prefix ("ineligible")
/// together with the per-local `SavedLocalEligibility` table.
// NOTE(review): some lines of this body are elided in this listing (see the gaps
// in the embedded line numbers); the visible code is kept verbatim.
1460 fn generator_saved_local_eligibility(
1462 info: &GeneratorLayout<'tcx>,
1463 ) -> (BitSet<GeneratorSavedLocal>, IndexVec<GeneratorSavedLocal, SavedLocalEligibility>) {
1464 use SavedLocalEligibility::*;
// Start every saved local as `Unassigned`; the passes below refine this.
1466 let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> =
1467 IndexVec::from_elem_n(Unassigned, info.field_tys.len());
1469 // The saved locals not eligible for overlap. These will get
1470 // "promoted" to the prefix of our generator.
1471 let mut ineligible_locals = BitSet::new_empty(info.field_tys.len());
1473 // Figure out which of our saved locals are fields in only
1474 // one variant. The rest are deemed ineligible for overlap.
1475 for (variant_index, fields) in info.variant_fields.iter_enumerated() {
1476 for local in fields {
1477 match assignments[*local] {
1479 assignments[*local] = Assigned(variant_index);
1482 // We've already seen this local at another suspension
1483 // point, so it is no longer a candidate.
1485 "removing local {:?} in >1 variant ({:?}, {:?})",
1490 ineligible_locals.insert(*local);
1491 assignments[*local] = Ineligible(None);
1498 // Next, check every pair of eligible locals to see if they conflict.
1500 for local_a in info.storage_conflicts.rows() {
1501 let conflicts_a = info.storage_conflicts.count(local_a);
1502 if ineligible_locals.contains(local_a) {
1506 for local_b in info.storage_conflicts.iter(local_a) {
1507 // local_a and local_b are storage live at the same time, therefore they
1508 // cannot overlap in the generator layout. The only way to guarantee
1509 // this is if they are in the same variant, or one is ineligible
1510 // (which means it is stored in every variant).
1511 if ineligible_locals.contains(local_b)
1512 || assignments[local_a] == assignments[local_b]
1517 // If they conflict, we will choose one to make ineligible.
1518 // This is not always optimal; it's just a greedy heuristic that
1519 // seems to produce good results most of the time.
// Evict whichever local participates in more conflicts, keeping the other.
1520 let conflicts_b = info.storage_conflicts.count(local_b);
1521 let (remove, other) =
1522 if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
1523 ineligible_locals.insert(remove);
1524 assignments[remove] = Ineligible(None);
1525 trace!("removing local {:?} due to conflict with {:?}", remove, other);
1529 // Count the number of variants in use. If only one of them, then it is
1530 // impossible to overlap any locals in our layout. In this case it's
1531 // always better to make the remaining locals ineligible, so we can
1532 // lay them out with the other locals in the prefix and eliminate
1533 // unnecessary padding bytes.
1535 let mut used_variants = BitSet::new_empty(info.variant_fields.len());
1536 for assignment in &assignments {
1537 if let Assigned(idx) = assignment {
1538 used_variants.insert(*idx);
1541 if used_variants.count() < 2 {
1542 for assignment in assignments.iter_mut() {
1543 *assignment = Ineligible(None);
1545 ineligible_locals.insert_all();
1549 // Write down the order of our locals that will be promoted to the prefix.
1551 for (idx, local) in ineligible_locals.iter().enumerate() {
1552 assignments[local] = Ineligible(Some(idx as u32));
1555 debug!("generator saved local assignments: {:?}", assignments);
1557 (ineligible_locals, assignments)
1560 /// Compute the full generator layout.
///
/// Builds a shared "prefix" (upvars, tag, and all overlap-ineligible locals),
/// then lays out each variant's overlap-eligible fields after that prefix,
/// and finally stitches both back into one `Variants::Multiple` layout.
// NOTE(review): portions of this body are elided in this listing (see the gaps
// in the embedded line numbers); visible code is kept verbatim.
1561 fn generator_layout(
1564 def_id: hir::def_id::DefId,
1565 substs: SubstsRef<'tcx>,
1566 ) -> Result<&'tcx Layout, LayoutError<'tcx>> {
1567 use SavedLocalEligibility::*;
// Substitute generator substs into each saved-local type before layout.
1569 let subst_field = |ty: Ty<'tcx>| ty.subst(tcx, substs);
1571 let Some(info) = tcx.generator_layout(def_id) else {
1572 return Err(LayoutError::Unknown(ty));
1574 let (ineligible_locals, assignments) = self.generator_saved_local_eligibility(&info);
1576 // Build a prefix layout, including "promoting" all ineligible
1577 // locals as part of the prefix. We compute the layout of all of
1578 // these fields at once to get optimal packing.
// The discriminant (tag) field sits directly after the upvar prefix types.
1579 let tag_index = substs.as_generator().prefix_tys().count();
1581 // `info.variant_fields` already accounts for the reserved variants, so no need to add them.
1582 let max_discr = (info.variant_fields.len() - 1) as u128;
1583 let discr_int = Integer::fit_unsigned(max_discr);
1584 let discr_int_ty = discr_int.to_ty(tcx, false);
1586 value: Primitive::Int(discr_int, false),
1587 valid_range: WrappingRange { start: 0, end: max_discr },
1589 let tag_layout = self.tcx.intern_layout(Layout::scalar(self, tag));
1590 let tag_layout = TyAndLayout { ty: discr_int_ty, layout: tag_layout };
// Promoted (ineligible) locals are stored as `MaybeUninit<T>` because only
// some variants initialize them.
1592 let promoted_layouts = ineligible_locals
1594 .map(|local| subst_field(info.field_tys[local]))
1595 .map(|ty| tcx.mk_maybe_uninit(ty))
1596 .map(|ty| self.layout_of(ty));
1597 let prefix_layouts = substs
1600 .map(|ty| self.layout_of(ty))
1601 .chain(iter::once(Ok(tag_layout)))
1602 .chain(promoted_layouts)
1603 .collect::<Result<Vec<_>, _>>()?;
1604 let prefix = self.univariant_uninterned(
1607 &ReprOptions::default(),
1608 StructKind::AlwaysSized,
1611 let (prefix_size, prefix_align) = (prefix.size, prefix.align);
1613 // Split the prefix layout into the "outer" fields (upvars and
1614 // discriminant) and the "promoted" fields. Promoted fields will
1615 // get included in each variant that requested them (per `info.variant_fields`).
1617 debug!("prefix = {:#?}", prefix);
1618 let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
1619 FieldsShape::Arbitrary { mut offsets, memory_index } => {
1620 let mut inverse_memory_index = invert_mapping(&memory_index);
1622 // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
1623 // "outer" and "promoted" fields respectively.
1624 let b_start = (tag_index + 1) as u32;
1625 let offsets_b = offsets.split_off(b_start as usize);
1626 let offsets_a = offsets;
1628 // Disentangle the "a" and "b" components of `inverse_memory_index`
1629 // by preserving the order but keeping only one disjoint "half" each.
1630 // FIXME(eddyb) build a better abstraction for permutations, if possible.
1631 let inverse_memory_index_b: Vec<_> =
1632 inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
1633 inverse_memory_index.retain(|&i| i < b_start);
1634 let inverse_memory_index_a = inverse_memory_index;
1636 // Since `inverse_memory_index_{a,b}` each only refer to their
1637 // respective fields, they can be safely inverted
1638 let memory_index_a = invert_mapping(&inverse_memory_index_a);
1639 let memory_index_b = invert_mapping(&inverse_memory_index_b);
1642 FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
1643 (outer_fields, offsets_b, memory_index_b)
1648 let mut size = prefix.size;
1649 let mut align = prefix.align;
// Lay out each variant: only its own overlap-eligible fields, placed after
// the shared prefix.
1653 .map(|(index, variant_fields)| {
1654 // Only include overlap-eligible fields when we compute our variant layout.
1655 let variant_only_tys = variant_fields
1657 .filter(|local| match assignments[**local] {
1658 Unassigned => bug!(),
1659 Assigned(v) if v == index => true,
1660 Assigned(_) => bug!("assignment does not match variant"),
1661 Ineligible(_) => false,
1663 .map(|local| subst_field(info.field_tys[*local]));
1665 let mut variant = self.univariant_uninterned(
1668 .map(|ty| self.layout_of(ty))
1669 .collect::<Result<Vec<_>, _>>()?,
1670 &ReprOptions::default(),
1671 StructKind::Prefixed(prefix_size, prefix_align.abi),
1673 variant.variants = Variants::Single { index };
1675 let FieldsShape::Arbitrary { offsets, memory_index } = variant.fields else {
1679 // Now, stitch the promoted and variant-only fields back together in
1680 // the order they are mentioned by our GeneratorLayout.
1681 // Because we only use some subset (that can differ between variants)
1682 // of the promoted fields, we can't just pick those elements of the
1683 // `promoted_memory_index` (as we'd end up with gaps).
1684 // So instead, we build an "inverse memory_index", as if all of the
1685 // promoted fields were being used, but leave the elements not in the
1686 // subset as `INVALID_FIELD_IDX`, which we can filter out later to
1687 // obtain a valid (bijective) mapping.
1688 const INVALID_FIELD_IDX: u32 = !0;
1689 let mut combined_inverse_memory_index =
1690 vec![INVALID_FIELD_IDX; promoted_memory_index.len() + memory_index.len()];
1691 let mut offsets_and_memory_index = iter::zip(offsets, memory_index);
1692 let combined_offsets = variant_fields
1696 let (offset, memory_index) = match assignments[*local] {
1697 Unassigned => bug!(),
// Variant-local field: consume the next (offset, memory_index) pair,
// shifting its memory index past all promoted fields.
1699 let (offset, memory_index) =
1700 offsets_and_memory_index.next().unwrap();
1701 (offset, promoted_memory_index.len() as u32 + memory_index)
1703 Ineligible(field_idx) => {
1704 let field_idx = field_idx.unwrap() as usize;
1705 (promoted_offsets[field_idx], promoted_memory_index[field_idx])
1708 combined_inverse_memory_index[memory_index as usize] = i as u32;
1713 // Remove the unused slots and invert the mapping to obtain the
1714 // combined `memory_index` (also see previous comment).
1715 combined_inverse_memory_index.retain(|&i| i != INVALID_FIELD_IDX);
1716 let combined_memory_index = invert_mapping(&combined_inverse_memory_index);
1718 variant.fields = FieldsShape::Arbitrary {
1719 offsets: combined_offsets,
1720 memory_index: combined_memory_index,
1723 size = size.max(variant.size);
1724 align = align.max(variant.align);
1727 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1729 size = size.align_to(align.abi);
// The whole generator is uninhabited iff the prefix or all variants are.
1731 let abi = if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi.is_uninhabited())
1735 Abi::Aggregate { sized: true }
1738 let layout = tcx.intern_layout(Layout {
1739 variants: Variants::Multiple {
1741 tag_encoding: TagEncoding::Direct,
1742 tag_field: tag_index,
1745 fields: outer_fields,
1747 largest_niche: prefix.largest_niche,
1751 debug!("generator layout ({:?}): {:#?}", ty, layout);
1755 /// This is invoked by the `layout_of` query to record the final
1756 /// layout of each type.
1758 fn record_layout_for_printing(&self, layout: TyAndLayout<'tcx>) {
1759 // If we are running with `-Zprint-type-sizes`, maybe record layouts
1760 // for dumping later.
// Delegates to the outlined helper only when the flag is enabled, keeping
// this per-layout check trivial in the common (flag off) case.
1761 if self.tcx.sess.opts.debugging_opts.print_type_sizes {
1762 self.record_layout_for_printing_outlined(layout)
// Records size/alignment/field info of `layout` into `sess.code_stats` for
// `-Zprint-type-sizes` output. Only monomorphic ADTs and closures are recorded.
// NOTE(review): several lines of this body are elided in this listing (see the
// gaps in the embedded line numbers); visible code is kept verbatim.
1766 fn record_layout_for_printing_outlined(&self, layout: TyAndLayout<'tcx>) {
1767 // Ignore layouts that are done with non-empty environments or
1768 // non-monomorphic layouts, as the user only wants to see the stuff
1769 // resulting from the final codegen session.
1770 if layout.ty.has_param_types_or_consts() || !self.param_env.caller_bounds().is_empty() {
1774 // (delay format until we actually need it)
1775 let record = |kind, packed, opt_discr_size, variants| {
1776 let type_desc = format!("{:?}", layout.ty);
1777 self.tcx.sess.code_stats.record_type_size(
1788 let adt_def = match *layout.ty.kind() {
1789 ty::Adt(ref adt_def, _) => {
1790 debug!("print-type-size t: `{:?}` process adt", layout.ty);
1794 ty::Closure(..) => {
1795 debug!("print-type-size t: `{:?}` record closure", layout.ty);
1796 record(DataTypeKind::Closure, false, None, vec![]);
1801 debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
1806 let adt_kind = adt_def.adt_kind();
1807 let adt_packed = adt_def.repr.pack.is_some();
// Builds a `VariantInfo` for one variant: per-field offset/size/align plus
// the minimal size covering all fields.
1809 let build_variant_info = |n: Option<Symbol>, flds: &[Symbol], layout: TyAndLayout<'tcx>| {
1810 let mut min_size = Size::ZERO;
1811 let field_info: Vec<_> = flds
1815 let field_layout = layout.field(self, i);
1816 let offset = layout.fields.offset(i);
1817 let field_end = offset + field_layout.size;
1818 if min_size < field_end {
1819 min_size = field_end;
1822 name: name.to_string(),
1823 offset: offset.bytes(),
1824 size: field_layout.size.bytes(),
1825 align: field_layout.align.abi.bytes(),
1831 name: n.map(|n| n.to_string()),
1832 kind: if layout.is_unsized() { SizeKind::Min } else { SizeKind::Exact },
1833 align: layout.align.abi.bytes(),
1834 size: if min_size.bytes() == 0 { layout.size.bytes() } else { min_size.bytes() },
1839 match layout.variants {
1840 Variants::Single { index } => {
1841 if !adt_def.variants.is_empty() && layout.fields != FieldsShape::Primitive {
1843 "print-type-size `{:#?}` variant {}",
1844 layout, adt_def.variants[index].name
1846 let variant_def = &adt_def.variants[index];
1847 let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
1852 vec![build_variant_info(Some(variant_def.name), &fields, layout)],
1855 // (This case arises for *empty* enums; so give it
1856 // zero variants.)
1857 record(adt_kind.into(), adt_packed, None, vec![]);
1861 Variants::Multiple { tag, ref tag_encoding, .. } => {
1863 "print-type-size `{:#?}` adt general variants def {}",
1865 adt_def.variants.len()
1867 let variant_infos: Vec<_> = adt_def
1870 .map(|(i, variant_def)| {
1871 let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
1873 Some(variant_def.name),
1875 layout.for_variant(self, i),
// Only a directly-encoded tag occupies dedicated space worth reporting.
1882 match tag_encoding {
1883 TagEncoding::Direct => Some(tag.value.size(self)),
1893 /// Type size "skeleton", i.e., the only information determining a type's size.
1894 /// While this is conservative (aside from constant sizes, only pointers,
1895 /// newtypes thereof and null pointer optimized enums are allowed), it is
1896 /// enough to statically check common use cases of transmute.
1897 #[derive(Copy, Clone, Debug)]
1898 pub enum SizeSkeleton<'tcx> {
1899 /// Any statically computable Layout.
// NOTE(review): the variant declarations are elided in this listing; the
// usage in `SizeSkeleton::compute` below (`SizeSkeleton::Known(layout.size)`,
// `SizeSkeleton::Pointer { non_zero, tail }`) shows a `Known(Size)` variant
// and a `Pointer { non_zero: bool, tail: Ty<'tcx> }` variant.
1902 /// A potentially-fat pointer.
1904 /// If true, this pointer is never null.
1906 /// The type which determines the unsized metadata, if any,
1907 /// of this pointer. Either a type parameter or a projection
1908 /// depending on one, with regions erased.
1913 impl<'tcx> SizeSkeleton<'tcx> {
// NOTE(review): the leading lines of `compute`'s signature are elided in this
// listing; the recursive call near the bottom
// (`SizeSkeleton::compute(normalized, tcx, param_env)`) shows the parameter order.
// `compute` falls back to structural reasoning (pointers, newtypes, nullable-
// pointer enums) when a full static layout cannot be computed.
1917 param_env: ty::ParamEnv<'tcx>,
1918 ) -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
1919 debug_assert!(!ty.has_infer_types_or_consts());
1921 // First try computing a static layout.
1922 let err = match tcx.layout_of(param_env.and(ty)) {
1924 return Ok(SizeSkeleton::Known(layout.size));
1930 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
// References are non-null; only raw pointers may be null.
1931 let non_zero = !ty.is_unsafe_ptr();
1932 let tail = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
1934 ty::Param(_) | ty::Projection(_) => {
1935 debug_assert!(tail.has_param_types_or_consts());
1936 Ok(SizeSkeleton::Pointer { non_zero, tail: tcx.erase_regions(tail) })
1939 "SizeSkeleton::compute({}): layout errored ({}), yet \
1940 tail `{}` is not a type parameter or a projection",
1948 ty::Adt(def, substs) => {
1949 // Only newtypes and enums w/ nullable pointer optimization.
1950 if def.is_union() || def.variants.is_empty() || def.variants.len() > 2 {
1954 // Get a zero-sized variant or a pointer newtype.
1955 let zero_or_ptr_variant = |i| {
1956 let i = VariantIdx::new(i);
1957 let fields = def.variants[i]
1960 .map(|field| SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env));
1962 for field in fields {
1965 SizeSkeleton::Known(size) => {
1966 if size.bytes() > 0 {
1970 SizeSkeleton::Pointer { .. } => {
1981 let v0 = zero_or_ptr_variant(0)?;
// Newtype (single-variant) case.
1983 if def.variants.len() == 1 {
1984 if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
1985 return Ok(SizeSkeleton::Pointer {
// A `layout_scalar_valid_range` attribute excluding 0 also makes
// the wrapped pointer non-null (e.g. start > 0).
1987 || match tcx.layout_scalar_valid_range(def.did) {
1988 (Bound::Included(start), Bound::Unbounded) => start > 0,
1989 (Bound::Included(start), Bound::Included(end)) => {
1990 0 < start && start < end
2001 let v1 = zero_or_ptr_variant(1)?;
2002 // Nullable pointer enum optimization.
2004 (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None)
2005 | (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
2006 Ok(SizeSkeleton::Pointer { non_zero: false, tail })
2012 ty::Projection(_) | ty::Opaque(..) => {
2013 let normalized = tcx.normalize_erasing_regions(param_env, ty);
2014 if ty == normalized {
2017 SizeSkeleton::compute(normalized, tcx, param_env)
/// Whether two skeletons are guaranteed to describe the same size:
/// equal known sizes, or pointers compared by their unsized `tail` type.
2025 pub fn same_size(self, other: SizeSkeleton<'_>) -> bool {
2026 match (self, other) {
2027 (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
2028 (SizeSkeleton::Pointer { tail: a, .. }, SizeSkeleton::Pointer { tail: b, .. }) => {
/// Access to the type context, for contexts that also know the data layout.
2036 pub trait HasTyCtxt<'tcx>: HasDataLayout {
2037 fn tcx(&self) -> TyCtxt<'tcx>;
/// Access to the parameter environment used for trait resolution / layout.
2040 pub trait HasParamEnv<'tcx> {
2041 fn param_env(&self) -> ty::ParamEnv<'tcx>;
// Boilerplate delegation impls: `TyCtxt`, `TyCtxtAt`, and `LayoutCx` all
// expose `HasDataLayout` / `HasTargetSpec` / `HasTyCtxt` / `HasParamEnv` by
// forwarding to the underlying context. (Most method bodies are elided in
// this listing; the `LayoutCx` impls visibly forward to `self.tcx`.)
2044 impl<'tcx> HasDataLayout for TyCtxt<'tcx> {
2046 fn data_layout(&self) -> &TargetDataLayout {
2051 impl<'tcx> HasTargetSpec for TyCtxt<'tcx> {
2052 fn target_spec(&self) -> &Target {
2057 impl<'tcx> HasTyCtxt<'tcx> for TyCtxt<'tcx> {
2059 fn tcx(&self) -> TyCtxt<'tcx> {
2064 impl<'tcx> HasDataLayout for ty::query::TyCtxtAt<'tcx> {
2066 fn data_layout(&self) -> &TargetDataLayout {
2071 impl<'tcx> HasTargetSpec for ty::query::TyCtxtAt<'tcx> {
2072 fn target_spec(&self) -> &Target {
2077 impl<'tcx> HasTyCtxt<'tcx> for ty::query::TyCtxtAt<'tcx> {
2079 fn tcx(&self) -> TyCtxt<'tcx> {
2084 impl<'tcx, C> HasParamEnv<'tcx> for LayoutCx<'tcx, C> {
2085 fn param_env(&self) -> ty::ParamEnv<'tcx> {
2090 impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
2091 fn data_layout(&self) -> &TargetDataLayout {
2092 self.tcx.data_layout()
2096 impl<'tcx, T: HasTargetSpec> HasTargetSpec for LayoutCx<'tcx, T> {
2097 fn target_spec(&self) -> &Target {
2098 self.tcx.target_spec()
2102 impl<'tcx, T: HasTyCtxt<'tcx>> HasTyCtxt<'tcx> for LayoutCx<'tcx, T> {
2103 fn tcx(&self) -> TyCtxt<'tcx> {
/// Abstracts over returning a `T` directly (infallible contexts) or a
/// `Result<T, E>`; used by `LayoutOfHelpers::LayoutOfResult` below so each
/// context chooses its own error plumbing.
2108 pub trait MaybeResult<T> {
2111 fn from(x: Result<T, Self::Error>) -> Self;
2112 fn to_result(self) -> Result<T, Self::Error>;
// Infallible case: the irrefutable `Ok(x)` pattern is valid because
// `Self::Error` is uninhabited here, so `Err` can never occur.
2115 impl<T> MaybeResult<T> for T {
2118 fn from(Ok(x): Result<T, Self::Error>) -> Self {
2121 fn to_result(self) -> Result<T, Self::Error> {
// Fallible case: `Result` is its own `MaybeResult`; both conversions are identity.
2126 impl<T, E> MaybeResult<T> for Result<T, E> {
2129 fn from(x: Result<T, Self::Error>) -> Self {
2132 fn to_result(self) -> Result<T, Self::Error> {
/// Alias instantiating the target-agnostic `rustc_target` `TyAndLayout`
/// with this crate's interned `Ty<'tcx>`.
2137 pub type TyAndLayout<'tcx> = rustc_target::abi::TyAndLayout<'tcx, Ty<'tcx>>;
2139 /// Trait for contexts that want to be able to compute layouts of types.
2140 /// This automatically gives access to `LayoutOf`, through a blanket `impl`.
2141 pub trait LayoutOfHelpers<'tcx>: HasDataLayout + HasTyCtxt<'tcx> + HasParamEnv<'tcx> {
2142 /// The `TyAndLayout`-wrapping type (or `TyAndLayout` itself), which will be
2143 /// returned from `layout_of` (see also `handle_layout_err`).
2144 type LayoutOfResult: MaybeResult<TyAndLayout<'tcx>>;
2146 /// `Span` to use for `tcx.at(span)`, from `layout_of`.
2147 // FIXME(eddyb) perhaps make this mandatory to get contexts to track it better?
// Provided method: contexts that track a span override this default.
2149 fn layout_tcx_at_span(&self) -> Span {
2153 /// Helper used for `layout_of`, to adapt `tcx.layout_of(...)` into a
2154 /// `Self::LayoutOfResult` (which does not need to be a `Result<...>`).
2156 /// Most `impl`s, which propagate `LayoutError`s, should simply return `err`,
2157 /// but this hook allows e.g. codegen to return only `TyAndLayout` from its
2158 /// `cx.layout_of(...)`, without any `Result<...>` around it to deal with
2159 /// (and any `LayoutError`s are turned into fatal errors or ICEs).
2160 fn handle_layout_err(
2162 err: LayoutError<'tcx>,
2165 ) -> <Self::LayoutOfResult as MaybeResult<TyAndLayout<'tcx>>>::Error;
2168 /// Blanket extension trait for contexts that can compute layouts of types.
2169 pub trait LayoutOf<'tcx>: LayoutOfHelpers<'tcx> {
2170 /// Computes the layout of a type. Note that this implicitly
2171 /// executes in "reveal all" mode, and will normalize the input type.
// Convenience form: forwards with a dummy span.
2173 fn layout_of(&self, ty: Ty<'tcx>) -> Self::LayoutOfResult {
2174 self.spanned_layout_of(ty, DUMMY_SP)
2177 /// Computes the layout of a type, at `span`. Note that this implicitly
2178 /// executes in "reveal all" mode, and will normalize the input type.
2179 // FIXME(eddyb) avoid passing information like this, and instead add more
2180 // `TyCtxt::at`-like APIs to be able to do e.g. `cx.at(span).layout_of(ty)`.
2182 fn spanned_layout_of(&self, ty: Ty<'tcx>, span: Span) -> Self::LayoutOfResult {
// Prefer the caller-provided span; otherwise use the context's tracked span.
2183 let span = if !span.is_dummy() { span } else { self.layout_tcx_at_span() };
2184 let tcx = self.tcx().at(span);
// Query result errors are adapted through the context's `handle_layout_err` hook.
2187 tcx.layout_of(self.param_env().and(ty))
2188 .map_err(|err| self.handle_layout_err(err, span, ty)),
// The blanket impl promised by `LayoutOfHelpers`' docs: every helper context
// gets `layout_of`/`spanned_layout_of` for free.
2193 impl<'tcx, C: LayoutOfHelpers<'tcx>> LayoutOf<'tcx> for C {}
// The layout-computation contexts themselves return plain `Result`s and
// propagate `LayoutError` unchanged. (The `handle_layout_err` bodies are
// elided in this listing; presumably they simply return `err` — the
// `LayoutOfResult` type makes any other behavior pointless. TODO confirm.)
2195 impl<'tcx> LayoutOfHelpers<'tcx> for LayoutCx<'tcx, TyCtxt<'tcx>> {
2196 type LayoutOfResult = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
2199 fn handle_layout_err(&self, err: LayoutError<'tcx>, _: Span, _: Ty<'tcx>) -> LayoutError<'tcx> {
// The `TyCtxtAt` variant additionally overrides `layout_tcx_at_span` so the
// tracked span is used for diagnostics.
2204 impl<'tcx> LayoutOfHelpers<'tcx> for LayoutCx<'tcx, ty::query::TyCtxtAt<'tcx>> {
2205 type LayoutOfResult = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
2208 fn layout_tcx_at_span(&self) -> Span {
2213 fn handle_layout_err(&self, err: LayoutError<'tcx>, _: Span, _: Ty<'tcx>) -> LayoutError<'tcx> {
2218 impl<'tcx, C> TyAbiInterface<'tcx, C> for Ty<'tcx>
2220 C: HasTyCtxt<'tcx> + HasParamEnv<'tcx>,
// Projects an aggregate's layout down to one of its variants.
// NOTE(review): some lines of this body are elided in this listing (see the
// gaps in the embedded line numbers); visible code is kept verbatim.
2222 fn ty_and_layout_for_variant(
2223 this: TyAndLayout<'tcx>,
2225 variant_index: VariantIdx,
2226 ) -> TyAndLayout<'tcx> {
2227 let layout = match this.variants {
2228 Variants::Single { index }
2229 // If all variants but one are uninhabited, the variant layout is the enum layout.
2230 if index == variant_index &&
2231 // Don't confuse variants of uninhabited enums with the enum itself.
2232 // For more details see https://github.com/rust-lang/rust/issues/69763.
2233 this.fields != FieldsShape::Primitive =>
// Asking for a *different* variant of a `Variants::Single` layout: the
// requested variant must be uninhabited, so synthesize an uninhabited layout.
2238 Variants::Single { index } => {
2240 let param_env = cx.param_env();
2242 // Deny calling for_variant more than once for non-Single enums.
2243 if let Ok(original_layout) = tcx.layout_of(param_env.and(this.ty)) {
2244 assert_eq!(original_layout.variants, Variants::Single { index });
2247 let fields = match this.ty.kind() {
2248 ty::Adt(def, _) if def.variants.is_empty() =>
2249 bug!("for_variant called on zero-variant enum"),
2250 ty::Adt(def, _) => def.variants[variant_index].fields.len(),
2253 tcx.intern_layout(Layout {
2254 variants: Variants::Single { index: variant_index },
// `FieldsShape::Union` requires a non-zero field count; fall back to an
// empty `Arbitrary` shape for zero fields.
2255 fields: match NonZeroUsize::new(fields) {
2256 Some(fields) => FieldsShape::Union(fields),
2257 None => FieldsShape::Arbitrary { offsets: vec![], memory_index: vec![] },
2259 abi: Abi::Uninhabited,
2260 largest_niche: None,
2261 align: tcx.data_layout.i8_align,
2266 Variants::Multiple { ref variants, .. } => &variants[variant_index],
// Whatever path was taken, the result must describe exactly the one variant.
2269 assert_eq!(layout.variants, Variants::Single { index: variant_index });
2271 TyAndLayout { ty: this.ty, layout }
2274 fn ty_and_layout_field(this: TyAndLayout<'tcx>, cx: &C, i: usize) -> TyAndLayout<'tcx> {
2275 enum TyMaybeWithLayout<'tcx> {
2277 TyAndLayout(TyAndLayout<'tcx>),
2280 fn field_ty_or_layout<'tcx>(
2281 this: TyAndLayout<'tcx>,
2282 cx: &(impl HasTyCtxt<'tcx> + HasParamEnv<'tcx>),
2284 ) -> TyMaybeWithLayout<'tcx> {
2286 let tag_layout = |tag: Scalar| -> TyAndLayout<'tcx> {
2287 let layout = Layout::scalar(cx, tag);
2288 TyAndLayout { layout: tcx.intern_layout(layout), ty: tag.value.to_ty(tcx) }
2291 match *this.ty.kind() {
2300 | ty::GeneratorWitness(..)
2302 | ty::Dynamic(..) => bug!("TyAndLayout::field({:?}): not applicable", this),
2304 // Potentially-fat pointers.
2305 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
2306 assert!(i < this.fields.count());
2308 // Reuse the fat `*T` type as its own thin pointer data field.
2309 // This provides information about, e.g., DST struct pointees
2310 // (which may have no non-DST form), and will work as long
2311 // as the `Abi` or `FieldsShape` is checked by users.
2313 let nil = tcx.mk_unit();
2314 let unit_ptr_ty = if this.ty.is_unsafe_ptr() {
2317 tcx.mk_mut_ref(tcx.lifetimes.re_static, nil)
2320 // NOTE(eddyb) using an empty `ParamEnv`, and `unwrap`-ing
2321 // the `Result` should always work because the type is
2322 // always either `*mut ()` or `&'static mut ()`.
2323 return TyMaybeWithLayout::TyAndLayout(TyAndLayout {
2325 ..tcx.layout_of(ty::ParamEnv::reveal_all().and(unit_ptr_ty)).unwrap()
2329 match tcx.struct_tail_erasing_lifetimes(pointee, cx.param_env()).kind() {
2330 ty::Slice(_) | ty::Str => TyMaybeWithLayout::Ty(tcx.types.usize),
2331 ty::Dynamic(_, _) => {
2332 TyMaybeWithLayout::Ty(tcx.mk_imm_ref(
2333 tcx.lifetimes.re_static,
2334 tcx.mk_array(tcx.types.usize, 3),
2336 /* FIXME: use actual fn pointers
2337 Warning: naively computing the number of entries in the
2338 vtable by counting the methods on the trait + methods on
2339 all parent traits does not work, because some methods can
2340 be not object safe and thus excluded from the vtable.
2341 Increase this counter if you tried to implement this but
2342 failed to do it without duplicating a lot of code from
2343 other places in the compiler: 2
2345 tcx.mk_array(tcx.types.usize, 3),
2346 tcx.mk_array(Option<fn()>),
2350 _ => bug!("TyAndLayout::field({:?}): not applicable", this),
2354 // Arrays and slices.
2355 ty::Array(element, _) | ty::Slice(element) => TyMaybeWithLayout::Ty(element),
2356 ty::Str => TyMaybeWithLayout::Ty(tcx.types.u8),
2358 // Tuples, generators and closures.
2359 ty::Closure(_, ref substs) => field_ty_or_layout(
2360 TyAndLayout { ty: substs.as_closure().tupled_upvars_ty(), ..this },
2365 ty::Generator(def_id, ref substs, _) => match this.variants {
2366 Variants::Single { index } => TyMaybeWithLayout::Ty(
2369 .state_tys(def_id, tcx)
2370 .nth(index.as_usize())
2375 Variants::Multiple { tag, tag_field, .. } => {
2377 return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
2379 TyMaybeWithLayout::Ty(substs.as_generator().prefix_tys().nth(i).unwrap())
2383 ty::Tuple(tys) => TyMaybeWithLayout::Ty(tys[i]),
2386 ty::Adt(def, substs) => {
2387 match this.variants {
2388 Variants::Single { index } => {
2389 TyMaybeWithLayout::Ty(def.variants[index].fields[i].ty(tcx, substs))
2392 // Discriminant field for enums (where applicable).
2393 Variants::Multiple { tag, .. } => {
2395 return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
2402 | ty::Placeholder(..)
2406 | ty::Error(_) => bug!("TyAndLayout::field: unexpected type `{}`", this.ty),
2410 match field_ty_or_layout(this, cx, i) {
2411 TyMaybeWithLayout::Ty(field_ty) => {
2412 cx.tcx().layout_of(cx.param_env().and(field_ty)).unwrap_or_else(|e| {
2414 "failed to get layout for `{}`: {},\n\
2415 despite it being a field (#{}) of an existing layout: {:#?}",
2423 TyMaybeWithLayout::TyAndLayout(field_layout) => field_layout,
2427 fn ty_and_layout_pointee_info_at(
2428 this: TyAndLayout<'tcx>,
2431 ) -> Option<PointeeInfo> {
2433 let param_env = cx.param_env();
2435 let addr_space_of_ty = |ty: Ty<'tcx>| {
2436 if ty.is_fn() { cx.data_layout().instruction_address_space } else { AddressSpace::DATA }
2439 let pointee_info = match *this.ty.kind() {
2440 ty::RawPtr(mt) if offset.bytes() == 0 => {
2441 tcx.layout_of(param_env.and(mt.ty)).ok().map(|layout| PointeeInfo {
2443 align: layout.align.abi,
2445 address_space: addr_space_of_ty(mt.ty),
2448 ty::FnPtr(fn_sig) if offset.bytes() == 0 => {
2449 tcx.layout_of(param_env.and(tcx.mk_fn_ptr(fn_sig))).ok().map(|layout| PointeeInfo {
2451 align: layout.align.abi,
2453 address_space: cx.data_layout().instruction_address_space,
2456 ty::Ref(_, ty, mt) if offset.bytes() == 0 => {
2457 let address_space = addr_space_of_ty(ty);
2458 let kind = if tcx.sess.opts.optimize == OptLevel::No {
2459 // Use conservative pointer kind if not optimizing. This saves us the
2460 // Freeze/Unpin queries, and can save time in the codegen backend (noalias
2461 // attributes in LLVM have compile-time cost even in unoptimized builds).
2465 hir::Mutability::Not => {
2466 if ty.is_freeze(tcx.at(DUMMY_SP), cx.param_env()) {
2472 hir::Mutability::Mut => {
2473 // References to self-referential structures should not be considered
2474 // noalias, as another pointer to the structure can be obtained, that
2475 // is not based-on the original reference. We consider all !Unpin
2476 // types to be potentially self-referential here.
2477 if ty.is_unpin(tcx.at(DUMMY_SP), cx.param_env()) {
2478 PointerKind::UniqueBorrowed
2486 tcx.layout_of(param_env.and(ty)).ok().map(|layout| PointeeInfo {
2488 align: layout.align.abi,
2495 let mut data_variant = match this.variants {
2496 // Within the discriminant field, only the niche itself is
2497 // always initialized, so we only check for a pointer at its
2500 // If the niche is a pointer, it's either valid (according
2501 // to its type), or null (which the niche field's scalar
2502 // validity range encodes). This allows using
2503 // `dereferenceable_or_null` for e.g., `Option<&T>`, and
2504 // this will continue to work as long as we don't start
2505 // using more niches than just null (e.g., the first page of
2506 // the address space, or unaligned pointers).
2507 Variants::Multiple {
2508 tag_encoding: TagEncoding::Niche { dataful_variant, .. },
2511 } if this.fields.offset(tag_field) == offset => {
2512 Some(this.for_variant(cx, dataful_variant))
2517 if let Some(variant) = data_variant {
2518 // We're not interested in any unions.
2519 if let FieldsShape::Union(_) = variant.fields {
2520 data_variant = None;
2524 let mut result = None;
2526 if let Some(variant) = data_variant {
2527 let ptr_end = offset + Pointer.size(cx);
2528 for i in 0..variant.fields.count() {
2529 let field_start = variant.fields.offset(i);
2530 if field_start <= offset {
2531 let field = variant.field(cx, i);
2532 result = field.to_result().ok().and_then(|field| {
2533 if ptr_end <= field_start + field.size {
2534 // We found the right field, look inside it.
2536 field.pointee_info_at(cx, offset - field_start);
2542 if result.is_some() {
2549 // FIXME(eddyb) This should be for `ptr::Unique<T>`, not `Box<T>`.
2550 if let Some(ref mut pointee) = result {
2551 if let ty::Adt(def, _) = this.ty.kind() {
2552 if def.is_box() && offset.bytes() == 0 {
2553 pointee.safe = Some(PointerKind::UniqueOwned);
2563 "pointee_info_at (offset={:?}, type kind: {:?}) => {:?}",
2573 impl<'tcx> ty::Instance<'tcx> {
2574 // NOTE(eddyb) this is private to avoid using it from outside of
2575 // `fn_abi_of_instance` - any other uses are either too high-level
2576 // for `Instance` (e.g. typeck would use `Ty::fn_sig` instead),
2577 // or should go through `FnAbi` instead, to avoid losing any
2578 // adjustments `fn_abi_of_instance` might be performing.
//
// Builds the `PolyFnSig` that ABI computation should use for this instance:
// `FnDef`s get their signature normalized and substituted; closures get the
// environment prepended as a late-bound first argument; generators are
// rewritten to `fn(Pin<&mut Self>, resume_ty) -> GeneratorState<yield, return>`.
2579 fn fn_sig_for_fn_abi(
2582 param_env: ty::ParamEnv<'tcx>,
2583 ) -> ty::PolyFnSig<'tcx> {
2584 let ty = self.ty(tcx, param_env);
2587 // HACK(davidtwco,eddyb): This is a workaround for polymorphization considering
2588 // parameters unused if they show up in the signature, but not in the `mir::Body`
2589 // (i.e. due to being inside a projection that got normalized, see
2590 // `src/test/ui/polymorphization/normalized_sig_types.rs`), and codegen not keeping
2591 // track of a polymorphization `ParamEnv` to allow normalizing later.
2592 let mut sig = match *ty.kind() {
2593 ty::FnDef(def_id, substs) => tcx
2594 .normalize_erasing_regions(tcx.param_env(def_id), tcx.fn_sig(def_id))
2595 .subst(tcx, substs),
2596 _ => unreachable!(),
2599 if let ty::InstanceDef::VtableShim(..) = self.def {
2600 // Modify `fn(self, ...)` to `fn(self: *mut Self, ...)`.
2601 sig = sig.map_bound(|mut sig| {
2602 let mut inputs_and_output = sig.inputs_and_output.to_vec();
2603 inputs_and_output[0] = tcx.mk_mut_ptr(inputs_and_output[0]);
2604 sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);
// Closures: prepend the (late-bound) environment type to the inputs.
2610 ty::Closure(def_id, substs) => {
2611 let sig = substs.as_closure().sig();
2613 let bound_vars = tcx.mk_bound_variable_kinds(
2616 .chain(iter::once(ty::BoundVariableKind::Region(ty::BrEnv))),
2618 let br = ty::BoundRegion {
2619 var: ty::BoundVar::from_usize(bound_vars.len() - 1),
2620 kind: ty::BoundRegionKind::BrEnv,
2622 let env_region = ty::ReLateBound(ty::INNERMOST, br);
2623 let env_ty = tcx.closure_env_ty(def_id, substs, env_region).unwrap();
2625 let sig = sig.skip_binder();
2626 ty::Binder::bind_with_vars(
2628 iter::once(env_ty).chain(sig.inputs().iter().cloned()),
// Generators: the resume signature takes `Pin<&mut Self>` plus the resume
// type and returns `GeneratorState<Yield, Return>` (both lang items).
2637 ty::Generator(_, substs, _) => {
2638 let sig = substs.as_generator().poly_sig();
2640 let bound_vars = tcx.mk_bound_variable_kinds(
2643 .chain(iter::once(ty::BoundVariableKind::Region(ty::BrEnv))),
2645 let br = ty::BoundRegion {
2646 var: ty::BoundVar::from_usize(bound_vars.len() - 1),
2647 kind: ty::BoundRegionKind::BrEnv,
2649 let env_region = ty::ReLateBound(ty::INNERMOST, br);
2650 let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty);
2652 let pin_did = tcx.require_lang_item(LangItem::Pin, None);
2653 let pin_adt_ref = tcx.adt_def(pin_did);
2654 let pin_substs = tcx.intern_substs(&[env_ty.into()]);
2655 let env_ty = tcx.mk_adt(pin_adt_ref, pin_substs);
2657 let sig = sig.skip_binder();
2658 let state_did = tcx.require_lang_item(LangItem::GeneratorState, None);
2659 let state_adt_ref = tcx.adt_def(state_did);
2660 let state_substs = tcx.intern_substs(&[sig.yield_ty.into(), sig.return_ty.into()]);
2661 let ret_ty = tcx.mk_adt(state_adt_ref, state_substs);
2662 ty::Binder::bind_with_vars(
2664 [env_ty, sig.resume_ty].iter(),
2667 hir::Unsafety::Normal,
2668 rustc_target::spec::abi::Abi::Rust,
2673 _ => bug!("unexpected type {:?} in Instance::fn_sig", ty),
2678 /// Calculates whether a function's ABI can unwind or not.
2680 /// This takes two primary parameters:
2682 /// * `codegen_fn_attr_flags` - these are flags calculated as part of the
2683 /// codegen attrs for a defined function. For function pointers this set of
2684 /// flags is the empty set. This is only applicable for Rust-defined
2685 /// functions, and generally isn't needed except for small optimizations where
2686 /// we try to say a function which otherwise might look like it could unwind
2687 /// doesn't actually unwind (such as for intrinsics and such).
2689 /// * `abi` - this is the ABI that the function is defined with. This is the
2690 /// primary factor for determining whether a function can unwind or not.
2692 /// Note that in this case unwinding is not necessarily panicking in Rust. Rust
2693 /// panics are implemented with unwinds on most platforms (when
2694 /// `-Cpanic=unwind`), but this also accounts for `-Cpanic=abort` build modes.
2695 /// Notably unwinding is disallowed for most non-Rust ABIs unless it's
2696 /// specifically in the name (e.g. `"C-unwind"`). Unwinding within each ABI is
2697 /// defined for each ABI individually, but it always corresponds to some form of
2698 /// stack-based unwinding (the exact mechanism of which varies
2699 /// platform-by-platform).
2701 /// Rust functions are classified as to whether or not they can unwind based on the
2702 /// active "panic strategy". In other words Rust functions are considered to
2703 /// unwind in `-Cpanic=unwind` mode and cannot unwind in `-Cpanic=abort` mode.
2704 /// Note that Rust supports intermingling panic=abort and panic=unwind code, but
2705 /// only if the final panic mode is panic=abort. In this scenario any code
2706 /// previously compiled assuming that a function can unwind is still correct, it
2707 /// just never happens to actually unwind at runtime.
2709 /// This function's answer to whether or not a function can unwind is quite
2710 /// impactful throughout the compiler. This affects things like:
2712 /// * Calling a function which can't unwind means codegen simply ignores any
2713 /// associated unwinding cleanup.
2714 /// * Calling a function which can unwind from a function which can't unwind
2715 /// causes the `abort_unwinding_calls` MIR pass to insert a landing pad that
2716 /// aborts the process.
2717 /// * This affects whether functions have the LLVM `nounwind` attribute, which
2718 /// affects various optimizations and codegen.
2720 /// FIXME: this is actually buggy with respect to Rust functions. Rust functions
2721 /// compiled with `-Cpanic=unwind` and referenced from another crate compiled
2722 /// with `-Cpanic=abort` will look like they can't unwind when in fact they
2723 /// might (from a foreign exception or similar).
2725 pub fn fn_can_unwind<'tcx>(
2727 codegen_fn_attr_flags: CodegenFnAttrFlags,
2730 // Special attribute for functions which can't unwind.
2731 if codegen_fn_attr_flags.contains(CodegenFnAttrFlags::NEVER_UNWIND) {
2735 // Otherwise if this isn't special then unwinding is generally determined by
2736 // the ABI of the function itself. ABIs like `C` have variants which also
2737 // specifically allow unwinding (`C-unwind`), but not all platform-specific
2738 // ABIs have such an option. Otherwise the only other thing here is Rust
2739 // itself, and those ABIs are determined by the panic strategy configured
2740 // for this compilation.
2742 // Unfortunately at this time there's also another caveat. Rust [RFC
2743 // 2945][rfc] has been accepted and is in the process of being implemented
2744 // and stabilized. In this interim state we need to deal with historical
2745 // rustc behavior as well as plan for future rustc behavior.
2747 // Historically functions declared with `extern "C"` were marked at the
2748 // codegen layer as `nounwind`. This happened regardless of `panic=unwind`
2749 // or not. This is UB for functions in `panic=unwind` mode that then
2750 // actually panic and unwind. Note that this behavior is true for both
2751 // externally declared functions as well as Rust-defined function.
2753 // To fix this UB rustc would like to change in the future to catch unwinds
2754 // from function calls that may unwind within a Rust-defined `extern "C"`
2755 // function and forcibly abort the process, thereby respecting the
2756 // `nounwind` attribute emitted for `extern "C"`. This behavior change isn't
2757 // ready to roll out, so determining whether or not the `C` family of ABIs
2758 // unwinds is conditional not only on their definition but also whether the
2759 // `#![feature(c_unwind)]` feature gate is active.
2761 // Note that this means that unlike historical compilers rustc now, by
2762 // default, unconditionally thinks that the `C` ABI may unwind. This will
2763 // prevent some optimization opportunities, however, so we try to scope this
2764 // change and only assume that `C` unwinds with `panic=unwind` (as opposed
2765 // to `panic=abort`).
2767 // Eventually the check against `c_unwind` here will ideally get removed and
2768 // this'll be a little cleaner as it'll be a straightforward check of the
2771 // [rfc]: https://github.com/rust-lang/rfcs/blob/master/text/2945-c-unwind-abi.md
2777 | Stdcall { unwind }
2778 | Fastcall { unwind }
2779 | Vectorcall { unwind }
2780 | Thiscall { unwind }
2783 | SysV64 { unwind } => {
2785 || (!tcx.features().c_unwind && tcx.sess.panic_strategy() == PanicStrategy::Unwind)
// ABIs with no unwinding mechanism at all (interrupts, intrinsics, etc.).
2793 | AvrNonBlockingInterrupt
2794 | CCmseNonSecureCall
2798 | Unadjusted => false,
// Rust-ABI functions unwind exactly when compiling with `-Cpanic=unwind`.
2799 Rust | RustCall => tcx.sess.panic_strategy() == PanicStrategy::Unwind,
/// Maps a source-level `extern` ABI (`SpecAbi`, after target-specific
/// adjustment) to the calling convention (`Conv`) used by codegen.
/// `System`/`EfiApi` must have been resolved to a concrete ABI before this.
2804 pub fn conv_from_spec_abi(tcx: TyCtxt<'_>, abi: SpecAbi) -> Conv {
2805 use rustc_target::spec::abi::Abi::*;
2806 match tcx.sess.target.adjust_abi(abi) {
2807 RustIntrinsic | PlatformIntrinsic | Rust | RustCall => Conv::Rust,
2809 // It's the ABI's job to select this, not ours.
2810 System { .. } => bug!("system abi should be selected elsewhere"),
2811 EfiApi => bug!("eficall abi should be selected elsewhere"),
2813 Stdcall { .. } => Conv::X86Stdcall,
2814 Fastcall { .. } => Conv::X86Fastcall,
2815 Vectorcall { .. } => Conv::X86VectorCall,
2816 Thiscall { .. } => Conv::X86ThisCall,
2817 C { .. } => Conv::C,
2818 Unadjusted => Conv::C,
2819 Win64 { .. } => Conv::X86_64Win64,
2820 SysV64 { .. } => Conv::X86_64SysV,
2821 Aapcs { .. } => Conv::ArmAapcs,
2822 CCmseNonSecureCall => Conv::CCmseNonSecureCall,
2823 PtxKernel => Conv::PtxKernel,
2824 Msp430Interrupt => Conv::Msp430Intr,
2825 X86Interrupt => Conv::X86Intr,
2826 AmdGpuKernel => Conv::AmdGpuKernel,
2827 AvrInterrupt => Conv::AvrInterrupt,
2828 AvrNonBlockingInterrupt => Conv::AvrNonBlockingInterrupt,
2831 // These API constants ought to be more specific...
2832 Cdecl { .. } => Conv::C,
2836 /// Error produced by attempting to compute or adjust a `FnAbi`.
2837 #[derive(Copy, Clone, Debug, HashStable)]
2838 pub enum FnAbiError<'tcx> {
2839 /// Error produced by a `layout_of` call, while computing `FnAbi` initially.
2840 Layout(LayoutError<'tcx>),
2842 /// Error produced by attempting to adjust a `FnAbi`, for a "foreign" ABI.
2843 AdjustForForeignAbi(call::AdjustForForeignAbiError),
// Allows `?` on `layout_of` results inside FnAbi computation: wraps the
// layout error into `FnAbiError::Layout`.
2846 impl<'tcx> From<LayoutError<'tcx>> for FnAbiError<'tcx> {
2847 fn from(err: LayoutError<'tcx>) -> Self {
// Allows `?` on `adjust_for_foreign_abi` results: wraps the adjustment error
// into `FnAbiError::AdjustForForeignAbi`.
2852 impl From<call::AdjustForForeignAbiError> for FnAbiError<'_> {
2853 fn from(err: call::AdjustForForeignAbiError) -> Self {
2854 Self::AdjustForForeignAbi(err)
// User-facing rendering simply delegates to whichever wrapped error occurred.
2858 impl<'tcx> fmt::Display for FnAbiError<'tcx> {
2859 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2861 Self::Layout(err) => err.fmt(f),
2862 Self::AdjustForForeignAbi(err) => err.fmt(f),
2867 // FIXME(eddyb) maybe use something like this for an unified `fn_abi_of`, not
2868 // just for error handling.
// Identifies which `fn_abi_of_*` entry point failed, so `handle_fn_abi_err`
// implementations can produce a precise diagnostic.
2870 pub enum FnAbiRequest<'tcx> {
2871 OfFnPtr { sig: ty::PolyFnSig<'tcx>, extra_args: &'tcx ty::List<Ty<'tcx>> },
2872 OfInstance { instance: ty::Instance<'tcx>, extra_args: &'tcx ty::List<Ty<'tcx>> },
2875 /// Trait for contexts that want to be able to compute `FnAbi`s.
2876 /// This automatically gives access to `FnAbiOf`, through a blanket `impl`.
2877 pub trait FnAbiOfHelpers<'tcx>: LayoutOfHelpers<'tcx> {
2878 /// The `&FnAbi`-wrapping type (or `&FnAbi` itself), which will be
2879 /// returned from `fn_abi_of_*` (see also `handle_fn_abi_err`).
2880 type FnAbiOfResult: MaybeResult<&'tcx FnAbi<'tcx, Ty<'tcx>>>;
2882 /// Helper used for `fn_abi_of_*`, to adapt `tcx.fn_abi_of_*(...)` into a
2883 /// `Self::FnAbiOfResult` (which does not need to be a `Result<...>`).
2885 /// Most `impl`s, which propagate `FnAbiError`s, should simply return `err`,
2886 /// but this hook allows e.g. codegen to return only `&FnAbi` from its
2887 /// `cx.fn_abi_of_*(...)`, without any `Result<...>` around it to deal with
2888 /// (and any `FnAbiError`s are turned into fatal errors or ICEs).
2889 fn handle_fn_abi_err(
2891 err: FnAbiError<'tcx>,
2893 fn_abi_request: FnAbiRequest<'tcx>,
2894 ) -> <Self::FnAbiOfResult as MaybeResult<&'tcx FnAbi<'tcx, Ty<'tcx>>>>::Error;
2897 /// Blanket extension trait for contexts that can compute `FnAbi`s.
2898 pub trait FnAbiOf<'tcx>: FnAbiOfHelpers<'tcx> {
2899 /// Compute a `FnAbi` suitable for indirect calls, i.e. to `fn` pointers.
2901 /// NB: this doesn't handle virtual calls - those should use `fn_abi_of_instance`
2902 /// instead, where the instance is an `InstanceDef::Virtual`.
2904 fn fn_abi_of_fn_ptr(
2906 sig: ty::PolyFnSig<'tcx>,
2907 extra_args: &'tcx ty::List<Ty<'tcx>>,
2908 ) -> Self::FnAbiOfResult {
2909 // FIXME(eddyb) get a better `span` here.
2910 let span = self.layout_tcx_at_span();
2911 let tcx = self.tcx().at(span);
// Delegate to the `fn_abi_of_fn_ptr` query; errors go through the
// context's `handle_fn_abi_err` hook (may not return a `Result`).
2913 MaybeResult::from(tcx.fn_abi_of_fn_ptr(self.param_env().and((sig, extra_args))).map_err(
2914 |err| self.handle_fn_abi_err(err, span, FnAbiRequest::OfFnPtr { sig, extra_args }),
2918 /// Compute a `FnAbi` suitable for declaring/defining an `fn` instance, and for
2919 /// direct calls to an `fn`.
2921 /// NB: that includes virtual calls, which are represented by "direct calls"
2922 /// to an `InstanceDef::Virtual` instance (of `<dyn Trait as Trait>::fn`).
2924 fn fn_abi_of_instance(
2926 instance: ty::Instance<'tcx>,
2927 extra_args: &'tcx ty::List<Ty<'tcx>>,
2928 ) -> Self::FnAbiOfResult {
2929 // FIXME(eddyb) get a better `span` here.
2930 let span = self.layout_tcx_at_span();
2931 let tcx = self.tcx().at(span);
2934 tcx.fn_abi_of_instance(self.param_env().and((instance, extra_args))).map_err(|err| {
2935 // HACK(eddyb) at least for definitions of/calls to `Instance`s,
2936 // we can get some kind of span even if one wasn't provided.
2937 // However, we don't do this early in order to avoid calling
2938 // `def_span` unconditionally (which may have a perf penalty).
2939 let span = if !span.is_dummy() { span } else { tcx.def_span(instance.def_id()) };
2940 self.handle_fn_abi_err(err, span, FnAbiRequest::OfInstance { instance, extra_args })
// Blanket impl: any `FnAbiOfHelpers` context gets the `FnAbiOf` methods for free.
2946 impl<'tcx, C: FnAbiOfHelpers<'tcx>> FnAbiOf<'tcx> for C {}
// Query provider for `fn_abi_of_fn_ptr` (registered in `provide` above):
// computes the ABI for an indirect call through a function pointer. Function
// pointers carry no codegen attrs, hence the empty flag set.
2948 fn fn_abi_of_fn_ptr<'tcx>(
2950 query: ty::ParamEnvAnd<'tcx, (ty::PolyFnSig<'tcx>, &'tcx ty::List<Ty<'tcx>>)>,
2951 ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
2952 let (param_env, (sig, extra_args)) = query.into_parts();
2954 LayoutCx { tcx, param_env }.fn_abi_new_uncached(
2958 CodegenFnAttrFlags::empty(),
// Query provider for `fn_abi_of_instance`: computes the ABI for
// declaring/defining or directly calling a monomorphic `Instance`, including
// the implicit `#[track_caller]` location argument and the thin-self
// adjustment for `InstanceDef::Virtual` (vtable) calls.
2963 fn fn_abi_of_instance<'tcx>(
2965 query: ty::ParamEnvAnd<'tcx, (ty::Instance<'tcx>, &'tcx ty::List<Ty<'tcx>>)>,
2966 ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
2967 let (param_env, (instance, extra_args)) = query.into_parts();
2969 let sig = instance.fn_sig_for_fn_abi(tcx, param_env);
// `#[track_caller]` functions take a hidden `&Location` argument.
2971 let caller_location = if instance.def.requires_caller_location(tcx) {
2972 Some(tcx.caller_location_ty())
2977 let attrs = tcx.codegen_fn_attrs(instance.def_id()).flags;
2979 LayoutCx { tcx, param_env }.fn_abi_new_uncached(
2984 matches!(instance.def, ty::InstanceDef::Virtual(..)),
2988 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
2989 // FIXME(eddyb) perhaps group the signature/type-containing (or all of them?)
2990 // arguments of this method, into a separate `struct`.
//
// Core FnAbi construction (shared by both `fn_abi_of_*` query providers):
// normalizes the signature, untuples "rust-call" arguments, computes each
// argument's `ArgAbi` (with Rust-specific scalar attributes), and finally
// applies per-ABI adjustments.
//
// * `extra_args` - trailing argument types beyond the signature (C variadics
//   or untupled "rust-call" arguments).
// * `caller_location` - hidden `&Location` argument for `#[track_caller]`.
// * `force_thin_self_ptr` - virtual (vtable) calls pass `self` as a thin pointer.
2991 fn fn_abi_new_uncached(
2993 sig: ty::PolyFnSig<'tcx>,
2994 extra_args: &[Ty<'tcx>],
2995 caller_location: Option<Ty<'tcx>>,
2996 codegen_fn_attr_flags: CodegenFnAttrFlags,
2997 // FIXME(eddyb) replace this with something typed, like an `enum`.
2998 force_thin_self_ptr: bool,
2999 ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
3000 debug!("fn_abi_new_uncached({:?}, {:?})", sig, extra_args);
3002 let sig = self.tcx.normalize_erasing_late_bound_regions(self.param_env, sig);
3004 let conv = conv_from_spec_abi(self.tcx(), sig.abi);
// For "rust-call" the last input is a tuple whose elements become the real
// trailing arguments; for C variadics the extra args come from the call site.
3006 let mut inputs = sig.inputs();
3007 let extra_args = if sig.abi == RustCall {
3008 assert!(!sig.c_variadic && extra_args.is_empty());
3010 if let Some(input) = sig.inputs().last() {
3011 if let ty::Tuple(tupled_arguments) = input.kind() {
3012 inputs = &sig.inputs()[0..sig.inputs().len() - 1];
3016 "argument to function with \"rust-call\" ABI \
3022 "argument to function with \"rust-call\" ABI \
3027 assert!(sig.c_variadic || extra_args.is_empty());
// Target quirks: these platforms do NOT ignore zero-sized C arguments
// (see the ZST handling in `arg_of` below).
3031 let target = &self.tcx.sess.target;
3032 let target_env_gnu_like = matches!(&target.env[..], "gnu" | "musl" | "uclibc");
3033 let win_x64_gnu = target.os == "windows" && target.arch == "x86_64" && target.env == "gnu";
3034 let linux_s390x_gnu_like =
3035 target.os == "linux" && target.arch == "s390x" && target_env_gnu_like;
3036 let linux_sparc64_gnu_like =
3037 target.os == "linux" && target.arch == "sparc64" && target_env_gnu_like;
3038 let linux_powerpc_gnu_like =
3039 target.os == "linux" && target.arch == "powerpc" && target_env_gnu_like;
3041 let rust_abi = matches!(sig.abi, RustIntrinsic | PlatformIntrinsic | Rust | RustCall);
3043 // Handle safe Rust thin and fat pointers.
3044 let adjust_for_rust_scalar = |attrs: &mut ArgAttributes,
3046 layout: TyAndLayout<'tcx>,
3049 // Booleans are always a noundef i1 that needs to be zero-extended.
3050 if scalar.is_bool() {
3051 attrs.ext(ArgExtension::Zext);
3052 attrs.set(ArgAttribute::NoUndef);
3056 // Scalars which have invalid values cannot be undef.
3057 if !scalar.is_always_valid(self) {
3058 attrs.set(ArgAttribute::NoUndef);
3061 // Only pointer types handled below.
3062 if scalar.value != Pointer {
3066 if !scalar.valid_range.contains(0) {
3067 attrs.set(ArgAttribute::NonNull);
3070 if let Some(pointee) = layout.pointee_info_at(self, offset) {
3071 if let Some(kind) = pointee.safe {
3072 attrs.pointee_align = Some(pointee.align);
3074 // `Box` (`UniqueBorrowed`) are not necessarily dereferenceable
3075 // for the entire duration of the function as they can be deallocated
3076 // at any time. Set their valid size to 0.
3077 attrs.pointee_size = match kind {
3078 PointerKind::UniqueOwned => Size::ZERO,
3082 // `Box`, `&T`, and `&mut T` cannot be undef.
3083 // Note that this only applies to the value of the pointer itself;
3084 // this attribute doesn't make it UB for the pointed-to data to be undef.
3085 attrs.set(ArgAttribute::NoUndef);
3087 // `Box` pointer parameters never alias because ownership is transferred
3088 // `&mut` pointer parameters never alias other parameters,
3089 // or mutable global data
3091 // `&T` where `T` contains no `UnsafeCell<U>` is immutable,
3092 // and can be marked as both `readonly` and `noalias`, as
3093 // LLVM's definition of `noalias` is based solely on memory
3094 // dependencies rather than pointer equality
3096 // Due to past miscompiles in LLVM, we apply a separate NoAliasMutRef attribute
3097 // for UniqueBorrowed arguments, so that the codegen backend can decide whether
3098 // or not to actually emit the attribute. It can also be controlled with the
3099 // `-Zmutable-noalias` debugging option.
3100 let no_alias = match kind {
3101 PointerKind::Shared | PointerKind::UniqueBorrowed => false,
3102 PointerKind::UniqueOwned => true,
3103 PointerKind::Frozen => !is_return,
3106 attrs.set(ArgAttribute::NoAlias);
3109 if kind == PointerKind::Frozen && !is_return {
3110 attrs.set(ArgAttribute::ReadOnly);
3113 if kind == PointerKind::UniqueBorrowed && !is_return {
3114 attrs.set(ArgAttribute::NoAliasMutRef);
// Builds the `ArgAbi` for one input (`arg_idx == Some(i)`) or the return
// value (`arg_idx == None`).
3120 let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| -> Result<_, FnAbiError<'tcx>> {
3121 let is_return = arg_idx.is_none();
3123 let layout = self.layout_of(ty)?;
3124 let layout = if force_thin_self_ptr && arg_idx == Some(0) {
3125 // Don't pass the vtable, it's not an argument of the virtual fn.
3126 // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
3127 // or `&/&mut dyn Trait` because this is special-cased elsewhere in codegen
3128 make_thin_self_ptr(self, layout)
3133 let mut arg = ArgAbi::new(self, layout, |layout, scalar, offset| {
3134 let mut attrs = ArgAttributes::new();
3135 adjust_for_rust_scalar(&mut attrs, scalar, *layout, offset, is_return);
3139 if arg.layout.is_zst() {
3140 // For some forsaken reason, x86_64-pc-windows-gnu
3141 // doesn't ignore zero-sized struct arguments.
3142 // The same is true for {s390x,sparc64,powerpc}-unknown-linux-{gnu,musl,uclibc}.
3146 && !linux_s390x_gnu_like
3147 && !linux_sparc64_gnu_like
3148 && !linux_powerpc_gnu_like)
3150 arg.mode = PassMode::Ignore;
// Assemble the full FnAbi: return value, declared inputs, extra (untupled /
// variadic) args, and the optional `#[track_caller]` location argument.
3157 let mut fn_abi = FnAbi {
3158 ret: arg_of(sig.output(), None)?,
3162 .chain(extra_args.iter().copied())
3163 .chain(caller_location)
3165 .map(|(i, ty)| arg_of(ty, Some(i)))
3166 .collect::<Result<_, _>>()?,
3167 c_variadic: sig.c_variadic,
3168 fixed_count: inputs.len(),
3170 can_unwind: fn_can_unwind(self.tcx(), codegen_fn_attr_flags, sig.abi),
3172 self.fn_abi_adjust_for_abi(&mut fn_abi, sig.abi)?;
3173 debug!("fn_abi_new_uncached = {:?}", fn_abi);
3174 Ok(self.tcx.arena.alloc(fn_abi))
// Applies per-ABI pass-mode adjustments: Rust ABIs get the aggregate/SIMD
// fixups below; all other ABIs delegate to `adjust_for_foreign_abi`.
// `Unadjusted` is exempt from any fixups (early return above the Rust check).
3177 fn fn_abi_adjust_for_abi(
3179 fn_abi: &mut FnAbi<'tcx, Ty<'tcx>>,
3181 ) -> Result<(), FnAbiError<'tcx>> {
3182 if abi == SpecAbi::Unadjusted {
3186 if abi == SpecAbi::Rust
3187 || abi == SpecAbi::RustCall
3188 || abi == SpecAbi::RustIntrinsic
3189 || abi == SpecAbi::PlatformIntrinsic
3191 let fixup = |arg: &mut ArgAbi<'tcx, Ty<'tcx>>| {
3192 if arg.is_ignore() {
3196 match arg.layout.abi {
3197 Abi::Aggregate { .. } => {}
3199 // This is a fun case! The gist of what this is doing is
3200 // that we want callers and callees to always agree on the
3201 // ABI of how they pass SIMD arguments. If we were to *not*
3202 // make these arguments indirect then they'd be immediates
3203 // in LLVM, which means that they'd used whatever the
3204 // appropriate ABI is for the callee and the caller. That
3205 // means, for example, if the caller doesn't have AVX
3206 // enabled but the callee does, then passing an AVX argument
3207 // across this boundary would cause corrupt data to show up.
3209 // This problem is fixed by unconditionally passing SIMD
3210 // arguments through memory between callers and callees
3211 // which should get them all to agree on ABI regardless of
3212 // target feature sets. Some more information about this
3213 // issue can be found in #44367.
3215 // Note that the platform intrinsic ABI is exempt here as
3216 // that's how we connect up to LLVM and it's unstable
3217 // anyway, we control all calls to it in libstd.
3219 if abi != SpecAbi::PlatformIntrinsic
3220 && self.tcx.sess.target.simd_types_indirect =>
3222 arg.make_indirect();
3229 // Pass and return structures up to 2 pointers in size by value, matching `ScalarPair`.
3230 // LLVM will usually pass these in 2 registers, which is more efficient than by-ref.
3231 let max_by_val_size = Pointer.size(self) * 2;
3232 let size = arg.layout.size;
3234 if arg.layout.is_unsized() || size > max_by_val_size {
3235 arg.make_indirect();
3237 // We want to pass small aggregates as immediates, but using
3238 // a LLVM aggregate type for this leads to bad optimizations,
3239 // so we pick an appropriately sized integer type instead.
3240 arg.cast_to(Reg { kind: RegKind::Integer, size });
3243 fixup(&mut fn_abi.ret);
3244 for arg in &mut fn_abi.args {
3248 fn_abi.adjust_for_foreign_abi(self, abi)?;
// For virtual (vtable) calls: converts the `self` receiver's layout to that of
// a thin pointer (`*mut ()`), while keeping the receiver's original type so
// codegen can still special-case it. Unsized receivers are first wrapped as
// `*mut Self`; smart-pointer receivers (e.g. `Rc<Self>`) are unwrapped through
// newtype layers until a built-in pointer is found.
3255 fn make_thin_self_ptr<'tcx>(
3256 cx: &(impl HasTyCtxt<'tcx> + HasParamEnv<'tcx>),
3257 layout: TyAndLayout<'tcx>,
3258 ) -> TyAndLayout<'tcx> {
3260 let fat_pointer_ty = if layout.is_unsized() {
3261 // unsized `self` is passed as a pointer to `self`
3262 // FIXME (mikeyhew) change this to use &own if it is ever added to the language
3263 tcx.mk_mut_ptr(layout.ty)
// Sized receivers must already have a fat-pointer (ScalarPair) layout.
3266 Abi::ScalarPair(..) => (),
3267 _ => bug!("receiver type has unsupported layout: {:?}", layout),
3270 // In the case of Rc<Self>, we need to explicitly pass a *mut RcBox<Self>
3271 // with a Scalar (not ScalarPair) ABI. This is a hack that is understood
3272 // elsewhere in the compiler as a method on a `dyn Trait`.
3273 // To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes until we
3274 // get a built-in pointer type
3275 let mut fat_pointer_layout = layout;
3276 'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr()
3277 && !fat_pointer_layout.ty.is_region_ptr()
3279 for i in 0..fat_pointer_layout.fields.count() {
3280 let field_layout = fat_pointer_layout.field(cx, i);
// Descend into the single non-ZST field (the pointer must live there).
3282 if !field_layout.is_zst() {
3283 fat_pointer_layout = field_layout;
3284 continue 'descend_newtypes;
3288 bug!("receiver has no non-zero-sized fields {:?}", fat_pointer_layout);
3291 fat_pointer_layout.ty
3294 // we now have a type like `*mut RcBox<dyn Trait>`
3295 // change its layout to that of `*mut ()`, a thin pointer, but keep the same type
3296 // this is understood as a special case elsewhere in the compiler
3297 let unit_ptr_ty = tcx.mk_mut_ptr(tcx.mk_unit());
3302 // NOTE(eddyb) using an empty `ParamEnv`, and `unwrap`-ing the `Result`
3303 // should always work because the type is always `*mut ()`.
3304 ..tcx.layout_of(ty::ParamEnv::reveal_all().and(unit_ptr_ty)).unwrap()