1 use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
2 use crate::mir::{GeneratorLayout, GeneratorSavedLocal};
3 use crate::ty::normalize_erasing_regions::NormalizationError;
4 use crate::ty::subst::Subst;
5 use crate::ty::{self, subst::SubstsRef, ReprOptions, Ty, TyCtxt, TypeFoldable};
7 use rustc_attr as attr;
9 use rustc_hir::lang_items::LangItem;
10 use rustc_index::bit_set::BitSet;
11 use rustc_index::vec::{Idx, IndexVec};
12 use rustc_session::{config::OptLevel, DataTypeKind, FieldInfo, SizeKind, VariantInfo};
13 use rustc_span::symbol::Symbol;
14 use rustc_span::{Span, DUMMY_SP};
15 use rustc_target::abi::call::{
16 ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, Conv, FnAbi, PassMode, Reg, RegKind,
18 use rustc_target::abi::*;
19 use rustc_target::spec::{abi::Abi as SpecAbi, HasTargetSpec, PanicStrategy, Target};
24 use std::num::NonZeroUsize;
27 use rand::{seq::SliceRandom, SeedableRng};
28 use rand_xoshiro::Xoshiro128StarStar;
// Registers this module's query providers (`layout_of` plus the fn-ABI
// queries) into the global provider table, keeping all other providers as-is.
30 pub fn provide(providers: &mut ty::query::Providers) {
// NOTE(review): the listing is elided here — the `*providers =` line
// (original line 31) is missing from this capture.
32 ty::query::Providers { layout_of, fn_abi_of_fn_ptr, fn_abi_of_instance, ..*providers };
// Extension trait bridging rustc's various integer-type representations
// (`Ty`, `attr::IntType`, `ty::IntTy`/`ty::UintTy`) and the target-layout
// `Integer` enum.
// NOTE(review): the listing elides lines 40-48 here (presumably the
// `repr_discr` signature and the trait's closing brace — the impl below
// defines a `) -> (Integer, bool)` method body).
35 pub trait IntegerExt {
// Convert this `Integer` width plus a signedness flag to the matching `Ty`.
36 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx>;
// Map an attribute-level `#[repr(..)]` integer type to an `Integer`.
37 fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
// Map `ty::IntTy` / `ty::UintTy` to an `Integer`; isize/usize use the
// target's pointer-sized integer (see the impl).
38 fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer;
39 fn from_uint_ty<C: HasDataLayout>(cx: &C, uty: ty::UintTy) -> Integer;
// Implementation of `IntegerExt` for the target-layout `Integer` enum.
// NOTE(review): this capture elides several lines (closing braces, some
// match headers), so the method bodies below appear partially.
49 impl IntegerExt for Integer {
51 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx> {
// Exhaustive mapping from (width, signedness) to the primitive type.
52 match (*self, signed) {
53 (I8, false) => tcx.types.u8,
54 (I16, false) => tcx.types.u16,
55 (I32, false) => tcx.types.u32,
56 (I64, false) => tcx.types.u64,
57 (I128, false) => tcx.types.u128,
58 (I8, true) => tcx.types.i8,
59 (I16, true) => tcx.types.i16,
60 (I32, true) => tcx.types.i32,
61 (I64, true) => tcx.types.i64,
62 (I128, true) => tcx.types.i128,
66 /// Gets the Integer type from an attr::IntType.
67 fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
68 let dl = cx.data_layout();
// Signed/unsigned pairs of equal width map to the same `Integer`.
71 attr::SignedInt(ast::IntTy::I8) | attr::UnsignedInt(ast::UintTy::U8) => I8,
72 attr::SignedInt(ast::IntTy::I16) | attr::UnsignedInt(ast::UintTy::U16) => I16,
73 attr::SignedInt(ast::IntTy::I32) | attr::UnsignedInt(ast::UintTy::U32) => I32,
74 attr::SignedInt(ast::IntTy::I64) | attr::UnsignedInt(ast::UintTy::U64) => I64,
75 attr::SignedInt(ast::IntTy::I128) | attr::UnsignedInt(ast::UintTy::U128) => I128,
76 attr::SignedInt(ast::IntTy::Isize) | attr::UnsignedInt(ast::UintTy::Usize) => {
// isize/usize resolve to the target's pointer-sized integer.
77 dl.ptr_sized_integer()
82 fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer {
// NOTE(review): the `ty::IntTy::I8 => I8` arm and the `match` header are
// elided in this capture (lines 83-84).
85 ty::IntTy::I16 => I16,
86 ty::IntTy::I32 => I32,
87 ty::IntTy::I64 => I64,
88 ty::IntTy::I128 => I128,
89 ty::IntTy::Isize => cx.data_layout().ptr_sized_integer(),
92 fn from_uint_ty<C: HasDataLayout>(cx: &C, ity: ty::UintTy) -> Integer {
95 ty::UintTy::U16 => I16,
96 ty::UintTy::U32 => I32,
97 ty::UintTy::U64 => I64,
98 ty::UintTy::U128 => I128,
99 ty::UintTy::Usize => cx.data_layout().ptr_sized_integer(),
103 /// Finds the appropriate Integer type and signedness for the given
104 /// signed discriminant range and `#[repr]` attribute.
105 /// N.B.: `u128` values above `i128::MAX` will be treated as signed, but
106 /// that shouldn't affect anything, other than maybe debuginfo.
113 ) -> (Integer, bool) {
114 // Theoretically, negative values could be larger in unsigned representation
115 // than the unsigned representation of the signed minimum. However, if there
116 // are any negative values, the only valid unsigned representation is u128
117 // which can fit all i128 values, so the result remains unaffected.
118 let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
119 let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));
// An explicit `#[repr(int)]` hint must be able to hold the whole
// discriminant range; if not, that is a compiler bug (message below).
121 if let Some(ity) = repr.int {
122 let discr = Integer::from_attr(&tcx, ity);
123 let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
126 "Integer::repr_discr: `#[repr]` hint too small for \
127 discriminant range of enum `{}",
131 return (discr, ity.is_signed());
// No explicit hint: choose the minimum width the repr allows.
134 let at_least = if repr.c() {
135 // This is usually I32, however it can be different on some platforms,
136 // notably hexagon and arm-none/thumb-none
137 tcx.data_layout().c_enum_min_size
139 // repr(Rust) enums try to be as small as possible
143 // If there are no negative values, we can use the unsigned fit.
145 (cmp::max(unsigned_fit, at_least), false)
147 (cmp::max(signed_fit, at_least), true)
// Extension trait for the layout `Primitive` enum: conversions back to `Ty`.
152 pub trait PrimitiveExt {
// The `Ty` this primitive directly corresponds to.
153 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
// An integer `Ty` matching this primitive (see the impl; floats are rejected).
154 fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
// `Primitive` → `Ty` conversions.
// NOTE(review): the `match *self {` headers and closing braces of both
// methods are elided in this capture.
157 impl PrimitiveExt for Primitive {
159 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
161 Int(i, signed) => i.to_ty(tcx, signed),
162 F32 => tcx.types.f32,
163 F64 => tcx.types.f64,
// An abstract pointer primitive is rendered as `*mut ()`.
164 Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
168 /// Return an *integer* type matching this primitive.
169 /// Useful in particular when dealing with enum discriminants.
171 fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
173 Int(i, signed) => i.to_ty(tcx, signed),
// Pointers map to `usize`, the pointer-sized unsigned integer.
174 Pointer => tcx.types.usize,
// Floats have no corresponding integer type; reaching this is a compiler bug.
175 F32 | F64 => bug!("floats do not have an int type"),
/// The first half of a fat pointer.
///
/// - For a trait object, this is the address of the box.
/// - For a slice, this is the base address.
pub const FAT_PTR_ADDR: usize = 0;

/// The second half of a fat pointer.
///
/// - For a trait object, this is the address of the vtable.
/// - For a slice, this is the length.
pub const FAT_PTR_EXTRA: usize = 1;

/// The maximum supported number of lanes in a SIMD vector.
///
/// This value is selected based on backend support:
/// * LLVM does not appear to have a vector width limit.
/// * Cranelift stores the base-2 log of the lane count in a 4 bit integer.
pub const MAX_SIMD_LANES: u64 = 1 << 0xF;
199 #[derive(Copy, Clone, Debug, HashStable, TyEncodable, TyDecodable)]
200 pub enum LayoutError<'tcx> {
202 SizeOverflow(Ty<'tcx>),
203 NormalizationFailure(Ty<'tcx>, NormalizationError<'tcx>),
// Human-readable rendering of layout errors, used in diagnostics.
// NOTE(review): the `match *self {` header and the closing braces/arms are
// elided in this capture.
206 impl<'tcx> fmt::Display for LayoutError<'tcx> {
207 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
209 LayoutError::Unknown(ty) => write!(f, "the type `{}` has an unknown layout", ty),
210 LayoutError::SizeOverflow(ty) => {
211 write!(f, "values of the type `{}` are too big for the current architecture", ty)
213 LayoutError::NormalizationFailure(t, e) => write!(
215 "unable to determine layout for `{}` because `{}` cannot be normalized",
// The error explains which type failed to normalize.
217 e.get_type_for_failure()
// The `layout_of` query provider: computes the layout of a type in a given
// `ParamEnv`, normalizing the type first and caching under both the original
// and the normalized key.
// NOTE(review): the `fn layout_of(` signature line itself (original lines
// 224-225) is elided from this capture.
223 #[instrument(skip(tcx, query), level = "debug")]
226 query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
227 ) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
228 ty::tls::with_related_context(tcx, move |icx| {
229 let (param_env, ty) = query.into_parts();
// Guard against unbounded recursion through nested types: abort compilation
// once `layout_depth` exceeds the recursion limit.
232 if !tcx.recursion_limit().value_within_limit(icx.layout_depth) {
233 tcx.sess.fatal(&format!("overflow representing the type `{}`", ty));
236 // Update the ImplicitCtxt to increase the layout_depth
237 let icx = ty::tls::ImplicitCtxt { layout_depth: icx.layout_depth + 1, ..icx.clone() };
239 ty::tls::enter_context(&icx, |_| {
240 let param_env = param_env.with_reveal_all_normalized(tcx);
241 let unnormalized_ty = ty;
243 // FIXME: We might want to have two different versions of `layout_of`:
244 // One that can be called after typecheck has completed and can use
245 // `normalize_erasing_regions` here and another one that can be called
246 // before typecheck has completed and uses `try_normalize_erasing_regions`.
247 let ty = match tcx.try_normalize_erasing_regions(param_env, ty) {
249 Err(normalization_error) => {
// Normalization failure is reported as a layout error, not an ICE.
250 return Err(LayoutError::NormalizationFailure(ty, normalization_error));
// If normalization changed the type, defer to the query on the normalized
// type so both keys end up sharing one cached layout.
254 if ty != unnormalized_ty {
255 // Ensure this layout is also cached for the normalized type.
256 return tcx.layout_of(param_env.and(ty));
259 let cx = LayoutCx { tcx, param_env };
261 let layout = cx.layout_of_uncached(ty)?;
262 let layout = TyAndLayout { ty, layout };
// Presumably records the layout for later size/statistics printing —
// name-based inference; confirm against `record_layout_for_printing`.
264 cx.record_layout_for_printing(layout);
266 // Type-level uninhabitedness should always imply ABI uninhabitedness.
267 if tcx.conservative_is_privately_uninhabited(param_env.and(ty)) {
268 assert!(layout.abi.is_uninhabited());
276 pub struct LayoutCx<'tcx, C> {
278 pub param_env: ty::ParamEnv<'tcx>,
281 #[derive(Copy, Clone, Debug)]
283 /// A tuple, closure, or univariant which cannot be coerced to unsized.
285 /// A univariant, the last field of which may be coerced to unsized.
287 /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
288 Prefixed(Size, Align),
// Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
// This is used to go between `memory_index` (source field order to memory order)
// and `inverse_memory_index` (memory order to source field order).
// See also `FieldsShape::Arbitrary::memory_index` for more details.
// FIXME(eddyb) build a better abstraction for permutations, if possible.
fn invert_mapping(map: &[u32]) -> Vec<u32> {
    let mut inverse = vec![0; map.len()];
    for (src_pos, &mapped_pos) in map.iter().enumerate() {
        inverse[mapped_pos as usize] = src_pos as u32;
    }
    inverse
}
// Layout computation methods on the fully-instantiated context.
304 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
// Build the layout for a pair of scalars (e.g. a wide pointer): `a` at
// offset 0, `b` at `a`'s size rounded up to `b`'s ABI alignment.
// NOTE(review): lines 311, 315, 318-319, 324 and the closing lines are
// elided in this capture.
305 fn scalar_pair(&self, a: Scalar, b: Scalar) -> LayoutS<'tcx> {
306 let dl = self.data_layout();
307 let b_align = b.align(dl);
// Overall alignment is the max of both scalars and the aggregate alignment.
308 let align = a.align(dl).max(b_align).max(dl.aggregate_align);
309 let b_offset = a.size(dl).align_to(b_align.abi);
310 let size = (b_offset + b.size(dl)).align_to(align.abi);
312 // HACK(nox): We iter on `b` and then `a` because `max_by_key`
313 // returns the last maximum.
314 let largest_niche = Niche::from_scalar(dl, b_offset, b)
316 .chain(Niche::from_scalar(dl, Size::ZERO, a))
317 .max_by_key(|niche| niche.available(dl));
// Single variant; the two fields sit at offset 0 and `b_offset`, in
// source order (memory_index is the identity).
320 variants: Variants::Single { index: VariantIdx::new(0) },
321 fields: FieldsShape::Arbitrary {
322 offsets: vec![Size::ZERO, b_offset],
323 memory_index: vec![0, 1],
325 abi: Abi::ScalarPair(a, b),
// Computes the (un-interned) layout of a struct-like aggregate: field
// ordering (randomized under `-Z randomize-layout`, otherwise optimized),
// offsets, alignment, size, largest niche, and ABI. `kind` encodes
// prefix/unsizedness constraints. Errors on contradictory repr attributes
// or size overflow.
// NOTE(review): this capture elides many lines (parameter list, several
// closing braces and arms), so the body below is shown partially.
332 fn univariant_uninterned(
335 fields: &[TyAndLayout<'_>],
338 ) -> Result<LayoutS<'tcx>, LayoutError<'tcx>> {
339 let dl = self.data_layout();
340 let pack = repr.pack;
// `#[repr(packed)]` and `#[repr(align)]` are mutually exclusive; only
// delay a bug here since typeck should have rejected this already.
341 if pack.is_some() && repr.align.is_some() {
342 self.tcx.sess.delay_span_bug(DUMMY_SP, "struct cannot be packed and aligned");
343 return Err(LayoutError::Unknown(ty));
346 let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };
// Start from the identity permutation (memory order == source order).
348 let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();
350 let optimize = !repr.inhibit_struct_field_reordering_opt();
// A potentially-unsized last field must stay last, so it is excluded from
// the reorderable prefix.
353 if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
354 let optimizing = &mut inverse_memory_index[..end];
// Effective per-field alignment, capped by `#[repr(packed(N))]` if present.
355 let field_align = |f: &TyAndLayout<'_>| {
356 if let Some(pack) = pack { f.align.abi.min(pack) } else { f.align.abi }
359 // If `-Z randomize-layout` was enabled for the type definition we can shuffle
360 // the field ordering to try and catch some code making assumptions about layouts
361 // we don't guarantee
362 if repr.can_randomize_type_layout() {
363 // `ReprOptions.layout_seed` is a deterministic seed that we can use to
364 // randomize field ordering with
365 let mut rng = Xoshiro128StarStar::seed_from_u64(repr.field_shuffle_seed);
367 // Shuffle the ordering of the fields
368 optimizing.shuffle(&mut rng);
370 // Otherwise we just leave things alone and actually optimize the type's fields
373 StructKind::AlwaysSized | StructKind::MaybeUnsized => {
374 optimizing.sort_by_key(|&x| {
375 // Place ZSTs first to avoid "interesting offsets",
376 // especially with only one or two non-ZST fields.
377 let f = &fields[x as usize];
378 (!f.is_zst(), cmp::Reverse(field_align(f)))
382 StructKind::Prefixed(..) => {
383 // Sort in ascending alignment so that the layout stays optimal
384 // regardless of the prefix
385 optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
389 // FIXME(Kixiron): We can always shuffle fields within a given alignment class
390 // regardless of the status of `-Z randomize-layout`
394 // inverse_memory_index holds field indices by increasing memory offset.
395 // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
396 // We now write field offsets to the corresponding offset slot;
397 // field 5 with offset 0 puts 0 in offsets[5].
398 // At the bottom of this function, we invert `inverse_memory_index` to
399 // produce `memory_index` (see `invert_mapping`).
401 let mut sized = true;
402 let mut offsets = vec![Size::ZERO; fields.len()];
403 let mut offset = Size::ZERO;
404 let mut largest_niche = None;
405 let mut largest_niche_available = 0;
// A prefix (e.g. an enum tag) reserves space and alignment up front.
407 if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
409 if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
410 align = align.max(AbiAndPrefAlign::new(prefix_align));
411 offset = prefix_size.align_to(prefix_align);
// Lay fields out in the (possibly reordered) memory order.
414 for &i in &inverse_memory_index {
415 let field = fields[i as usize];
// Only the last field may be unsized; anything after one is a bug.
417 self.tcx.sess.delay_span_bug(
420 "univariant: field #{} of `{}` comes after unsized field",
427 if field.is_unsized() {
431 // Invariant: offset < dl.obj_size_bound() <= 1<<61
432 let field_align = if let Some(pack) = pack {
433 field.align.min(AbiAndPrefAlign::new(pack))
437 offset = offset.align_to(field_align.abi);
438 align = align.max(field_align);
440 debug!("univariant offset: {:?} field: {:#?}", offset, field);
441 offsets[i as usize] = offset;
// Track the largest niche across all fields, translating its offset into
// the struct's coordinate space.
443 if !repr.hide_niche() {
444 if let Some(mut niche) = field.largest_niche {
445 let available = niche.available(dl);
446 if available > largest_niche_available {
447 largest_niche_available = available;
448 niche.offset += offset;
449 largest_niche = Some(niche);
// Advance past the field, failing on address-space overflow.
454 offset = offset.checked_add(field.size, dl).ok_or(LayoutError::SizeOverflow(ty))?;
457 if let Some(repr_align) = repr.align {
458 align = align.max(AbiAndPrefAlign::new(repr_align));
461 debug!("univariant min_size: {:?}", offset);
462 let min_size = offset;
464 // As stated above, inverse_memory_index holds field indices by increasing offset.
465 // This makes it an already-sorted view of the offsets vec.
466 // To invert it, consider:
467 // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
468 // Field 5 would be the first element, so memory_index is i:
469 // Note: if we didn't optimize, it's already right.
472 if optimize { invert_mapping(&inverse_memory_index) } else { inverse_memory_index };
474 let size = min_size.align_to(align.abi);
475 let mut abi = Abi::Aggregate { sized };
477 // Unpack newtype ABIs and find scalar pairs.
478 if sized && size.bytes() > 0 {
479 // All other fields must be ZSTs.
480 let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());
482 match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
483 // We have exactly one non-ZST field.
484 (Some((i, field)), None, None) => {
485 // Field fills the struct and it has a scalar or scalar pair ABI.
486 if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size
489 // For plain scalars, or vectors of them, we can't unpack
490 // newtypes for `#[repr(C)]`, as that affects C ABIs.
491 Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
494 // But scalar pairs are Rust-specific and get
495 // treated as aggregates by C ABIs anyway.
496 Abi::ScalarPair(..) => {
504 // Two non-ZST fields, and they're both scalars.
505 (Some((i, a)), Some((j, b)), None) => {
506 match (a.abi, b.abi) {
507 (Abi::Scalar(a), Abi::Scalar(b)) => {
508 // Order by the memory placement, not source order.
509 let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
// Verify the candidate `ScalarPair` layout agrees with what we computed
// field-by-field before adopting it.
514 let pair = self.scalar_pair(a, b);
515 let pair_offsets = match pair.fields {
516 FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
517 assert_eq!(memory_index, &[0, 1]);
522 if offsets[i] == pair_offsets[0]
523 && offsets[j] == pair_offsets[1]
524 && align == pair.align
527 // We can use `ScalarPair` only when it matches our
528 // already computed layout (including `#[repr(C)]`).
// Any uninhabited field makes the whole aggregate uninhabited.
540 if fields.iter().any(|f| f.abi.is_uninhabited()) {
541 abi = Abi::Uninhabited;
545 variants: Variants::Single { index: VariantIdx::new(0) },
546 fields: FieldsShape::Arbitrary { offsets, memory_index },
554 fn layout_of_uncached(&self, ty: Ty<'tcx>) -> Result<Layout<'tcx>, LayoutError<'tcx>> {
556 let param_env = self.param_env;
557 let dl = self.data_layout();
558 let scalar_unit = |value: Primitive| {
559 let size = value.size(dl);
560 assert!(size.bits() <= 128);
561 Scalar::Initialized { value, valid_range: WrappingRange::full(size) }
564 |value: Primitive| tcx.intern_layout(LayoutS::scalar(self, scalar_unit(value)));
566 let univariant = |fields: &[TyAndLayout<'_>], repr: &ReprOptions, kind| {
567 Ok(tcx.intern_layout(self.univariant_uninterned(ty, fields, repr, kind)?))
569 debug_assert!(!ty.has_infer_types_or_consts());
571 Ok(match *ty.kind() {
573 ty::Bool => tcx.intern_layout(LayoutS::scalar(
575 Scalar::Initialized {
576 value: Int(I8, false),
577 valid_range: WrappingRange { start: 0, end: 1 },
580 ty::Char => tcx.intern_layout(LayoutS::scalar(
582 Scalar::Initialized {
583 value: Int(I32, false),
584 valid_range: WrappingRange { start: 0, end: 0x10FFFF },
587 ty::Int(ity) => scalar(Int(Integer::from_int_ty(dl, ity), true)),
588 ty::Uint(ity) => scalar(Int(Integer::from_uint_ty(dl, ity), false)),
589 ty::Float(fty) => scalar(match fty {
590 ty::FloatTy::F32 => F32,
591 ty::FloatTy::F64 => F64,
594 let mut ptr = scalar_unit(Pointer);
595 ptr.valid_range_mut().start = 1;
596 tcx.intern_layout(LayoutS::scalar(self, ptr))
600 ty::Never => tcx.intern_layout(LayoutS {
601 variants: Variants::Single { index: VariantIdx::new(0) },
602 fields: FieldsShape::Primitive,
603 abi: Abi::Uninhabited,
609 // Potentially-wide pointers.
610 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
611 let mut data_ptr = scalar_unit(Pointer);
612 if !ty.is_unsafe_ptr() {
613 data_ptr.valid_range_mut().start = 1;
616 let pointee = tcx.normalize_erasing_regions(param_env, pointee);
617 if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
618 return Ok(tcx.intern_layout(LayoutS::scalar(self, data_ptr)));
621 let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
622 let metadata = match unsized_part.kind() {
624 return Ok(tcx.intern_layout(LayoutS::scalar(self, data_ptr)));
626 ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)),
628 let mut vtable = scalar_unit(Pointer);
629 vtable.valid_range_mut().start = 1;
632 _ => return Err(LayoutError::Unknown(unsized_part)),
635 // Effectively a (ptr, meta) tuple.
636 tcx.intern_layout(self.scalar_pair(data_ptr, metadata))
639 // Arrays and slices.
640 ty::Array(element, mut count) => {
641 if count.has_projections() {
642 count = tcx.normalize_erasing_regions(param_env, count);
643 if count.has_projections() {
644 return Err(LayoutError::Unknown(ty));
648 let count = count.try_eval_usize(tcx, param_env).ok_or(LayoutError::Unknown(ty))?;
649 let element = self.layout_of(element)?;
651 element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;
654 if count != 0 && tcx.conservative_is_privately_uninhabited(param_env.and(ty)) {
657 Abi::Aggregate { sized: true }
660 let largest_niche = if count != 0 { element.largest_niche } else { None };
662 tcx.intern_layout(LayoutS {
663 variants: Variants::Single { index: VariantIdx::new(0) },
664 fields: FieldsShape::Array { stride: element.size, count },
667 align: element.align,
671 ty::Slice(element) => {
672 let element = self.layout_of(element)?;
673 tcx.intern_layout(LayoutS {
674 variants: Variants::Single { index: VariantIdx::new(0) },
675 fields: FieldsShape::Array { stride: element.size, count: 0 },
676 abi: Abi::Aggregate { sized: false },
678 align: element.align,
682 ty::Str => tcx.intern_layout(LayoutS {
683 variants: Variants::Single { index: VariantIdx::new(0) },
684 fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
685 abi: Abi::Aggregate { sized: false },
692 ty::FnDef(..) => univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?,
693 ty::Dynamic(..) | ty::Foreign(..) => {
694 let mut unit = self.univariant_uninterned(
697 &ReprOptions::default(),
698 StructKind::AlwaysSized,
701 Abi::Aggregate { ref mut sized } => *sized = false,
704 tcx.intern_layout(unit)
707 ty::Generator(def_id, substs, _) => self.generator_layout(ty, def_id, substs)?,
709 ty::Closure(_, ref substs) => {
710 let tys = substs.as_closure().upvar_tys();
712 &tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
713 &ReprOptions::default(),
714 StructKind::AlwaysSized,
720 if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };
723 &tys.iter().map(|k| self.layout_of(k)).collect::<Result<Vec<_>, _>>()?,
724 &ReprOptions::default(),
729 // SIMD vector types.
730 ty::Adt(def, substs) if def.repr().simd() => {
731 if !def.is_struct() {
732 // Should have yielded E0517 by now.
733 tcx.sess.delay_span_bug(
735 "#[repr(simd)] was applied to an ADT that is not a struct",
737 return Err(LayoutError::Unknown(ty));
740 // Supported SIMD vectors are homogeneous ADTs with at least one field:
742 // * #[repr(simd)] struct S(T, T, T, T);
743 // * #[repr(simd)] struct S { x: T, y: T, z: T, w: T }
744 // * #[repr(simd)] struct S([T; 4])
746 // where T is a primitive scalar (integer/float/pointer).
748 // SIMD vectors with zero fields are not supported.
749 // (should be caught by typeck)
750 if def.non_enum_variant().fields.is_empty() {
751 tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
754 // Type of the first ADT field:
755 let f0_ty = def.non_enum_variant().fields[0].ty(tcx, substs);
757 // Heterogeneous SIMD vectors are not supported:
758 // (should be caught by typeck)
759 for fi in &def.non_enum_variant().fields {
760 if fi.ty(tcx, substs) != f0_ty {
761 tcx.sess.fatal(&format!("monomorphising heterogeneous SIMD type `{}`", ty));
765 // The element type and number of elements of the SIMD vector
766 // are obtained from:
768 // * the element type and length of the single array field, if
769 // the first field is of array type, or
771 // * the homogenous field type and the number of fields.
772 let (e_ty, e_len, is_array) = if let ty::Array(e_ty, _) = f0_ty.kind() {
773 // First ADT field is an array:
775 // SIMD vectors with multiple array fields are not supported:
776 // (should be caught by typeck)
777 if def.non_enum_variant().fields.len() != 1 {
778 tcx.sess.fatal(&format!(
779 "monomorphising SIMD type `{}` with more than one array field",
784 // Extract the number of elements from the layout of the array field:
785 let FieldsShape::Array { count, .. } = self.layout_of(f0_ty)?.layout.fields() else {
786 return Err(LayoutError::Unknown(ty));
789 (*e_ty, *count, true)
791 // First ADT field is not an array:
792 (f0_ty, def.non_enum_variant().fields.len() as _, false)
795 // SIMD vectors of zero length are not supported.
796 // Additionally, lengths are capped at 2^16 as a fixed maximum backends must
799 // Can't be caught in typeck if the array length is generic.
801 tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
802 } else if e_len > MAX_SIMD_LANES {
803 tcx.sess.fatal(&format!(
804 "monomorphising SIMD type `{}` of length greater than {}",
809 // Compute the ABI of the element type:
810 let e_ly = self.layout_of(e_ty)?;
811 let Abi::Scalar(e_abi) = e_ly.abi else {
812 // This error isn't caught in typeck, e.g., if
813 // the element type of the vector is generic.
814 tcx.sess.fatal(&format!(
815 "monomorphising SIMD type `{}` with a non-primitive-scalar \
816 (integer/float/pointer) element type `{}`",
821 // Compute the size and alignment of the vector:
822 let size = e_ly.size.checked_mul(e_len, dl).ok_or(LayoutError::SizeOverflow(ty))?;
823 let align = dl.vector_align(size);
824 let size = size.align_to(align.abi);
826 // Compute the placement of the vector fields:
827 let fields = if is_array {
828 FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] }
830 FieldsShape::Array { stride: e_ly.size, count: e_len }
833 tcx.intern_layout(LayoutS {
834 variants: Variants::Single { index: VariantIdx::new(0) },
836 abi: Abi::Vector { element: e_abi, count: e_len },
837 largest_niche: e_ly.largest_niche,
844 ty::Adt(def, substs) => {
845 // Cache the field layouts.
852 .map(|field| self.layout_of(field.ty(tcx, substs)))
853 .collect::<Result<Vec<_>, _>>()
855 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
858 if def.repr().pack.is_some() && def.repr().align.is_some() {
859 self.tcx.sess.delay_span_bug(
860 tcx.def_span(def.did()),
861 "union cannot be packed and aligned",
863 return Err(LayoutError::Unknown(ty));
867 if def.repr().pack.is_some() { dl.i8_align } else { dl.aggregate_align };
869 if let Some(repr_align) = def.repr().align {
870 align = align.max(AbiAndPrefAlign::new(repr_align));
873 let optimize = !def.repr().inhibit_union_abi_opt();
874 let mut size = Size::ZERO;
875 let mut abi = Abi::Aggregate { sized: true };
876 let index = VariantIdx::new(0);
877 for field in &variants[index] {
878 assert!(!field.is_unsized());
879 align = align.max(field.align);
881 // If all non-ZST fields have the same ABI, forward this ABI
882 if optimize && !field.is_zst() {
883 // Discard valid range information and allow undef
884 let field_abi = match field.abi {
885 Abi::Scalar(x) => Abi::Scalar(x.to_union()),
886 Abi::ScalarPair(x, y) => {
887 Abi::ScalarPair(x.to_union(), y.to_union())
889 Abi::Vector { element: x, count } => {
890 Abi::Vector { element: x.to_union(), count }
892 Abi::Uninhabited | Abi::Aggregate { .. } => {
893 Abi::Aggregate { sized: true }
897 if size == Size::ZERO {
898 // first non ZST: initialize 'abi'
900 } else if abi != field_abi {
901 // different fields have different ABI: reset to Aggregate
902 abi = Abi::Aggregate { sized: true };
906 size = cmp::max(size, field.size);
909 if let Some(pack) = def.repr().pack {
910 align = align.min(AbiAndPrefAlign::new(pack));
913 return Ok(tcx.intern_layout(LayoutS {
914 variants: Variants::Single { index },
915 fields: FieldsShape::Union(
916 NonZeroUsize::new(variants[index].len())
917 .ok_or(LayoutError::Unknown(ty))?,
922 size: size.align_to(align.abi),
926 // A variant is absent if it's uninhabited and only has ZST fields.
927 // Present uninhabited variants only require space for their fields,
928 // but *not* an encoding of the discriminant (e.g., a tag value).
929 // See issue #49298 for more details on the need to leave space
930 // for non-ZST uninhabited data (mostly partial initialization).
931 let absent = |fields: &[TyAndLayout<'_>]| {
932 let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
933 let is_zst = fields.iter().all(|f| f.is_zst());
934 uninhabited && is_zst
936 let (present_first, present_second) = {
937 let mut present_variants = variants
939 .filter_map(|(i, v)| if absent(v) { None } else { Some(i) });
940 (present_variants.next(), present_variants.next())
942 let present_first = match present_first {
943 Some(present_first) => present_first,
944 // Uninhabited because it has no variants, or only absent ones.
945 None if def.is_enum() => {
946 return Ok(tcx.layout_of(param_env.and(tcx.types.never))?.layout);
948 // If it's a struct, still compute a layout so that we can still compute the
950 None => VariantIdx::new(0),
953 let is_struct = !def.is_enum() ||
954 // Only one variant is present.
955 (present_second.is_none() &&
956 // Representation optimizations are allowed.
957 !def.repr().inhibit_enum_layout_opt());
959 // Struct, or univariant enum equivalent to a struct.
960 // (Typechecking will reject discriminant-sizing attrs.)
962 let v = present_first;
963 let kind = if def.is_enum() || variants[v].is_empty() {
964 StructKind::AlwaysSized
966 let param_env = tcx.param_env(def.did());
967 let last_field = def.variant(v).fields.last().unwrap();
969 tcx.type_of(last_field.did).is_sized(tcx.at(DUMMY_SP), param_env);
971 StructKind::MaybeUnsized
973 StructKind::AlwaysSized
977 let mut st = self.univariant_uninterned(ty, &variants[v], &def.repr(), kind)?;
978 st.variants = Variants::Single { index: v };
979 let (start, end) = self.tcx.layout_scalar_valid_range(def.did());
981 Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
982 // the asserts ensure that we are not using the
983 // `#[rustc_layout_scalar_valid_range(n)]`
984 // attribute to widen the range of anything as that would probably
985 // result in UB somewhere
986 // FIXME(eddyb) the asserts are probably not needed,
987 // as larger validity ranges would result in missed
988 // optimizations, *not* wrongly assuming the inner
989 // value is valid. e.g. unions enlarge validity ranges,
990 // because the values may be uninitialized.
991 if let Bound::Included(start) = start {
992 // FIXME(eddyb) this might be incorrect - it doesn't
993 // account for wrap-around (end < start) ranges.
994 let valid_range = scalar.valid_range_mut();
995 assert!(valid_range.start <= start);
996 valid_range.start = start;
998 if let Bound::Included(end) = end {
999 // FIXME(eddyb) this might be incorrect - it doesn't
1000 // account for wrap-around (end < start) ranges.
1001 let valid_range = scalar.valid_range_mut();
1002 assert!(valid_range.end >= end);
1003 valid_range.end = end;
1006 // Update `largest_niche` if we have introduced a larger niche.
1007 let niche = if def.repr().hide_niche() {
1010 Niche::from_scalar(dl, Size::ZERO, *scalar)
1012 if let Some(niche) = niche {
1013 match st.largest_niche {
1014 Some(largest_niche) => {
1015 // Replace the existing niche even if they're equal,
1016 // because this one is at a lower offset.
1017 if largest_niche.available(dl) <= niche.available(dl) {
1018 st.largest_niche = Some(niche);
1021 None => st.largest_niche = Some(niche),
1026 start == Bound::Unbounded && end == Bound::Unbounded,
1027 "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
1033 return Ok(tcx.intern_layout(st));
1036 // At this point, we have handled all unions and
1037 // structs. (We have also handled univariant enums
1038 // that allow representation optimization.)
1039 assert!(def.is_enum());
1041 // The current code for niche-filling relies on variant indices
1042 // instead of actual discriminants, so dataful enums with
1043 // explicit discriminants (RFC #2363) would misbehave.
1044 let no_explicit_discriminants = def
1047 .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i.as_u32()));
1049 let mut niche_filling_layout = None;
1051 // Niche-filling enum optimization.
1052 if !def.repr().inhibit_enum_layout_opt() && no_explicit_discriminants {
1053 let mut dataful_variant = None;
1054 let mut niche_variants = VariantIdx::MAX..=VariantIdx::new(0);
1056 // Find one non-ZST variant.
1057 'variants: for (v, fields) in variants.iter_enumerated() {
1063 if dataful_variant.is_none() {
1064 dataful_variant = Some(v);
1067 dataful_variant = None;
1072 niche_variants = *niche_variants.start().min(&v)..=v;
1075 if niche_variants.start() > niche_variants.end() {
1076 dataful_variant = None;
1079 if let Some(i) = dataful_variant {
1080 let count = (niche_variants.end().as_u32()
1081 - niche_variants.start().as_u32()
1084 // Find the field with the largest niche
1085 let niche_candidate = variants[i]
1088 .filter_map(|(j, field)| Some((j, field.largest_niche?)))
1089 .max_by_key(|(_, niche)| niche.available(dl));
1091 if let Some((field_index, niche, (niche_start, niche_scalar))) =
1092 niche_candidate.and_then(|(field_index, niche)| {
1093 Some((field_index, niche, niche.reserve(self, count)?))
1096 let mut align = dl.aggregate_align;
1100 let mut st = self.univariant_uninterned(
1104 StructKind::AlwaysSized,
1106 st.variants = Variants::Single { index: j };
1108 align = align.max(st.align);
1110 Ok(tcx.intern_layout(st))
1112 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1114 let offset = st[i].fields().offset(field_index) + niche.offset;
1115 let size = st[i].size();
1117 let abi = if st.iter().all(|v| v.abi().is_uninhabited()) {
1121 Abi::Scalar(_) => Abi::Scalar(niche_scalar),
1122 Abi::ScalarPair(first, second) => {
1123 // Only the niche is guaranteed to be initialised,
1124 // so use union layout for the other primitive.
1125 if offset.bytes() == 0 {
1126 Abi::ScalarPair(niche_scalar, second.to_union())
1128 Abi::ScalarPair(first.to_union(), niche_scalar)
1131 _ => Abi::Aggregate { sized: true },
1135 let largest_niche = Niche::from_scalar(dl, offset, niche_scalar);
1137 niche_filling_layout = Some(LayoutS {
1138 variants: Variants::Multiple {
1140 tag_encoding: TagEncoding::Niche {
1148 fields: FieldsShape::Arbitrary {
1149 offsets: vec![offset],
1150 memory_index: vec![0],
1161 let (mut min, mut max) = (i128::MAX, i128::MIN);
1162 let discr_type = def.repr().discr_type();
1163 let bits = Integer::from_attr(self, discr_type).size().bits();
1164 for (i, discr) in def.discriminants(tcx) {
1165 if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
1168 let mut x = discr.val as i128;
1169 if discr_type.is_signed() {
1170 // sign extend the raw representation to be an i128
1171 x = (x << (128 - bits)) >> (128 - bits);
1180 // We might have no inhabited variants, so pretend there's at least one.
1181 if (min, max) == (i128::MAX, i128::MIN) {
1185 assert!(min <= max, "discriminant range is {}...{}", min, max);
1186 let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr(), min, max);
1188 let mut align = dl.aggregate_align;
1189 let mut size = Size::ZERO;
1191 // We're interested in the smallest alignment, so start large.
1192 let mut start_align = Align::from_bytes(256).unwrap();
1193 assert_eq!(Integer::for_align(dl, start_align), None);
1195 // repr(C) on an enum tells us to make a (tag, union) layout,
1196 // so we need to grow the prefix alignment to be at least
1197 // the alignment of the union. (This value is used both for
1198 // determining the alignment of the overall enum, and the
1199 // determining the alignment of the payload after the tag.)
1200 let mut prefix_align = min_ity.align(dl).abi;
1202 for fields in &variants {
1203 for field in fields {
1204 prefix_align = prefix_align.max(field.align.abi);
1209 // Create the set of structs that represent each variant.
1210 let mut layout_variants = variants
1212 .map(|(i, field_layouts)| {
1213 let mut st = self.univariant_uninterned(
1217 StructKind::Prefixed(min_ity.size(), prefix_align),
1219 st.variants = Variants::Single { index: i };
1220 // Find the first field we can't move later
1221 // to make room for a larger discriminant.
1223 st.fields.index_by_increasing_offset().map(|j| field_layouts[j])
1225 if !field.is_zst() || field.align.abi.bytes() != 1 {
1226 start_align = start_align.min(field.align.abi);
1230 size = cmp::max(size, st.size);
1231 align = align.max(st.align);
1234 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1236 // Align the maximum variant size to the largest alignment.
1237 size = size.align_to(align.abi);
1239 if size.bytes() >= dl.obj_size_bound() {
1240 return Err(LayoutError::SizeOverflow(ty));
1243 let typeck_ity = Integer::from_attr(dl, def.repr().discr_type());
1244 if typeck_ity < min_ity {
1245 // It is a bug if Layout decided on a greater discriminant size than typeck for
1246 // some reason at this point (based on values discriminant can take on). Mostly
1247 // because this discriminant will be loaded, and then stored into variable of
1248 // type calculated by typeck. Consider such case (a bug): typeck decided on
1249 // byte-sized discriminant, but layout thinks we need a 16-bit to store all
1250 // discriminant values. That would be a bug, because then, in codegen, in order
1251 // to store this 16-bit discriminant into 8-bit sized temporary some of the
1252 // space necessary to represent would have to be discarded (or layout is wrong
1253 // on thinking it needs 16 bits)
1255 "layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
1259 // However, it is fine to make discr type however large (as an optimisation)
1260 // after this point – we’ll just truncate the value we load in codegen.
1263 // Check to see if we should use a different type for the
1264 // discriminant. We can safely use a type with the same size
1265 // as the alignment of the first field of each variant.
1266 // We increase the size of the discriminant to avoid LLVM copying
1267 // padding when it doesn't need to. This normally causes unaligned
1268 // load/stores and excessive memcpy/memset operations. By using a
1269 // bigger integer size, LLVM can be sure about its contents and
1270 // won't be so conservative.
1272 // Use the initial field alignment
1273 let mut ity = if def.repr().c() || def.repr().int.is_some() {
1276 Integer::for_align(dl, start_align).unwrap_or(min_ity)
1279 // If the alignment is not larger than the chosen discriminant size,
1280 // don't use the alignment as the final size.
1284 // Patch up the variants' first few fields.
1285 let old_ity_size = min_ity.size();
1286 let new_ity_size = ity.size();
1287 for variant in &mut layout_variants {
1288 match variant.fields {
1289 FieldsShape::Arbitrary { ref mut offsets, .. } => {
1291 if *i <= old_ity_size {
1292 assert_eq!(*i, old_ity_size);
1296 // We might be making the struct larger.
1297 if variant.size <= old_ity_size {
1298 variant.size = new_ity_size;
1306 let tag_mask = ity.size().unsigned_int_max();
1307 let tag = Scalar::Initialized {
1308 value: Int(ity, signed),
1309 valid_range: WrappingRange {
1310 start: (min as u128 & tag_mask),
1311 end: (max as u128 & tag_mask),
1314 let mut abi = Abi::Aggregate { sized: true };
1316 // Without the latter check, aligned enums with custom discriminant values
1317 // would result in an ICE; see issue #92464 for more info.
1318 if tag.size(dl) == size || variants.iter().all(|layout| layout.is_empty()) {
1319 abi = Abi::Scalar(tag);
1321 // Try to use a ScalarPair for all tagged enums.
1322 let mut common_prim = None;
1323 let mut common_prim_initialized_in_all_variants = true;
1324 for (field_layouts, layout_variant) in iter::zip(&variants, &layout_variants) {
1325 let FieldsShape::Arbitrary { ref offsets, .. } = layout_variant.fields else {
1329 iter::zip(field_layouts, offsets).filter(|p| !p.0.is_zst());
1330 let (field, offset) = match (fields.next(), fields.next()) {
1332 common_prim_initialized_in_all_variants = false;
1335 (Some(pair), None) => pair,
1341 let prim = match field.abi {
1342 Abi::Scalar(scalar) => {
1343 common_prim_initialized_in_all_variants &=
1344 matches!(scalar, Scalar::Initialized { .. });
1352 if let Some(pair) = common_prim {
1353 // This is pretty conservative. We could go fancier
1354 // by conflating things like i32 and u32, or even
1355 // realising that (u8, u8) could just cohabit with
1357 if pair != (prim, offset) {
1362 common_prim = Some((prim, offset));
1365 if let Some((prim, offset)) = common_prim {
1366 let prim_scalar = if common_prim_initialized_in_all_variants {
1369 // Common prim might be uninit.
1370 Scalar::Union { value: prim }
1372 let pair = self.scalar_pair(tag, prim_scalar);
1373 let pair_offsets = match pair.fields {
1374 FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
1375 assert_eq!(memory_index, &[0, 1]);
1380 if pair_offsets[0] == Size::ZERO
1381 && pair_offsets[1] == *offset
1382 && align == pair.align
1383 && size == pair.size
1385 // We can use `ScalarPair` only when it matches our
1386 // already computed layout (including `#[repr(C)]`).
1392 if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
1393 abi = Abi::Uninhabited;
1396 let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag);
1398 let layout_variants =
1399 layout_variants.into_iter().map(|v| tcx.intern_layout(v)).collect();
1401 let tagged_layout = LayoutS {
1402 variants: Variants::Multiple {
1404 tag_encoding: TagEncoding::Direct,
1406 variants: layout_variants,
1408 fields: FieldsShape::Arbitrary {
1409 offsets: vec![Size::ZERO],
1410 memory_index: vec![0],
1418 let best_layout = match (tagged_layout, niche_filling_layout) {
1419 (tagged_layout, Some(niche_filling_layout)) => {
1420 // Pick the smaller layout; otherwise,
1421 // pick the layout with the larger niche; otherwise,
1422 // pick tagged as it has simpler codegen.
1423 cmp::min_by_key(tagged_layout, niche_filling_layout, |layout| {
1424 let niche_size = layout.largest_niche.map_or(0, |n| n.available(dl));
1425 (layout.size, cmp::Reverse(niche_size))
1428 (tagged_layout, None) => tagged_layout,
1431 tcx.intern_layout(best_layout)
1434 // Types with no meaningful known layout.
1435 ty::Projection(_) | ty::Opaque(..) => {
1436 // NOTE(eddyb) `layout_of` query should've normalized these away,
1437 // if that was possible, so there's no reason to try again here.
1438 return Err(LayoutError::Unknown(ty));
1441 ty::Placeholder(..) | ty::GeneratorWitness(..) | ty::Infer(_) => {
1442 bug!("Layout::compute: unexpected type `{}`", ty)
1445 ty::Bound(..) | ty::Param(_) | ty::Error(_) => {
1446 return Err(LayoutError::Unknown(ty));
1452 /// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
1453 #[derive(Clone, Debug, PartialEq)]
1454 enum SavedLocalEligibility {
// Local appears in exactly one variant, so it may share (overlap) storage
// with eligible locals of *other* variants.
1456 Assigned(VariantIdx),
1457 // FIXME: Use newtype_index so we aren't wasting bytes
// Local must live in the generator's prefix. Once promotion order is fixed,
// `Some(idx)` is its position among the promoted prefix fields.
1458 Ineligible(Option<u32>),
1461 // When laying out generators, we divide our saved local fields into two
1462 // categories: overlap-eligible and overlap-ineligible.
1464 // Those fields which are ineligible for overlap go in a "prefix" at the
1465 // beginning of the layout, and always have space reserved for them.
1467 // Overlap-eligible fields are only assigned to one variant, so we lay
1468 // those fields out for each variant and put them right after the
1471 // Finally, in the layout details, we point to the fields from the
1472 // variants they are assigned to. It is possible for some fields to be
1473 // included in multiple variants. No field ever "moves around" in the
1474 // layout; its offset is always the same.
1476 // Also included in the layout are the upvars and the discriminant.
1477 // These are included as fields on the "outer" layout; they are not part
1479 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
1480 /// Compute the eligibility and assignment of each local.
/// Returns the set of overlap-ineligible locals (which will be stored in the
/// generator prefix) together with the per-local eligibility assignment.
1481 fn generator_saved_local_eligibility(
1483 info: &GeneratorLayout<'tcx>,
1484 ) -> (BitSet<GeneratorSavedLocal>, IndexVec<GeneratorSavedLocal, SavedLocalEligibility>) {
1485 use SavedLocalEligibility::*;
// Start with every saved local unassigned; the passes below refine this.
1487 let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> =
1488 IndexVec::from_elem_n(Unassigned, info.field_tys.len())
1490 // The saved locals not eligible for overlap. These will get
1491 // "promoted" to the prefix of our generator.
1492 let mut ineligible_locals = BitSet::new_empty(info.field_tys.len());
1494 // Figure out which of our saved locals are fields in only
1495 // one variant. The rest are deemed ineligible for overlap.
1496 for (variant_index, fields) in info.variant_fields.iter_enumerated() {
1497 for local in fields {
1498 match assignments[*local] {
1500 assignments[*local] = Assigned(variant_index);
1503 // We've already seen this local at another suspension
1504 // point, so it is no longer a candidate.
1506 "removing local {:?} in >1 variant ({:?}, {:?})",
1511 ineligible_locals.insert(*local);
1512 assignments[*local] = Ineligible(None);
1519 // Next, check every pair of eligible locals to see if they conflict.
1521 for local_a in info.storage_conflicts.rows() {
1522 let conflicts_a = info.storage_conflicts.count(local_a);
1523 if ineligible_locals.contains(local_a) {
1527 for local_b in info.storage_conflicts.iter(local_a) {
1528 // local_a and local_b are storage live at the same time, therefore they
1529 // cannot overlap in the generator layout. The only way to guarantee
1530 // this is if they are in the same variant, or one is ineligible
1531 // (which means it is stored in every variant).
1532 if ineligible_locals.contains(local_b)
1533 || assignments[local_a] == assignments[local_b]
1538 // If they conflict, we will choose one to make ineligible.
1539 // This is not always optimal; it's just a greedy heuristic that
1540 // seems to produce good results most of the time.
// Prefer removing the local with *more* conflicts, so one promotion
// resolves as many pairwise conflicts as possible.
1541 let conflicts_b = info.storage_conflicts.count(local_b);
1542 let (remove, other) =
1543 if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
1544 ineligible_locals.insert(remove);
1545 assignments[remove] = Ineligible(None);
1546 trace!("removing local {:?} due to conflict with {:?}", remove, other);
1550 // Count the number of variants in use. If only one of them, then it is
1551 // impossible to overlap any locals in our layout. In this case it's
1552 // always better to make the remaining locals ineligible, so we can
1553 // lay them out with the other locals in the prefix and eliminate
1554 // unnecessary padding bytes.
1556 let mut used_variants = BitSet::new_empty(info.variant_fields.len());
1557 for assignment in &assignments {
1558 if let Assigned(idx) = assignment {
1559 used_variants.insert(*idx);
1562 if used_variants.count() < 2 {
1563 for assignment in assignments.iter_mut() {
1564 *assignment = Ineligible(None);
1566 ineligible_locals.insert_all();
1570 // Write down the order of our locals that will be promoted to the prefix.
1572 for (idx, local) in ineligible_locals.iter().enumerate() {
1573 assignments[local] = Ineligible(Some(idx as u32));
1576 debug!("generator saved local assignments: {:?}", assignments);
1578 (ineligible_locals, assignments)
1581 /// Compute the full generator layout.
/// The layout is built as a common prefix — the upvars, the discriminant tag,
/// and the promoted (overlap-ineligible) saved locals — followed by one
/// per-variant layout for the overlap-eligible fields (`StructKind::Prefixed`).
1582 fn generator_layout(
1585 def_id: hir::def_id::DefId,
1586 substs: SubstsRef<'tcx>,
1587 ) -> Result<Layout<'tcx>, LayoutError<'tcx>> {
1588 use SavedLocalEligibility::*;
1590 let subst_field = |ty: Ty<'tcx>| ty.subst(tcx, substs);
1592 let Some(info) = tcx.generator_layout(def_id) else {
1593 return Err(LayoutError::Unknown(ty));
1595 let (ineligible_locals, assignments) = self.generator_saved_local_eligibility(&info);
1597 // Build a prefix layout, including "promoting" all ineligible
1598 // locals as part of the prefix. We compute the layout of all of
1599 // these fields at once to get optimal packing.
1600 let tag_index = substs.as_generator().prefix_tys().count();
1602 // `info.variant_fields` already accounts for the reserved variants, so no need to add them.
1603 let max_discr = (info.variant_fields.len() - 1) as u128;
1604 let discr_int = Integer::fit_unsigned(max_discr);
1605 let discr_int_ty = discr_int.to_ty(tcx, false);
1606 let tag = Scalar::Initialized {
1607 value: Primitive::Int(discr_int, false),
1608 valid_range: WrappingRange { start: 0, end: max_discr },
1610 let tag_layout = self.tcx.intern_layout(LayoutS::scalar(self, tag));
1611 let tag_layout = TyAndLayout { ty: discr_int_ty, layout: tag_layout };
// Promoted locals are wrapped in `MaybeUninit` because they may not be
// initialized in every variant.
1613 let promoted_layouts = ineligible_locals
1615 .map(|local| subst_field(info.field_tys[local]))
1616 .map(|ty| tcx.mk_maybe_uninit(ty))
1617 .map(|ty| self.layout_of(ty));
1618 let prefix_layouts = substs
1621 .map(|ty| self.layout_of(ty))
1622 .chain(iter::once(Ok(tag_layout)))
1623 .chain(promoted_layouts)
1624 .collect::<Result<Vec<_>, _>>()?;
1625 let prefix = self.univariant_uninterned(
1628 &ReprOptions::default(),
1629 StructKind::AlwaysSized,
1632 let (prefix_size, prefix_align) = (prefix.size, prefix.align);
1634 // Split the prefix layout into the "outer" fields (upvars and
1635 // discriminant) and the "promoted" fields. Promoted fields will
1636 // get included in each variant that requested them in its
1638 debug!("prefix = {:#?}", prefix);
1639 let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
1640 FieldsShape::Arbitrary { mut offsets, memory_index } => {
1641 let mut inverse_memory_index = invert_mapping(&memory_index);
1643 // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
1644 // "outer" and "promoted" fields respectively.
1645 let b_start = (tag_index + 1) as u32;
1646 let offsets_b = offsets.split_off(b_start as usize);
1647 let offsets_a = offsets;
1649 // Disentangle the "a" and "b" components of `inverse_memory_index`
1650 // by preserving the order but keeping only one disjoint "half" each.
1651 // FIXME(eddyb) build a better abstraction for permutations, if possible.
1652 let inverse_memory_index_b: Vec<_> =
1653 inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
1654 inverse_memory_index.retain(|&i| i < b_start);
1655 let inverse_memory_index_a = inverse_memory_index;
1657 // Since `inverse_memory_index_{a,b}` each only refer to their
1658 // respective fields, they can be safely inverted
1659 let memory_index_a = invert_mapping(&inverse_memory_index_a);
1660 let memory_index_b = invert_mapping(&inverse_memory_index_b);
1663 FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
1664 (outer_fields, offsets_b, memory_index_b)
1669 let mut size = prefix.size;
1670 let mut align = prefix.align;
1674 .map(|(index, variant_fields)| {
1675 // Only include overlap-eligible fields when we compute our variant layout.
1676 let variant_only_tys = variant_fields
1678 .filter(|local| match assignments[**local] {
1679 Unassigned => bug!(),
1680 Assigned(v) if v == index => true,
1681 Assigned(_) => bug!("assignment does not match variant"),
1682 Ineligible(_) => false,
1684 .map(|local| subst_field(info.field_tys[*local]));
1686 let mut variant = self.univariant_uninterned(
1689 .map(|ty| self.layout_of(ty))
1690 .collect::<Result<Vec<_>, _>>()?,
1691 &ReprOptions::default(),
1692 StructKind::Prefixed(prefix_size, prefix_align.abi),
1694 variant.variants = Variants::Single { index };
1696 let FieldsShape::Arbitrary { offsets, memory_index } = variant.fields else {
1700 // Now, stitch the promoted and variant-only fields back together in
1701 // the order they are mentioned by our GeneratorLayout.
1702 // Because we only use some subset (that can differ between variants)
1703 // of the promoted fields, we can't just pick those elements of the
1704 // `promoted_memory_index` (as we'd end up with gaps).
1705 // So instead, we build an "inverse memory_index", as if all of the
1706 // promoted fields were being used, but leave the elements not in the
1707 // subset as `INVALID_FIELD_IDX`, which we can filter out later to
1708 // obtain a valid (bijective) mapping.
1709 const INVALID_FIELD_IDX: u32 = !0;
1710 let mut combined_inverse_memory_index =
1711 vec![INVALID_FIELD_IDX; promoted_memory_index.len() + memory_index.len()];
1712 let mut offsets_and_memory_index = iter::zip(offsets, memory_index);
1713 let combined_offsets = variant_fields
1717 let (offset, memory_index) = match assignments[*local] {
1718 Unassigned => bug!(),
1720 let (offset, memory_index) =
1721 offsets_and_memory_index.next().unwrap();
1722 (offset, promoted_memory_index.len() as u32 + memory_index)
1724 Ineligible(field_idx) => {
1725 let field_idx = field_idx.unwrap() as usize;
1726 (promoted_offsets[field_idx], promoted_memory_index[field_idx])
1729 combined_inverse_memory_index[memory_index as usize] = i as u32;
1734 // Remove the unused slots and invert the mapping to obtain the
1735 // combined `memory_index` (also see previous comment).
1736 combined_inverse_memory_index.retain(|&i| i != INVALID_FIELD_IDX);
1737 let combined_memory_index = invert_mapping(&combined_inverse_memory_index);
1739 variant.fields = FieldsShape::Arbitrary {
1740 offsets: combined_offsets,
1741 memory_index: combined_memory_index,
1744 size = size.max(variant.size);
1745 align = align.max(variant.align);
1746 Ok(tcx.intern_layout(variant))
1748 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1750 size = size.align_to(align.abi);
1753 if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi().is_uninhabited()) {
1756 Abi::Aggregate { sized: true }
1759 let layout = tcx.intern_layout(LayoutS {
1760 variants: Variants::Multiple {
1762 tag_encoding: TagEncoding::Direct,
1763 tag_field: tag_index,
1766 fields: outer_fields,
1768 largest_niche: prefix.largest_niche,
1772 debug!("generator layout ({:?}): {:#?}", ty, layout);
1776 /// This is invoked by the `layout_of` query to record the final
1777 /// layout of each type.
1779 fn record_layout_for_printing(&self, layout: TyAndLayout<'tcx>) {
1780 // If we are running with `-Zprint-type-sizes`, maybe record layouts
1781 // for dumping later.
// The actual recording lives in a separate `_outlined` helper —
// presumably so this common early-out path stays small; confirm if relied on.
1782 if self.tcx.sess.opts.debugging_opts.print_type_sizes {
1783 self.record_layout_for_printing_outlined(layout)
// Emit a `-Zprint-type-sizes` record (via `code_stats`) for one layout.
1787 fn record_layout_for_printing_outlined(&self, layout: TyAndLayout<'tcx>) {
1788 // Ignore layouts that are done with non-empty environments or
1789 // non-monomorphic layouts, as the user only wants to see the stuff
1790 // resulting from the final codegen session.
1791 if layout.ty.has_param_types_or_consts() || !self.param_env.caller_bounds().is_empty() {
1795 // (delay format until we actually need it)
1796 let record = |kind, packed, opt_discr_size, variants| {
1797 let type_desc = format!("{:?}", layout.ty);
1798 self.tcx.sess.code_stats.record_type_size(
// Only ADTs and closures get detailed records; everything else is skipped.
1809 let adt_def = match *layout.ty.kind() {
1810 ty::Adt(ref adt_def, _) => {
1811 debug!("print-type-size t: `{:?}` process adt", layout.ty);
1815 ty::Closure(..) => {
1816 debug!("print-type-size t: `{:?}` record closure", layout.ty);
1817 record(DataTypeKind::Closure, false, None, vec![]);
1822 debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
1827 let adt_kind = adt_def.adt_kind();
1828 let adt_packed = adt_def.repr().pack.is_some();
// Build the per-variant field report; `min_size` tracks the furthest
// field end so unsized tails report a minimum rather than an exact size.
1830 let build_variant_info = |n: Option<Symbol>, flds: &[Symbol], layout: TyAndLayout<'tcx>| {
1831 let mut min_size = Size::ZERO;
1832 let field_info: Vec<_> = flds
1836 let field_layout = layout.field(self, i);
1837 let offset = layout.fields.offset(i);
1838 let field_end = offset + field_layout.size;
1839 if min_size < field_end {
1840 min_size = field_end;
1843 name: name.to_string(),
1844 offset: offset.bytes(),
1845 size: field_layout.size.bytes(),
1846 align: field_layout.align.abi.bytes(),
1852 name: n.map(|n| n.to_string()),
1853 kind: if layout.is_unsized() { SizeKind::Min } else { SizeKind::Exact },
1854 align: layout.align.abi.bytes(),
1855 size: if min_size.bytes() == 0 { layout.size.bytes() } else { min_size.bytes() },
1860 match layout.variants {
1861 Variants::Single { index } => {
1862 if !adt_def.variants().is_empty() && layout.fields != FieldsShape::Primitive {
1864 "print-type-size `{:#?}` variant {}",
1866 adt_def.variant(index).name
1868 let variant_def = &adt_def.variant(index);
1869 let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
1874 vec![build_variant_info(Some(variant_def.name), &fields, layout)],
1877 // (This case arises for *empty* enums; so give it zero variants.)
1879 record(adt_kind.into(), adt_packed, None, vec![]);
1883 Variants::Multiple { tag, ref tag_encoding, .. } => {
1885 "print-type-size `{:#?}` adt general variants def {}",
1887 adt_def.variants().len()
1889 let variant_infos: Vec<_> = adt_def
1892 .map(|(i, variant_def)| {
1893 let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
1895 Some(variant_def.name),
1897 layout.for_variant(self, i),
// Only a directly-encoded tag contributes a discriminant size to the record.
1904 match tag_encoding {
1905 TagEncoding::Direct => Some(tag.size(self)),
1915 /// Type size "skeleton", i.e., the only information determining a type's size.
1916 /// While this is conservative (aside from constant sizes, only pointers,
1917 /// newtypes thereof and null-pointer-optimized enums are allowed), it is
1918 /// enough to statically check common use cases of transmute.
1919 #[derive(Copy, Clone, Debug)]
1920 pub enum SizeSkeleton<'tcx> {
1921 /// Any statically computable Layout.
1924 /// A potentially-fat pointer.
1926 /// If true, this pointer is never null.
1928 /// The type which determines the unsized metadata, if any,
1929 /// of this pointer. Either a type parameter or a projection
1930 /// depending on one, with regions erased.
1935 impl<'tcx> SizeSkeleton<'tcx> {
1939 param_env: ty::ParamEnv<'tcx>,
1940 ) -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
1941 debug_assert!(!ty.has_infer_types_or_consts());
1943 // First try computing a static layout.
1944 let err = match tcx.layout_of(param_env.and(ty)) {
1946 return Ok(SizeSkeleton::Known(layout.size));
// `layout_of` failed; fall back to structural analysis of the type.
1952 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
1953 let non_zero = !ty.is_unsafe_ptr();
1954 let tail = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
1956 ty::Param(_) | ty::Projection(_) => {
1957 debug_assert!(tail.has_param_types_or_consts());
1958 Ok(SizeSkeleton::Pointer { non_zero, tail: tcx.erase_regions(tail) })
1961 "SizeSkeleton::compute({}): layout errored ({}), yet \
1962 tail `{}` is not a type parameter or a projection",
1970 ty::Adt(def, substs) => {
1971 // Only newtypes and enums w/ nullable pointer optimization.
1972 if def.is_union() || def.variants().is_empty() || def.variants().len() > 2 {
1976 // Get a zero-sized variant or a pointer newtype.
1977 let zero_or_ptr_variant = |i| {
1978 let i = VariantIdx::new(i);
1980 def.variant(i).fields.iter().map(|field| {
1981 SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env)
1984 for field in fields {
1987 SizeSkeleton::Known(size) => {
1988 if size.bytes() > 0 {
1992 SizeSkeleton::Pointer { .. } => {
2003 let v0 = zero_or_ptr_variant(0)?;
// Newtype: a single variant whose only interesting field is a pointer.
2005 if def.variants().len() == 1 {
2006 if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
2007 return Ok(SizeSkeleton::Pointer {
2009 || match tcx.layout_scalar_valid_range(def.did()) {
2010 (Bound::Included(start), Bound::Unbounded) => start > 0,
2011 (Bound::Included(start), Bound::Included(end)) => {
2012 0 < start && start < end
2023 let v1 = zero_or_ptr_variant(1)?;
2024 // Nullable pointer enum optimization.
2026 (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None)
2027 | (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
// The resulting skeleton may be null (the niche), so `non_zero: false`.
2028 Ok(SizeSkeleton::Pointer { non_zero: false, tail })
2034 ty::Projection(_) | ty::Opaque(..) => {
2035 let normalized = tcx.normalize_erasing_regions(param_env, ty);
2036 if ty == normalized {
2039 SizeSkeleton::compute(normalized, tcx, param_env)
/// Whether two skeletons are statically guaranteed to have the same size.
2047 pub fn same_size(self, other: SizeSkeleton<'_>) -> bool {
2048 match (self, other) {
2049 (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
2050 (SizeSkeleton::Pointer { tail: a, .. }, SizeSkeleton::Pointer { tail: b, .. }) => {
/// Contexts that can hand out a `TyCtxt` (and, via the supertrait, a data layout).
2058 pub trait HasTyCtxt<'tcx>: HasDataLayout {
2059 fn tcx(&self) -> TyCtxt<'tcx>;
/// Contexts that carry a `ParamEnv` for trait-system queries.
2062 pub trait HasParamEnv<'tcx> {
2063 fn param_env(&self) -> ty::ParamEnv<'tcx>;
// Forwarding impls: `TyCtxt`, `TyCtxtAt`, and `LayoutCx` all expose the
// data layout / target spec / param env / `TyCtxt`; the `LayoutCx` ones
// visibly delegate to the wrapped `tcx`.
2066 impl<'tcx> HasDataLayout for TyCtxt<'tcx> {
2068 fn data_layout(&self) -> &TargetDataLayout {
2073 impl<'tcx> HasTargetSpec for TyCtxt<'tcx> {
2074 fn target_spec(&self) -> &Target {
2079 impl<'tcx> HasTyCtxt<'tcx> for TyCtxt<'tcx> {
2081 fn tcx(&self) -> TyCtxt<'tcx> {
2086 impl<'tcx> HasDataLayout for ty::query::TyCtxtAt<'tcx> {
2088 fn data_layout(&self) -> &TargetDataLayout {
2093 impl<'tcx> HasTargetSpec for ty::query::TyCtxtAt<'tcx> {
2094 fn target_spec(&self) -> &Target {
2099 impl<'tcx> HasTyCtxt<'tcx> for ty::query::TyCtxtAt<'tcx> {
2101 fn tcx(&self) -> TyCtxt<'tcx> {
2106 impl<'tcx, C> HasParamEnv<'tcx> for LayoutCx<'tcx, C> {
2107 fn param_env(&self) -> ty::ParamEnv<'tcx> {
2112 impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
2113 fn data_layout(&self) -> &TargetDataLayout {
2114 self.tcx.data_layout()
2118 impl<'tcx, T: HasTargetSpec> HasTargetSpec for LayoutCx<'tcx, T> {
2119 fn target_spec(&self) -> &Target {
2120 self.tcx.target_spec()
2124 impl<'tcx, T: HasTyCtxt<'tcx>> HasTyCtxt<'tcx> for LayoutCx<'tcx, T> {
2125 fn tcx(&self) -> TyCtxt<'tcx> {
/// Abstracts over "is a `Result`" vs. "is the bare value": `T` itself
/// (infallible) and `Result<T, E>` both implement this, so `layout_of`-style
/// hooks can return either shape.
2130 pub trait MaybeResult<T> {
2133 fn from(x: Result<T, Self::Error>) -> Self;
2134 fn to_result(self) -> Result<T, Self::Error>;
// Infallible case: `from` can irrefutably destructure `Ok` because the
// error type here is uninhabitable.
2137 impl<T> MaybeResult<T> for T {
2140 fn from(Ok(x): Result<T, Self::Error>) -> Self {
2143 fn to_result(self) -> Result<T, Self::Error> {
// Fallible case: `from`/`to_result` are identity on the `Result`.
2148 impl<T, E> MaybeResult<T> for Result<T, E> {
2151 fn from(x: Result<T, Self::Error>) -> Self {
2154 fn to_result(self) -> Result<T, Self::Error> {
/// Shorthand: `rustc_target`'s generic `TyAndLayout` instantiated for this crate's `Ty`.
2159 pub type TyAndLayout<'tcx> = rustc_target::abi::TyAndLayout<'tcx, Ty<'tcx>>;
2161 /// Trait for contexts that want to be able to compute layouts of types.
2162 /// This automatically gives access to `LayoutOf`, through a blanket `impl`.
2163 pub trait LayoutOfHelpers<'tcx>: HasDataLayout + HasTyCtxt<'tcx> + HasParamEnv<'tcx> {
2164 /// The `TyAndLayout`-wrapping type (or `TyAndLayout` itself), which will be
2165 /// returned from `layout_of` (see also `handle_layout_err`).
2166 type LayoutOfResult: MaybeResult<TyAndLayout<'tcx>>;
2168 /// `Span` to use for `tcx.at(span)`, from `layout_of`.
2169 // FIXME(eddyb) perhaps make this mandatory to get contexts to track it better?
2171 fn layout_tcx_at_span(&self) -> Span {
2175 /// Helper used for `layout_of`, to adapt `tcx.layout_of(...)` into a
2176 /// `Self::LayoutOfResult` (which does not need to be a `Result<...>`).
2178 /// Most `impl`s, which propagate `LayoutError`s, should simply return `err`,
2179 /// but this hook allows e.g. codegen to return only `TyAndLayout` from its
2180 /// `cx.layout_of(...)`, without any `Result<...>` around it to deal with
2181 /// (and any `LayoutError`s are turned into fatal errors or ICEs).
2182 fn handle_layout_err(
2184 err: LayoutError<'tcx>,
2187 ) -> <Self::LayoutOfResult as MaybeResult<TyAndLayout<'tcx>>>::Error;
2190 /// Blanket extension trait for contexts that can compute layouts of types.
2191 pub trait LayoutOf<'tcx>: LayoutOfHelpers<'tcx> {
2192 /// Computes the layout of a type. Note that this implicitly
2193 /// executes in "reveal all" mode, and will normalize the input type.
2195 fn layout_of(&self, ty: Ty<'tcx>) -> Self::LayoutOfResult {
2196 self.spanned_layout_of(ty, DUMMY_SP)
2199 /// Computes the layout of a type, at `span`. Note that this implicitly
2200 /// executes in "reveal all" mode, and will normalize the input type.
2201 // FIXME(eddyb) avoid passing information like this, and instead add more
2202 // `TyCtxt::at`-like APIs to be able to do e.g. `cx.at(span).layout_of(ty)`.
2204 fn spanned_layout_of(&self, ty: Ty<'tcx>, span: Span) -> Self::LayoutOfResult {
// A dummy span falls back to the context's tracked span, if any.
2205 let span = if !span.is_dummy() { span } else { self.layout_tcx_at_span() };
2206 let tcx = self.tcx().at(span);
2209 tcx.layout_of(self.param_env().and(ty))
2210 .map_err(|err| self.handle_layout_err(err, span, ty)),
// Blanket impl: every `LayoutOfHelpers` context gets `layout_of`/`spanned_layout_of` for free.
2215 impl<'tcx, C: LayoutOfHelpers<'tcx>> LayoutOf<'tcx> for C {}
// `LayoutCx` itself propagates `LayoutError`s: `LayoutOfResult` is a plain
// `Result` and `handle_layout_err` just returns the error.
2217 impl<'tcx> LayoutOfHelpers<'tcx> for LayoutCx<'tcx, TyCtxt<'tcx>> {
2218 type LayoutOfResult = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
2221 fn handle_layout_err(&self, err: LayoutError<'tcx>, _: Span, _: Ty<'tcx>) -> LayoutError<'tcx> {
// Same for the `TyCtxtAt` flavor, which additionally supplies a tracked span.
2226 impl<'tcx> LayoutOfHelpers<'tcx> for LayoutCx<'tcx, ty::query::TyCtxtAt<'tcx>> {
2227 type LayoutOfResult = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
2230 fn layout_tcx_at_span(&self) -> Span {
2235 fn handle_layout_err(&self, err: LayoutError<'tcx>, _: Span, _: Ty<'tcx>) -> LayoutError<'tcx> {
2240 impl<'tcx, C> TyAbiInterface<'tcx, C> for Ty<'tcx>
2242 C: HasTyCtxt<'tcx> + HasParamEnv<'tcx>,
// Project the layout of one variant out of `this` (an enum/generator layout).
2244 fn ty_and_layout_for_variant(
2245 this: TyAndLayout<'tcx>,
2247 variant_index: VariantIdx,
2248 ) -> TyAndLayout<'tcx> {
2249 let layout = match this.variants {
2250 Variants::Single { index }
2251 // If all variants but one are uninhabited, the variant layout is the enum layout.
2252 if index == variant_index &&
2253 // Don't confuse variants of uninhabited enums with the enum itself.
2254 // For more details see https://github.com/rust-lang/rust/issues/69763.
2255 this.fields != FieldsShape::Primitive =>
2260 Variants::Single { index } => {
2262 let param_env = cx.param_env();
2264 // Deny calling for_variant more than once for non-Single enums.
2265 if let Ok(original_layout) = tcx.layout_of(param_env.and(this.ty)) {
2266 assert_eq!(original_layout.variants, Variants::Single { index });
// Requested a variant other than the single (absorbed) one: synthesize
// an uninhabited layout with the right field count.
2269 let fields = match this.ty.kind() {
2270 ty::Adt(def, _) if def.variants().is_empty() =>
2271 bug!("for_variant called on zero-variant enum"),
2272 ty::Adt(def, _) => def.variant(variant_index).fields.len(),
2275 tcx.intern_layout(LayoutS {
2276 variants: Variants::Single { index: variant_index },
// `FieldsShape::Union` requires a non-zero field count; fall back to an
// empty `Arbitrary` shape for zero fields.
2277 fields: match NonZeroUsize::new(fields) {
2278 Some(fields) => FieldsShape::Union(fields),
2279 None => FieldsShape::Arbitrary { offsets: vec![], memory_index: vec![] },
2281 abi: Abi::Uninhabited,
2282 largest_niche: None,
2283 align: tcx.data_layout.i8_align,
2288 Variants::Multiple { ref variants, .. } => variants[variant_index],
2291 assert_eq!(*layout.variants(), Variants::Single { index: variant_index });
2293 TyAndLayout { ty: this.ty, layout }
    /// Returns the type-and-layout of field `i` of `this`.
    /// NOTE(review): interior lines of this method are elided in this excerpt.
    fn ty_and_layout_field(this: TyAndLayout<'tcx>, cx: &C, i: usize) -> TyAndLayout<'tcx> {
        // Helper result: either just a field type (layout computed afterwards)
        // or an already-complete `TyAndLayout` (for synthetic fields like tags).
        enum TyMaybeWithLayout<'tcx> {
            TyAndLayout(TyAndLayout<'tcx>),

        fn field_ty_or_layout<'tcx>(
            this: TyAndLayout<'tcx>,
            cx: &(impl HasTyCtxt<'tcx> + HasParamEnv<'tcx>),
        ) -> TyMaybeWithLayout<'tcx> {
            // Builds the layout of an enum's tag (discriminant) scalar.
            let tag_layout = |tag: Scalar| -> TyAndLayout<'tcx> {
                    layout: tcx.intern_layout(LayoutS::scalar(cx, tag)),
                    ty: tag.primitive().to_ty(tcx),

            match *this.ty.kind() {
                // Types with no fields to project.
                | ty::GeneratorWitness(..)
                | ty::Dynamic(..) => bug!("TyAndLayout::field({:?}): not applicable", this),

                // Potentially-fat pointers.
                ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                    assert!(i < this.fields.count());

                    // Reuse the fat `*T` type as its own thin pointer data field.
                    // This provides information about, e.g., DST struct pointees
                    // (which may have no non-DST form), and will work as long
                    // as the `Abi` or `FieldsShape` is checked by users.
                        let nil = tcx.mk_unit();
                        let unit_ptr_ty = if this.ty.is_unsafe_ptr() {
                            tcx.mk_mut_ref(tcx.lifetimes.re_static, nil)

                        // NOTE(eddyb) using an empty `ParamEnv`, and `unwrap`-ing
                        // the `Result` should always work because the type is
                        // always either `*mut ()` or `&'static mut ()`.
                        return TyMaybeWithLayout::TyAndLayout(TyAndLayout {
                            ..tcx.layout_of(ty::ParamEnv::reveal_all().and(unit_ptr_ty)).unwrap()

                    // Second (metadata) field of a fat pointer, keyed on the pointee's tail.
                    match tcx.struct_tail_erasing_lifetimes(pointee, cx.param_env()).kind() {
                        ty::Slice(_) | ty::Str => TyMaybeWithLayout::Ty(tcx.types.usize),
                        ty::Dynamic(_, _) => {
                            // Vtable pointer, modeled as `&'static [usize; 3]`.
                            TyMaybeWithLayout::Ty(tcx.mk_imm_ref(
                                tcx.lifetimes.re_static,
                                tcx.mk_array(tcx.types.usize, 3),
                            /* FIXME: use actual fn pointers
                            Warning: naively computing the number of entries in the
                            vtable by counting the methods on the trait + methods on
                            all parent traits does not work, because some methods can
                            be not object safe and thus excluded from the vtable.
                            Increase this counter if you tried to implement this but
                            failed to do it without duplicating a lot of code from
                            other places in the compiler: 2
                            tcx.mk_array(tcx.types.usize, 3),
                            tcx.mk_array(Option<fn()>),
                        _ => bug!("TyAndLayout::field({:?}): not applicable", this),

                // Arrays and slices.
                ty::Array(element, _) | ty::Slice(element) => TyMaybeWithLayout::Ty(element),
                ty::Str => TyMaybeWithLayout::Ty(tcx.types.u8),

                // Tuples, generators and closures.
                ty::Closure(_, ref substs) => field_ty_or_layout(
                    // Closure fields are its captured upvars, viewed as a tuple.
                    TyAndLayout { ty: substs.as_closure().tupled_upvars_ty(), ..this },

                ty::Generator(def_id, ref substs, _) => match this.variants {
                    // A specific generator state: pick the `i`-th saved local of that state.
                    Variants::Single { index } => TyMaybeWithLayout::Ty(
                            .state_tys(def_id, tcx)
                            .nth(index.as_usize())

                    Variants::Multiple { tag, tag_field, .. } => {
                            // The discriminant field of the generator itself.
                            return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
                        TyMaybeWithLayout::Ty(substs.as_generator().prefix_tys().nth(i).unwrap())

                ty::Tuple(tys) => TyMaybeWithLayout::Ty(tys[i]),

                // ADTs.
                ty::Adt(def, substs) => {
                    match this.variants {
                        Variants::Single { index } => {
                            TyMaybeWithLayout::Ty(def.variant(index).fields[i].ty(tcx, substs))

                        // Discriminant field for enums (where applicable).
                        Variants::Multiple { tag, .. } => {
                            return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));

                // Types that should never reach layout field projection.
                | ty::Placeholder(..)
                | ty::Error(_) => bug!("TyAndLayout::field: unexpected type `{}`", this.ty),

        // If only a type was returned, compute its layout now; failure here is a
        // compiler bug since the parent layout already exists.
        match field_ty_or_layout(this, cx, i) {
            TyMaybeWithLayout::Ty(field_ty) => {
                cx.tcx().layout_of(cx.param_env().and(field_ty)).unwrap_or_else(|e| {
                        "failed to get layout for `{}`: {},\n\
                         despite it being a field (#{}) of an existing layout: {:#?}",
            TyMaybeWithLayout::TyAndLayout(field_layout) => field_layout,
    /// Computes `PointeeInfo` (size/align/safety attributes of the pointed-to data)
    /// for a pointer found at `offset` inside `this`, if any.
    /// NOTE(review): interior lines of this method are elided in this excerpt.
    fn ty_and_layout_pointee_info_at(
        this: TyAndLayout<'tcx>,
    ) -> Option<PointeeInfo> {
        let param_env = cx.param_env();

        // Function pointers live in the instruction address space; data pointers in DATA.
        let addr_space_of_ty = |ty: Ty<'tcx>| {
            if ty.is_fn() { cx.data_layout().instruction_address_space } else { AddressSpace::DATA }

        let pointee_info = match *this.ty.kind() {
            // A raw pointer at offset 0: known size/align but no safety guarantees.
            ty::RawPtr(mt) if offset.bytes() == 0 => {
                tcx.layout_of(param_env.and(mt.ty)).ok().map(|layout| PointeeInfo {
                    align: layout.align.abi,
                    address_space: addr_space_of_ty(mt.ty),

            ty::FnPtr(fn_sig) if offset.bytes() == 0 => {
                tcx.layout_of(param_env.and(tcx.mk_fn_ptr(fn_sig))).ok().map(|layout| PointeeInfo {
                    align: layout.align.abi,
                    address_space: cx.data_layout().instruction_address_space,

            // A reference at offset 0: classify its `PointerKind` for codegen attributes.
            ty::Ref(_, ty, mt) if offset.bytes() == 0 => {
                let address_space = addr_space_of_ty(ty);
                let kind = if tcx.sess.opts.optimize == OptLevel::No {
                    // Use conservative pointer kind if not optimizing. This saves us the
                    // Freeze/Unpin queries, and can save time in the codegen backend (noalias
                    // attributes in LLVM have compile-time cost even in unoptimized builds).
                        hir::Mutability::Not => {
                            if ty.is_freeze(tcx.at(DUMMY_SP), cx.param_env()) {
                        hir::Mutability::Mut => {
                            // References to self-referential structures should not be considered
                            // noalias, as another pointer to the structure can be obtained, that
                            // is not based-on the original reference. We consider all !Unpin
                            // types to be potentially self-referential here.
                            if ty.is_unpin(tcx.at(DUMMY_SP), cx.param_env()) {
                                PointerKind::UniqueBorrowed

                tcx.layout_of(param_env.and(ty)).ok().map(|layout| PointeeInfo {
                    align: layout.align.abi,

                // Aggregate case: recurse into fields to find a pointer at `offset`.
                let mut data_variant = match this.variants {
                    // Within the discriminant field, only the niche itself is
                    // always initialized, so we only check for a pointer at its
                    // If the niche is a pointer, it's either valid (according
                    // to its type), or null (which the niche field's scalar
                    // validity range encodes). This allows using
                    // `dereferenceable_or_null` for e.g., `Option<&T>`, and
                    // this will continue to work as long as we don't start
                    // using more niches than just null (e.g., the first page of
                    // the address space, or unaligned pointers).
                    Variants::Multiple {
                        tag_encoding: TagEncoding::Niche { dataful_variant, .. },
                    } if this.fields.offset(tag_field) == offset => {
                        Some(this.for_variant(cx, dataful_variant))

                if let Some(variant) = data_variant {
                    // We're not interested in any unions.
                    if let FieldsShape::Union(_) = variant.fields {
                        data_variant = None;

                let mut result = None;

                if let Some(variant) = data_variant {
                    // Find the field whose byte range fully contains the pointer.
                    let ptr_end = offset + Pointer.size(cx);
                    for i in 0..variant.fields.count() {
                        let field_start = variant.fields.offset(i);
                        if field_start <= offset {
                            let field = variant.field(cx, i);
                            result = field.to_result().ok().and_then(|field| {
                                if ptr_end <= field_start + field.size {
                                    // We found the right field, look inside it.
                                        field.pointee_info_at(cx, offset - field_start);
                        if result.is_some() {

                // FIXME(eddyb) This should be for `ptr::Unique<T>`, not `Box<T>`.
                if let Some(ref mut pointee) = result {
                    if let ty::Adt(def, _) = this.ty.kind() {
                        if def.is_box() && offset.bytes() == 0 {
                            pointee.safe = Some(PointerKind::UniqueOwned);

            "pointee_info_at (offset={:?}, type kind: {:?}) => {:?}",
2596 fn is_adt(this: TyAndLayout<'tcx>) -> bool {
2597 matches!(this.ty.kind(), ty::Adt(..))
2600 fn is_never(this: TyAndLayout<'tcx>) -> bool {
2601 this.ty.kind() == &ty::Never
2604 fn is_tuple(this: TyAndLayout<'tcx>) -> bool {
2605 matches!(this.ty.kind(), ty::Tuple(..))
2608 fn is_unit(this: TyAndLayout<'tcx>) -> bool {
2609 matches!(this.ty.kind(), ty::Tuple(list) if list.len() == 0)
impl<'tcx> ty::Instance<'tcx> {
    // NOTE(eddyb) this is private to avoid using it from outside of
    // `fn_abi_of_instance` - any other uses are either too high-level
    // for `Instance` (e.g. typeck would use `Ty::fn_sig` instead),
    // or should go through `FnAbi` instead, to avoid losing any
    // adjustments `fn_abi_of_instance` might be performing.
    //
    // Computes the `PolyFnSig` that codegen should use for this instance,
    // handling `FnDef`s, closures and generators (each of which stores its
    // signature differently). NOTE(review): interior lines elided in this excerpt.
    fn fn_sig_for_fn_abi(
        param_env: ty::ParamEnv<'tcx>,
    ) -> ty::PolyFnSig<'tcx> {
        let ty = self.ty(tcx, param_env);

        // HACK(davidtwco,eddyb): This is a workaround for polymorphization considering
        // parameters unused if they show up in the signature, but not in the `mir::Body`
        // (i.e. due to being inside a projection that got normalized, see
        // `src/test/ui/polymorphization/normalized_sig_types.rs`), and codegen not keeping
        // track of a polymorphization `ParamEnv` to allow normalizing later.
        let mut sig = match *ty.kind() {
            ty::FnDef(def_id, substs) => tcx
                .normalize_erasing_regions(tcx.param_env(def_id), tcx.fn_sig(def_id))
                .subst(tcx, substs),
            _ => unreachable!(),

        if let ty::InstanceDef::VtableShim(..) = self.def {
            // Modify `fn(self, ...)` to `fn(self: *mut Self, ...)`.
            sig = sig.map_bound(|mut sig| {
                let mut inputs_and_output = sig.inputs_and_output.to_vec();
                inputs_and_output[0] = tcx.mk_mut_ptr(inputs_and_output[0]);
                sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);

            // Closures: prepend the (late-bound) environment parameter to the signature.
            ty::Closure(def_id, substs) => {
                let sig = substs.as_closure().sig();

                let bound_vars = tcx.mk_bound_variable_kinds(
                        .chain(iter::once(ty::BoundVariableKind::Region(ty::BrEnv))),
                let br = ty::BoundRegion {
                    var: ty::BoundVar::from_usize(bound_vars.len() - 1),
                    kind: ty::BoundRegionKind::BrEnv,
                let env_region = ty::ReLateBound(ty::INNERMOST, br);
                let env_ty = tcx.closure_env_ty(def_id, substs, env_region).unwrap();

                let sig = sig.skip_binder();
                ty::Binder::bind_with_vars(
                        iter::once(env_ty).chain(sig.inputs().iter().cloned()),

            // Generators: `resume(Pin<&mut Self>, ResumeTy) -> GeneratorState<Y, R>`.
            ty::Generator(_, substs, _) => {
                let sig = substs.as_generator().poly_sig();

                let bound_vars = tcx.mk_bound_variable_kinds(
                        .chain(iter::once(ty::BoundVariableKind::Region(ty::BrEnv))),
                let br = ty::BoundRegion {
                    var: ty::BoundVar::from_usize(bound_vars.len() - 1),
                    kind: ty::BoundRegionKind::BrEnv,
                let env_region = ty::ReLateBound(ty::INNERMOST, br);
                let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty);

                // Wrap the environment reference in `Pin<&mut Self>`.
                let pin_did = tcx.require_lang_item(LangItem::Pin, None);
                let pin_adt_ref = tcx.adt_def(pin_did);
                let pin_substs = tcx.intern_substs(&[env_ty.into()]);
                let env_ty = tcx.mk_adt(pin_adt_ref, pin_substs);

                // Return type is `GeneratorState<yield_ty, return_ty>`.
                let sig = sig.skip_binder();
                let state_did = tcx.require_lang_item(LangItem::GeneratorState, None);
                let state_adt_ref = tcx.adt_def(state_did);
                let state_substs = tcx.intern_substs(&[sig.yield_ty.into(), sig.return_ty.into()]);
                let ret_ty = tcx.mk_adt(state_adt_ref, state_substs);
                ty::Binder::bind_with_vars(
                        [env_ty, sig.resume_ty].iter(),
                        hir::Unsafety::Normal,
                        rustc_target::spec::abi::Abi::Rust,

            _ => bug!("unexpected type {:?} in Instance::fn_sig", ty),
/// Calculates whether a function's ABI can unwind or not.
///
/// This takes two primary parameters:
///
/// * `codegen_fn_attr_flags` - these are flags calculated as part of the
///   codegen attrs for a defined function. For function pointers this set of
///   flags is the empty set. This is only applicable for Rust-defined
///   functions, and generally isn't needed except for small optimizations where
///   we try to say a function which otherwise might look like it could unwind
///   doesn't actually unwind (such as for intrinsics and such).
///
/// * `abi` - this is the ABI that the function is defined with. This is the
///   primary factor for determining whether a function can unwind or not.
///
/// Note that in this case unwinding is not necessarily panicking in Rust. Rust
/// panics are implemented with unwinds on most platforms (when
/// `-Cpanic=unwind`), but this also accounts for `-Cpanic=abort` build modes.
/// Notably unwinding is disallowed for most non-Rust ABIs unless it's
/// specifically in the name (e.g. `"C-unwind"`). Unwinding within each ABI is
/// defined for each ABI individually, but it always corresponds to some form of
/// stack-based unwinding (the exact mechanism of which varies
/// platform-by-platform).
///
/// Rust functions are classified whether or not they can unwind based on the
/// active "panic strategy". In other words Rust functions are considered to
/// unwind in `-Cpanic=unwind` mode and cannot unwind in `-Cpanic=abort` mode.
/// Note that Rust supports intermingling panic=abort and panic=unwind code, but
/// only if the final panic mode is panic=abort. In this scenario any code
/// previously compiled assuming that a function can unwind is still correct, it
/// just never happens to actually unwind at runtime.
///
/// This function's answer to whether or not a function can unwind is quite
/// impactful throughout the compiler. This affects things like:
///
/// * Calling a function which can't unwind means codegen simply ignores any
///   associated unwinding cleanup.
/// * Calling a function which can unwind from a function which can't unwind
///   causes the `abort_unwinding_calls` MIR pass to insert a landing pad that
///   aborts the process.
/// * This affects whether functions have the LLVM `nounwind` attribute, which
///   affects various optimizations and codegen.
///
/// FIXME: this is actually buggy with respect to Rust functions. Rust functions
/// compiled with `-Cpanic=unwind` and referenced from another crate compiled
/// with `-Cpanic=abort` will look like they can't unwind when in fact they
/// might (from a foreign exception or similar).
// NOTE(review): interior lines of this function are elided in this excerpt.
pub fn fn_can_unwind<'tcx>(
    codegen_fn_attr_flags: CodegenFnAttrFlags,
    // Special attribute for functions which can't unwind.
    if codegen_fn_attr_flags.contains(CodegenFnAttrFlags::NEVER_UNWIND) {

    // Otherwise if this isn't special then unwinding is generally determined by
    // the ABI of the function itself. ABIs like `C` have variants which also
    // specifically allow unwinding (`C-unwind`), but not all platform-specific
    // ABIs have such an option. Otherwise the only other thing here is Rust
    // itself, and those ABIs are determined by the panic strategy configured
    // for this compilation.
    //
    // Unfortunately at this time there's also another caveat. Rust [RFC
    // 2945][rfc] has been accepted and is in the process of being implemented
    // and stabilized. In this interim state we need to deal with historical
    // rustc behavior as well as plan for future rustc behavior.
    //
    // Historically functions declared with `extern "C"` were marked at the
    // codegen layer as `nounwind`. This happened regardless of `panic=unwind`
    // or not. This is UB for functions in `panic=unwind` mode that then
    // actually panic and unwind. Note that this behavior is true for both
    // externally declared functions as well as Rust-defined function.
    //
    // To fix this UB rustc would like to change in the future to catch unwinds
    // from function calls that may unwind within a Rust-defined `extern "C"`
    // function and forcibly abort the process, thereby respecting the
    // `nounwind` attribute emitted for `extern "C"`. This behavior change isn't
    // ready to roll out, so determining whether or not the `C` family of ABIs
    // unwinds is conditional not only on their definition but also whether the
    // `#![feature(c_unwind)]` feature gate is active.
    //
    // Note that this means that unlike historical compilers rustc now, by
    // default, unconditionally thinks that the `C` ABI may unwind. This will
    // prevent some optimization opportunities, however, so we try to scope this
    // change and only assume that `C` unwinds with `panic=unwind` (as opposed
    // to `panic=abort`).
    //
    // Eventually the check against `c_unwind` here will ideally get removed and
    // this'll be a little cleaner as it'll be a straightforward check of the
    //
    // [rfc]: https://github.com/rust-lang/rfcs/blob/master/text/2945-c-unwind-abi.md
        // The `-unwind` ABI variants unwind iff requested (or, pre-`c_unwind`,
        // whenever the panic strategy is `unwind`).
        | Stdcall { unwind }
        | Fastcall { unwind }
        | Vectorcall { unwind }
        | Thiscall { unwind }
        | SysV64 { unwind } => {
                || (!tcx.features().c_unwind && tcx.sess.panic_strategy() == PanicStrategy::Unwind)
        // ABIs with no unwinding mechanism at all.
        | AvrNonBlockingInterrupt
        | CCmseNonSecureCall
        | Unadjusted => false,
        // Rust ABIs unwind exactly when compiling with `-Cpanic=unwind`.
        Rust | RustCall => tcx.sess.panic_strategy() == PanicStrategy::Unwind,
/// Maps a source-level `SpecAbi` (after target-specific adjustment) to the
/// backend calling convention (`Conv`) used when lowering `FnAbi`s.
/// NOTE(review): a few match arms/closing lines are elided in this excerpt.
pub fn conv_from_spec_abi(tcx: TyCtxt<'_>, abi: SpecAbi) -> Conv {
    use rustc_target::spec::abi::Abi::*;
    match tcx.sess.target.adjust_abi(abi) {
        RustIntrinsic | PlatformIntrinsic | Rust | RustCall => Conv::Rust,

        // It's the ABI's job to select this, not ours.
        System { .. } => bug!("system abi should be selected elsewhere"),
        EfiApi => bug!("eficall abi should be selected elsewhere"),

        Stdcall { .. } => Conv::X86Stdcall,
        Fastcall { .. } => Conv::X86Fastcall,
        Vectorcall { .. } => Conv::X86VectorCall,
        Thiscall { .. } => Conv::X86ThisCall,
        C { .. } => Conv::C,
        Unadjusted => Conv::C,
        Win64 { .. } => Conv::X86_64Win64,
        SysV64 { .. } => Conv::X86_64SysV,
        Aapcs { .. } => Conv::ArmAapcs,
        CCmseNonSecureCall => Conv::CCmseNonSecureCall,
        PtxKernel => Conv::PtxKernel,
        Msp430Interrupt => Conv::Msp430Intr,
        X86Interrupt => Conv::X86Intr,
        AmdGpuKernel => Conv::AmdGpuKernel,
        AvrInterrupt => Conv::AvrInterrupt,
        AvrNonBlockingInterrupt => Conv::AvrNonBlockingInterrupt,

        // These API constants ought to be more specific...
        Cdecl { .. } => Conv::C,
/// Error produced by attempting to compute or adjust a `FnAbi`.
#[derive(Copy, Clone, Debug, HashStable)]
pub enum FnAbiError<'tcx> {
    /// Error produced by a `layout_of` call, while computing `FnAbi` initially.
    Layout(LayoutError<'tcx>),

    /// Error produced by attempting to adjust a `FnAbi`, for a "foreign" ABI.
    AdjustForForeignAbi(call::AdjustForForeignAbiError),
// Allows `?` to convert layout-query failures into `FnAbiError::Layout`
// (body elided in this excerpt).
impl<'tcx> From<LayoutError<'tcx>> for FnAbiError<'tcx> {
    fn from(err: LayoutError<'tcx>) -> Self {
// Allows `?` to convert foreign-ABI adjustment failures into
// `FnAbiError::AdjustForForeignAbi`.
impl From<call::AdjustForForeignAbiError> for FnAbiError<'_> {
    fn from(err: call::AdjustForForeignAbiError) -> Self {
        Self::AdjustForForeignAbi(err)
// User-facing display: delegate to whichever underlying error we wrap.
impl<'tcx> fmt::Display for FnAbiError<'tcx> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            Self::Layout(err) => err.fmt(f),
            Self::AdjustForForeignAbi(err) => err.fmt(f),
// FIXME(eddyb) maybe use something like this for an unified `fn_abi_of`, not
// just for error handling.
// Describes which `fn_abi_of_*` entry point a failed request came from,
// so error handlers can report it precisely.
pub enum FnAbiRequest<'tcx> {
    OfFnPtr { sig: ty::PolyFnSig<'tcx>, extra_args: &'tcx ty::List<Ty<'tcx>> },
    OfInstance { instance: ty::Instance<'tcx>, extra_args: &'tcx ty::List<Ty<'tcx>> },
/// Trait for contexts that want to be able to compute `FnAbi`s.
/// This automatically gives access to `FnAbiOf`, through a blanket `impl`.
pub trait FnAbiOfHelpers<'tcx>: LayoutOfHelpers<'tcx> {
    /// The `&FnAbi`-wrapping type (or `&FnAbi` itself), which will be
    /// returned from `fn_abi_of_*` (see also `handle_fn_abi_err`).
    type FnAbiOfResult: MaybeResult<&'tcx FnAbi<'tcx, Ty<'tcx>>>;

    /// Helper used for `fn_abi_of_*`, to adapt `tcx.fn_abi_of_*(...)` into a
    /// `Self::FnAbiOfResult` (which does not need to be a `Result<...>`).
    ///
    /// Most `impl`s, which propagate `FnAbiError`s, should simply return `err`,
    /// but this hook allows e.g. codegen to return only `&FnAbi` from its
    /// `cx.fn_abi_of_*(...)`, without any `Result<...>` around it to deal with
    /// (and any `FnAbiError`s are turned into fatal errors or ICEs).
    // NOTE(review): some parameter lines are elided in this excerpt.
    fn handle_fn_abi_err(
        err: FnAbiError<'tcx>,
        fn_abi_request: FnAbiRequest<'tcx>,
    ) -> <Self::FnAbiOfResult as MaybeResult<&'tcx FnAbi<'tcx, Ty<'tcx>>>>::Error;
/// Blanket extension trait for contexts that can compute `FnAbi`s.
pub trait FnAbiOf<'tcx>: FnAbiOfHelpers<'tcx> {
    /// Compute a `FnAbi` suitable for indirect calls, i.e. to `fn` pointers.
    ///
    /// NB: this doesn't handle virtual calls - those should use `fn_abi_of_instance`
    /// instead, where the instance is an `InstanceDef::Virtual`.
    fn fn_abi_of_fn_ptr(
        sig: ty::PolyFnSig<'tcx>,
        extra_args: &'tcx ty::List<Ty<'tcx>>,
    ) -> Self::FnAbiOfResult {
        // FIXME(eddyb) get a better `span` here.
        let span = self.layout_tcx_at_span();
        let tcx = self.tcx().at(span);

        // Delegate to the query; errors are translated by the context's hook.
        MaybeResult::from(tcx.fn_abi_of_fn_ptr(self.param_env().and((sig, extra_args))).map_err(
            |err| self.handle_fn_abi_err(err, span, FnAbiRequest::OfFnPtr { sig, extra_args }),

    /// Compute a `FnAbi` suitable for declaring/defining an `fn` instance, and for
    /// direct calls to an `fn`.
    ///
    /// NB: that includes virtual calls, which are represented by "direct calls"
    /// to an `InstanceDef::Virtual` instance (of `<dyn Trait as Trait>::fn`).
    fn fn_abi_of_instance(
        instance: ty::Instance<'tcx>,
        extra_args: &'tcx ty::List<Ty<'tcx>>,
    ) -> Self::FnAbiOfResult {
        // FIXME(eddyb) get a better `span` here.
        let span = self.layout_tcx_at_span();
        let tcx = self.tcx().at(span);

            tcx.fn_abi_of_instance(self.param_env().and((instance, extra_args))).map_err(|err| {
                // HACK(eddyb) at least for definitions of/calls to `Instance`s,
                // we can get some kind of span even if one wasn't provided.
                // However, we don't do this early in order to avoid calling
                // `def_span` unconditionally (which may have a perf penalty).
                let span = if !span.is_dummy() { span } else { tcx.def_span(instance.def_id()) };
                self.handle_fn_abi_err(err, span, FnAbiRequest::OfInstance { instance, extra_args })
2986 impl<'tcx, C: FnAbiOfHelpers<'tcx>> FnAbiOf<'tcx> for C {}
// Query provider for `fn_abi_of_fn_ptr`: computes a `FnAbi` for an indirect
// (fn-pointer) call; such calls have no codegen attrs, hence `empty()` flags.
// NOTE(review): some argument lines are elided in this excerpt.
fn fn_abi_of_fn_ptr<'tcx>(
    query: ty::ParamEnvAnd<'tcx, (ty::PolyFnSig<'tcx>, &'tcx ty::List<Ty<'tcx>>)>,
) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
    let (param_env, (sig, extra_args)) = query.into_parts();

    LayoutCx { tcx, param_env }.fn_abi_new_uncached(
        CodegenFnAttrFlags::empty(),
// Query provider for `fn_abi_of_instance`: computes a `FnAbi` for a concrete
// instance, threading through its `#[track_caller]` location argument, codegen
// attr flags, and whether it is a virtual (vtable) call.
// NOTE(review): some argument lines are elided in this excerpt.
fn fn_abi_of_instance<'tcx>(
    query: ty::ParamEnvAnd<'tcx, (ty::Instance<'tcx>, &'tcx ty::List<Ty<'tcx>>)>,
) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
    let (param_env, (instance, extra_args)) = query.into_parts();

    let sig = instance.fn_sig_for_fn_abi(tcx, param_env);

    // `#[track_caller]` functions receive an implicit trailing `Location` argument.
    let caller_location = if instance.def.requires_caller_location(tcx) {
        Some(tcx.caller_location_ty())

    let attrs = tcx.codegen_fn_attrs(instance.def_id()).flags;

    LayoutCx { tcx, param_env }.fn_abi_new_uncached(
        matches!(instance.def, ty::InstanceDef::Virtual(..)),
impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
    // FIXME(eddyb) perhaps group the signature/type-containing (or all of them?)
    // arguments of this method, into a separate `struct`.
    //
    // Builds the `FnAbi` for `sig`: resolves the calling convention, computes
    // per-argument layouts and attributes, then applies ABI-specific adjustment.
    // NOTE(review): interior lines of this method are elided in this excerpt.
    fn fn_abi_new_uncached(
        sig: ty::PolyFnSig<'tcx>,
        extra_args: &[Ty<'tcx>],
        caller_location: Option<Ty<'tcx>>,
        codegen_fn_attr_flags: CodegenFnAttrFlags,
        // FIXME(eddyb) replace this with something typed, like an `enum`.
        force_thin_self_ptr: bool,
    ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
        debug!("fn_abi_new_uncached({:?}, {:?})", sig, extra_args);

        let sig = self.tcx.normalize_erasing_late_bound_regions(self.param_env, sig);

        let conv = conv_from_spec_abi(self.tcx(), sig.abi);

        let mut inputs = sig.inputs();
        let extra_args = if sig.abi == RustCall {
            // "rust-call": the last input is a tuple that gets untupled into
            // the real trailing arguments.
            assert!(!sig.c_variadic && extra_args.is_empty());

            if let Some(input) = sig.inputs().last() {
                if let ty::Tuple(tupled_arguments) = input.kind() {
                    inputs = &sig.inputs()[0..sig.inputs().len() - 1];
                        "argument to function with \"rust-call\" ABI \
                        "argument to function with \"rust-call\" ABI \
            assert!(sig.c_variadic || extra_args.is_empty());

        // Targets with ABI quirks around zero-sized arguments (see `is_zst` below).
        let target = &self.tcx.sess.target;
        let target_env_gnu_like = matches!(&target.env[..], "gnu" | "musl" | "uclibc");
        let win_x64_gnu = target.os == "windows" && target.arch == "x86_64" && target.env == "gnu";
        let linux_s390x_gnu_like =
            target.os == "linux" && target.arch == "s390x" && target_env_gnu_like;
        let linux_sparc64_gnu_like =
            target.os == "linux" && target.arch == "sparc64" && target_env_gnu_like;
        let linux_powerpc_gnu_like =
            target.os == "linux" && target.arch == "powerpc" && target_env_gnu_like;

        let rust_abi = matches!(sig.abi, RustIntrinsic | PlatformIntrinsic | Rust | RustCall);

        // Handle safe Rust thin and fat pointers.
        let adjust_for_rust_scalar = |attrs: &mut ArgAttributes,
                                      layout: TyAndLayout<'tcx>,
            // Booleans are always a noundef i1 that needs to be zero-extended.
            if scalar.is_bool() {
                attrs.ext(ArgExtension::Zext);
                attrs.set(ArgAttribute::NoUndef);

            // Scalars which have invalid values cannot be undef.
            if !scalar.is_always_valid(self) {
                attrs.set(ArgAttribute::NoUndef);

            // Only pointer types handled below.
            let Scalar::Initialized { value: Pointer, valid_range} = scalar else { return };

            if !valid_range.contains(0) {
                attrs.set(ArgAttribute::NonNull);

            if let Some(pointee) = layout.pointee_info_at(self, offset) {
                if let Some(kind) = pointee.safe {
                    attrs.pointee_align = Some(pointee.align);

                    // `Box` (`UniqueBorrowed`) are not necessarily dereferenceable
                    // for the entire duration of the function as they can be deallocated
                    // at any time. Set their valid size to 0.
                    attrs.pointee_size = match kind {
                        PointerKind::UniqueOwned => Size::ZERO,

                    // `Box`, `&T`, and `&mut T` cannot be undef.
                    // Note that this only applies to the value of the pointer itself;
                    // this attribute doesn't make it UB for the pointed-to data to be undef.
                    attrs.set(ArgAttribute::NoUndef);

                    // `Box` pointer parameters never alias because ownership is transferred
                    // `&mut` pointer parameters never alias other parameters,
                    // or mutable global data
                    //
                    // `&T` where `T` contains no `UnsafeCell<U>` is immutable,
                    // and can be marked as both `readonly` and `noalias`, as
                    // LLVM's definition of `noalias` is based solely on memory
                    // dependencies rather than pointer equality
                    //
                    // Due to past miscompiles in LLVM, we apply a separate NoAliasMutRef attribute
                    // for UniqueBorrowed arguments, so that the codegen backend can decide whether
                    // or not to actually emit the attribute. It can also be controlled with the
                    // `-Zmutable-noalias` debugging option.
                    let no_alias = match kind {
                        PointerKind::Shared | PointerKind::UniqueBorrowed => false,
                        PointerKind::UniqueOwned => true,
                        PointerKind::Frozen => !is_return,
                        attrs.set(ArgAttribute::NoAlias);

                    if kind == PointerKind::Frozen && !is_return {
                        attrs.set(ArgAttribute::ReadOnly);

                    if kind == PointerKind::UniqueBorrowed && !is_return {
                        attrs.set(ArgAttribute::NoAliasMutRef);

        // Computes the `ArgAbi` for one argument (or, with `arg_idx == None`, the return).
        let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| -> Result<_, FnAbiError<'tcx>> {
            let is_return = arg_idx.is_none();

            let layout = self.layout_of(ty)?;
            let layout = if force_thin_self_ptr && arg_idx == Some(0) {
                // Don't pass the vtable, it's not an argument of the virtual fn.
                // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
                // or `&/&mut dyn Trait` because this is special-cased elsewhere in codegen
                make_thin_self_ptr(self, layout)

            let mut arg = ArgAbi::new(self, layout, |layout, scalar, offset| {
                let mut attrs = ArgAttributes::new();
                adjust_for_rust_scalar(&mut attrs, scalar, *layout, offset, is_return);

            if arg.layout.is_zst() {
                // For some forsaken reason, x86_64-pc-windows-gnu
                // doesn't ignore zero-sized struct arguments.
                // The same is true for {s390x,sparc64,powerpc}-unknown-linux-{gnu,musl,uclibc}.
                    && !linux_s390x_gnu_like
                    && !linux_sparc64_gnu_like
                    && !linux_powerpc_gnu_like)
                    arg.mode = PassMode::Ignore;

        // Assemble: return slot, declared inputs, untupled extra args, and
        // (for `#[track_caller]`) the trailing caller-location argument.
        let mut fn_abi = FnAbi {
            ret: arg_of(sig.output(), None)?,
                .chain(extra_args.iter().copied())
                .chain(caller_location)
                .map(|(i, ty)| arg_of(ty, Some(i)))
                .collect::<Result<_, _>>()?,
            c_variadic: sig.c_variadic,
            fixed_count: inputs.len(),
            can_unwind: fn_can_unwind(self.tcx(), codegen_fn_attr_flags, sig.abi),
        self.fn_abi_adjust_for_abi(&mut fn_abi, sig.abi)?;
        debug!("fn_abi_new_uncached = {:?}", fn_abi);
        Ok(self.tcx.arena.alloc(fn_abi))
    // Applies ABI-specific fixups to a freshly built `FnAbi`: Rust-family ABIs
    // get in-tree adjustments, anything else defers to `adjust_for_foreign_abi`.
    // NOTE(review): interior lines of this method are elided in this excerpt.
    fn fn_abi_adjust_for_abi(
        fn_abi: &mut FnAbi<'tcx, Ty<'tcx>>,
    ) -> Result<(), FnAbiError<'tcx>> {
        // "Unadjusted" means exactly that: no fixups at all.
        if abi == SpecAbi::Unadjusted {

        if abi == SpecAbi::Rust
            || abi == SpecAbi::RustCall
            || abi == SpecAbi::RustIntrinsic
            || abi == SpecAbi::PlatformIntrinsic
            let fixup = |arg: &mut ArgAbi<'tcx, Ty<'tcx>>| {
                if arg.is_ignore() {

                match arg.layout.abi {
                    Abi::Aggregate { .. } => {}

                    // This is a fun case! The gist of what this is doing is
                    // that we want callers and callees to always agree on the
                    // ABI of how they pass SIMD arguments. If we were to *not*
                    // make these arguments indirect then they'd be immediates
                    // in LLVM, which means that they'd used whatever the
                    // appropriate ABI is for the callee and the caller. That
                    // means, for example, if the caller doesn't have AVX
                    // enabled but the callee does, then passing an AVX argument
                    // across this boundary would cause corrupt data to show up.
                    //
                    // This problem is fixed by unconditionally passing SIMD
                    // arguments through memory between callers and callees
                    // which should get them all to agree on ABI regardless of
                    // target feature sets. Some more information about this
                    // issue can be found in #44367.
                    //
                    // Note that the platform intrinsic ABI is exempt here as
                    // that's how we connect up to LLVM and it's unstable
                    // anyway, we control all calls to it in libstd.
                        if abi != SpecAbi::PlatformIntrinsic
                            && self.tcx.sess.target.simd_types_indirect =>
                        arg.make_indirect();

                // Large or unsized aggregates go indirect; small ones are cast
                // to an integer register to avoid bad LLVM aggregate codegen.
                let size = arg.layout.size;
                if arg.layout.is_unsized() || size > Pointer.size(self) {
                    arg.make_indirect();
                    // We want to pass small aggregates as immediates, but using
                    // a LLVM aggregate type for this leads to bad optimizations,
                    // so we pick an appropriately sized integer type instead.
                    arg.cast_to(Reg { kind: RegKind::Integer, size });
            fixup(&mut fn_abi.ret);
            for arg in &mut fn_abi.args {
            fn_abi.adjust_for_foreign_abi(self, abi)?;
// For virtual calls: replaces a (possibly fat) `self` receiver layout with a
// thin-pointer layout of the same type, so only the data pointer is passed.
// NOTE(review): interior/closing lines of this function are elided in this excerpt.
fn make_thin_self_ptr<'tcx>(
    cx: &(impl HasTyCtxt<'tcx> + HasParamEnv<'tcx>),
    layout: TyAndLayout<'tcx>,
) -> TyAndLayout<'tcx> {
    let fat_pointer_ty = if layout.is_unsized() {
        // unsized `self` is passed as a pointer to `self`
        // FIXME (mikeyhew) change this to use &own if it is ever added to the language
        tcx.mk_mut_ptr(layout.ty)
            // A sized receiver must itself be a fat pointer (ScalarPair).
            Abi::ScalarPair(..) => (),
            _ => bug!("receiver type has unsupported layout: {:?}", layout),

        // In the case of Rc<Self>, we need to explicitly pass a *mut RcBox<Self>
        // with a Scalar (not ScalarPair) ABI. This is a hack that is understood
        // elsewhere in the compiler as a method on a `dyn Trait`.
        // To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes until we
        // get a built-in pointer type
        let mut fat_pointer_layout = layout;
        'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr()
            && !fat_pointer_layout.ty.is_region_ptr()
            for i in 0..fat_pointer_layout.fields.count() {
                let field_layout = fat_pointer_layout.field(cx, i);

                if !field_layout.is_zst() {
                    fat_pointer_layout = field_layout;
                    continue 'descend_newtypes;

            bug!("receiver has no non-zero-sized fields {:?}", fat_pointer_layout);

        fat_pointer_layout.ty

    // we now have a type like `*mut RcBox<dyn Trait>`
    // change its layout to that of `*mut ()`, a thin pointer, but keep the same type
    // this is understood as a special case elsewhere in the compiler
    let unit_ptr_ty = tcx.mk_mut_ptr(tcx.mk_unit());

        // NOTE(eddyb) using an empty `ParamEnv`, and `unwrap`-ing the `Result`
        // should always work because the type is always `*mut ()`.
        ..tcx.layout_of(ty::ParamEnv::reveal_all().and(unit_ptr_ty)).unwrap()