1 use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
2 use crate::mir::{GeneratorLayout, GeneratorSavedLocal};
3 use crate::ty::normalize_erasing_regions::NormalizationError;
5 self, layout_sanity_check::sanity_check_layout, subst::SubstsRef, EarlyBinder, ReprOptions, Ty,
9 use rustc_attr as attr;
11 use rustc_hir::def_id::DefId;
12 use rustc_hir::lang_items::LangItem;
13 use rustc_index::bit_set::BitSet;
14 use rustc_index::vec::{Idx, IndexVec};
15 use rustc_session::{config::OptLevel, DataTypeKind, FieldInfo, SizeKind, VariantInfo};
16 use rustc_span::symbol::Symbol;
17 use rustc_span::{Span, DUMMY_SP};
18 use rustc_target::abi::call::{
19 ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, Conv, FnAbi, PassMode, Reg, RegKind,
21 use rustc_target::abi::*;
22 use rustc_target::spec::{abi::Abi as SpecAbi, HasTargetSpec, PanicStrategy, Target};
24 use std::cmp::{self, Ordering};
27 use std::num::NonZeroUsize;
30 use rand::{seq::SliceRandom, SeedableRng};
31 use rand_xoshiro::Xoshiro128StarStar;
// Registers the layout/ABI queries (`layout_of`, `fn_abi_of_fn_ptr`,
// `fn_abi_of_instance`) with the global query provider table; every other
// provider is carried over unchanged via `..*providers`.
// NOTE(review): the lines assigning the struct back to `*providers` are
// elided in this excerpt.
33 pub fn provide(providers: &mut ty::query::Providers) {
35 ty::query::Providers { layout_of, fn_abi_of_fn_ptr, fn_abi_of_instance, ..*providers };
// Extension trait adding rustc-specific conversions to `rustc_target`'s
// `Integer` type (implemented for `Integer` below).
38 pub trait IntegerExt {
// Convert this integer width plus a signedness flag into the matching Rust type.
39 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx>;
// Map an `attr::IntType` (from a `#[repr(..)]` attribute) to an `Integer` width.
40 fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
// Map a signed Rust integer type to its `Integer` width (`isize` is target-dependent).
41 fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer;
// Map an unsigned Rust integer type to its `Integer` width (`usize` is target-dependent).
42 fn from_uint_ty<C: HasDataLayout>(cx: &C, uty: ty::UintTy) -> Integer;
52 impl IntegerExt for Integer {
// Exhaustively maps (width, signedness) to the interned primitive integer type.
54 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx> {
55 match (*self, signed) {
56 (I8, false) => tcx.types.u8,
57 (I16, false) => tcx.types.u16,
58 (I32, false) => tcx.types.u32,
59 (I64, false) => tcx.types.u64,
60 (I128, false) => tcx.types.u128,
61 (I8, true) => tcx.types.i8,
62 (I16, true) => tcx.types.i16,
63 (I32, true) => tcx.types.i32,
64 (I64, true) => tcx.types.i64,
65 (I128, true) => tcx.types.i128,
69 /// Gets the Integer type from an attr::IntType.
70 fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
71 let dl = cx.data_layout();
// Signed/unsigned attribute variants of the same bit width collapse to one
// `Integer`; signedness is not encoded in `Integer` itself.
74 attr::SignedInt(ast::IntTy::I8) | attr::UnsignedInt(ast::UintTy::U8) => I8,
75 attr::SignedInt(ast::IntTy::I16) | attr::UnsignedInt(ast::UintTy::U16) => I16,
76 attr::SignedInt(ast::IntTy::I32) | attr::UnsignedInt(ast::UintTy::U32) => I32,
77 attr::SignedInt(ast::IntTy::I64) | attr::UnsignedInt(ast::UintTy::U64) => I64,
78 attr::SignedInt(ast::IntTy::I128) | attr::UnsignedInt(ast::UintTy::U128) => I128,
// `isize`/`usize` width comes from the target's data layout, not a fixed size.
79 attr::SignedInt(ast::IntTy::Isize) | attr::UnsignedInt(ast::UintTy::Usize) => {
80 dl.ptr_sized_integer()
85 fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer {
88 ty::IntTy::I16 => I16,
89 ty::IntTy::I32 => I32,
90 ty::IntTy::I64 => I64,
91 ty::IntTy::I128 => I128,
92 ty::IntTy::Isize => cx.data_layout().ptr_sized_integer(),
95 fn from_uint_ty<C: HasDataLayout>(cx: &C, ity: ty::UintTy) -> Integer {
98 ty::UintTy::U16 => I16,
99 ty::UintTy::U32 => I32,
100 ty::UintTy::U64 => I64,
101 ty::UintTy::U128 => I128,
102 ty::UintTy::Usize => cx.data_layout().ptr_sized_integer(),
106 /// Finds the appropriate Integer type and signedness for the given
107 /// signed discriminant range and `#[repr]` attribute.
108 /// N.B.: `u128` values above `i128::MAX` will be treated as signed, but
109 /// that shouldn't affect anything, other than maybe debuginfo.
// NOTE(review): the `repr_discr` signature lines are elided in this excerpt;
// from the body it takes (at least) `tcx`, `repr: &ReprOptions`, and the
// discriminant bounds `min`/`max` as `i128` — confirm against the full file.
116 ) -> (Integer, bool) {
117 // Theoretically, negative values could be larger in unsigned representation
118 // than the unsigned representation of the signed minimum. However, if there
119 // are any negative values, the only valid unsigned representation is u128
120 // which can fit all i128 values, so the result remains unaffected.
121 let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
122 let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));
// An explicit `#[repr(iN/uN)]` hint fixes both the width and the signedness;
// the (elided) check between these lines rejects hints too small for `fit`.
124 if let Some(ity) = repr.int {
125 let discr = Integer::from_attr(&tcx, ity);
126 let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
129 "Integer::repr_discr: `#[repr]` hint too small for \
130 discriminant range of enum `{}",
134 return (discr, ity.is_signed());
// No explicit hint: `repr(C)` enums have a target-defined minimum size,
// `repr(Rust)` enums may shrink to the smallest fitting integer.
137 let at_least = if repr.c() {
138 // This is usually I32, however it can be different on some platforms,
139 // notably hexagon and arm-none/thumb-none
140 tcx.data_layout().c_enum_min_size
142 // repr(Rust) enums try to be as small as possible
146 // If there are no negative values, we can use the unsigned fit.
148 (cmp::max(unsigned_fit, at_least), false)
150 (cmp::max(signed_fit, at_least), true)
// Extension trait adding `TyCtxt`-aware conversions to `rustc_target`'s
// `Primitive` type (implemented for `Primitive` below).
155 pub trait PrimitiveExt {
// Convert this primitive to the matching Rust type (pointers become `*mut ()`).
156 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
// Convert to an *integer* type of the same size; panics for floats (see impl).
157 fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
160 impl PrimitiveExt for Primitive {
162 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
164 Int(i, signed) => i.to_ty(tcx, signed),
165 F32 => tcx.types.f32,
166 F64 => tcx.types.f64,
// There is no dedicated "raw pointer primitive" type, so a pointer is
// represented as `*mut ()`.
167 Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
171 /// Return an *integer* type matching this primitive.
172 /// Useful in particular when dealing with enum discriminants.
174 fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
176 Int(i, signed) => i.to_ty(tcx, signed),
// A pointer-sized discriminant is represented as `usize`.
177 Pointer => tcx.types.usize,
// Floats cannot serve as discriminants; reaching this arm is a compiler bug.
178 F32 | F64 => bug!("floats do not have an int type"),
183 /// The first half of a fat pointer.
185 /// - For a trait object, this is the address of the box.
186 /// - For a slice, this is the base address.
// NOTE(review): presumably used as the field index into the scalar-pair
// layout of wide pointers — confirm at the use sites (not shown here).
187 pub const FAT_PTR_ADDR: usize = 0;
189 /// The second half of a fat pointer.
191 /// - For a trait object, this is the address of the vtable.
192 /// - For a slice, this is the length.
193 pub const FAT_PTR_EXTRA: usize = 1;
195 /// The maximum supported number of lanes in a SIMD vector.
197 /// This value is selected based on backend support:
198 /// * LLVM does not appear to have a vector width limit.
199 /// * Cranelift stores the base-2 log of the lane count in a 4 bit integer.
// 1 << 0xF == 2^15 == 32768: the largest lane count whose base-2 log (15)
// still fits in Cranelift's 4-bit field.
200 pub const MAX_SIMD_LANES: u64 = 1 << 0xF;
// Reasons why computing a type's layout can fail. Non-fatal: callers
// propagate these as `Result<_, LayoutError<'tcx>>`.
202 #[derive(Copy, Clone, Debug, HashStable, TyEncodable, TyDecodable)]
203 pub enum LayoutError<'tcx> {
// NOTE(review): an `Unknown(Ty<'tcx>)` variant is referenced throughout this
// file but its declaration line is elided from this excerpt.
// The type's size would overflow the target's address space.
205 SizeOverflow(Ty<'tcx>),
// The type could not be normalized (e.g. an unresolved projection).
206 NormalizationFailure(Ty<'tcx>, NormalizationError<'tcx>),
// Human-readable diagnostics for layout failures (used in error messages).
209 impl<'tcx> fmt::Display for LayoutError<'tcx> {
210 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
212 LayoutError::Unknown(ty) => write!(f, "the type `{}` has an unknown layout", ty),
213 LayoutError::SizeOverflow(ty) => {
214 write!(f, "values of the type `{}` are too big for the current architecture", ty)
216 LayoutError::NormalizationFailure(t, e) => write!(
218 "unable to determine layout for `{}` because `{}` cannot be normalized",
220 e.get_type_for_failure()
// Query provider for `tcx.layout_of`: normalizes the type, computes its
// layout, and sanity-checks the result before returning it.
226 #[instrument(skip(tcx, query), level = "debug")]
229 query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
230 ) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
231 let (param_env, ty) = query.into_parts();
// Layout is computed post-typeck, so all opaque types etc. can be revealed.
234 let param_env = param_env.with_reveal_all_normalized(tcx);
235 let unnormalized_ty = ty;
237 // FIXME: We might want to have two different versions of `layout_of`:
238 // One that can be called after typecheck has completed and can use
239 // `normalize_erasing_regions` here and another one that can be called
240 // before typecheck has completed and uses `try_normalize_erasing_regions`.
241 let ty = match tcx.try_normalize_erasing_regions(param_env, ty) {
243 Err(normalization_error) => {
244 return Err(LayoutError::NormalizationFailure(ty, normalization_error));
// If normalization changed the type, recurse through the query so the result
// is cached under the normalized key as well.
248 if ty != unnormalized_ty {
249 // Ensure this layout is also cached for the normalized type.
250 return tcx.layout_of(param_env.and(ty));
253 let cx = LayoutCx { tcx, param_env };
255 let layout = cx.layout_of_uncached(ty)?;
256 let layout = TyAndLayout { ty, layout };
// Feeds `-Z print-type-sizes` style reporting.
258 cx.record_layout_for_printing(layout);
// Debug-time invariant checks on the freshly computed layout.
260 sanity_check_layout(&cx, &layout);
// Context bundle for layout computation: a backing context `C` (here used
// with `TyCtxt<'tcx>`) plus the parameter environment to normalize against.
// NOTE(review): the `tcx: C` field line is elided from this excerpt.
265 #[derive(Clone, Copy)]
266 pub struct LayoutCx<'tcx, C> {
268 pub param_env: ty::ParamEnv<'tcx>,
// Variants of `StructKind`, which tells `univariant_uninterned` how the
// fields may be laid out. NOTE(review): the `pub enum StructKind {` header
// and the `AlwaysSized` / `MaybeUnsized` variant lines are elided here.
271 #[derive(Copy, Clone, Debug)]
273 /// A tuple, closure, or univariant which cannot be coerced to unsized.
275 /// A univariant, the last field of which may be coerced to unsized.
277 /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
278 Prefixed(Size, Align),
281 // Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
282 // This is used to go between `memory_index` (source field order to memory order)
283 // and `inverse_memory_index` (memory order to source field order).
284 // See also `FieldsShape::Arbitrary::memory_index` for more details.
285 // FIXME(eddyb) build a better abstraction for permutations, if possible.
286 fn invert_mapping(map: &[u32]) -> Vec<u32> {
287 let mut inverse = vec![0; map.len()];
288 for i in 0..map.len() {
// Bijectivity guarantees every slot of `inverse` is written exactly once.
289 inverse[map[i] as usize] = i as u32;
// NOTE(review): the `inverse` return expression and closing braces are
// elided from this excerpt.
294 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
// Builds the canonical layout for a pair of scalars `(a, b)`: `b` is placed
// at `a`'s size rounded up to `b`'s alignment, and the whole pair is sized
// and aligned like an aggregate.
295 fn scalar_pair(&self, a: Scalar, b: Scalar) -> LayoutS<'tcx> {
296 let dl = self.data_layout();
297 let b_align = b.align(dl);
298 let align = a.align(dl).max(b_align).max(dl.aggregate_align);
// Offset of `b` = size of `a`, rounded up to `b`'s ABI alignment.
299 let b_offset = a.size(dl).align_to(b_align.abi);
300 let size = (b_offset + b.size(dl)).align_to(align.abi);
302 // HACK(nox): We iter on `b` and then `a` because `max_by_key`
303 // returns the last maximum.
// Prefer the niche with more available values; tie goes to `a` (offset 0).
304 let largest_niche = Niche::from_scalar(dl, b_offset, b)
306 .chain(Niche::from_scalar(dl, Size::ZERO, a))
307 .max_by_key(|niche| niche.available(dl));
310 variants: Variants::Single { index: VariantIdx::new(0) },
311 fields: FieldsShape::Arbitrary {
312 offsets: vec![Size::ZERO, b_offset],
313 memory_index: vec![0, 1],
315 abi: Abi::ScalarPair(a, b),
// Computes the (not-yet-interned) layout of a single-variant aggregate:
// validates repr flags, optionally reorders fields, assigns offsets, tracks
// the largest niche, and tries to "unpack" a Scalar/ScalarPair ABI.
// NOTE(review): some signature lines (`ty`, `repr`, `kind` parameters) are
// elided from this excerpt.
322 fn univariant_uninterned(
325 fields: &[TyAndLayout<'_>],
328 ) -> Result<LayoutS<'tcx>, LayoutError<'tcx>> {
329 let dl = self.data_layout();
330 let pack = repr.pack;
// `#[repr(packed)]` and `#[repr(align)]` are mutually exclusive; typeck
// should already have rejected this, hence delay_span_bug rather than error.
331 if pack.is_some() && repr.align.is_some() {
332 self.tcx.sess.delay_span_bug(DUMMY_SP, "struct cannot be packed and aligned");
333 return Err(LayoutError::Unknown(ty));
336 let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };
// Identity permutation to start with; may be shuffled or sorted below.
338 let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();
340 let optimize = !repr.inhibit_struct_field_reordering_opt();
// A maybe-unsized struct must keep its last field last, so exclude it from
// any reordering.
343 if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
344 let optimizing = &mut inverse_memory_index[..end];
// Effective alignment of a field, capped by `#[repr(packed(N))]` if present.
345 let field_align = |f: &TyAndLayout<'_>| {
346 if let Some(pack) = pack { f.align.abi.min(pack) } else { f.align.abi }
349 // If `-Z randomize-layout` was enabled for the type definition we can shuffle
350 // the field ordering to try and catch some code making assumptions about layouts
351 // we don't guarantee
352 if repr.can_randomize_type_layout() {
353 // `ReprOptions.layout_seed` is a deterministic seed that we can use to
354 // randomize field ordering with
355 let mut rng = Xoshiro128StarStar::seed_from_u64(repr.field_shuffle_seed);
357 // Shuffle the ordering of the fields
358 optimizing.shuffle(&mut rng);
360 // Otherwise we just leave things alone and actually optimize the type's fields
363 StructKind::AlwaysSized | StructKind::MaybeUnsized => {
364 optimizing.sort_by_key(|&x| {
365 // Place ZSTs first to avoid "interesting offsets",
366 // especially with only one or two non-ZST fields.
367 let f = &fields[x as usize];
// Sort key: non-ZSTs after ZSTs, then by *descending* alignment.
368 (!f.is_zst(), cmp::Reverse(field_align(f)))
372 StructKind::Prefixed(..) => {
373 // Sort in ascending alignment so that the layout stays optimal
374 // regardless of the prefix
375 optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
379 // FIXME(Kixiron): We can always shuffle fields within a given alignment class
380 // regardless of the status of `-Z randomize-layout`
384 // inverse_memory_index holds field indices by increasing memory offset.
385 // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
386 // We now write field offsets to the corresponding offset slot;
387 // field 5 with offset 0 puts 0 in offsets[5].
388 // At the bottom of this function, we invert `inverse_memory_index` to
389 // produce `memory_index` (see `invert_mapping`).
391 let mut sized = true;
392 let mut offsets = vec![Size::ZERO; fields.len()];
393 let mut offset = Size::ZERO;
394 let mut largest_niche = None;
395 let mut largest_niche_available = 0;
// A `Prefixed` layout reserves room for the prefix (e.g. an enum tag)
// before any fields are placed.
397 if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
399 if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
400 align = align.max(AbiAndPrefAlign::new(prefix_align));
401 offset = prefix_size.align_to(prefix_align);
// Walk fields in memory order, assigning each its aligned offset.
404 for &i in &inverse_memory_index {
405 let field = fields[i as usize];
// (elided check) an unsized field anywhere but last is a compiler bug.
407 self.tcx.sess.delay_span_bug(
410 "univariant: field #{} of `{}` comes after unsized field",
417 if field.is_unsized() {
421 // Invariant: offset < dl.obj_size_bound() <= 1<<61
422 let field_align = if let Some(pack) = pack {
423 field.align.min(AbiAndPrefAlign::new(pack))
427 offset = offset.align_to(field_align.abi);
428 align = align.max(field_align);
430 debug!("univariant offset: {:?} field: {:#?}", offset, field);
431 offsets[i as usize] = offset;
// Track the single largest niche across all fields, translating its offset
// from field-relative to struct-relative.
433 if let Some(mut niche) = field.largest_niche {
434 let available = niche.available(dl);
435 if available > largest_niche_available {
436 largest_niche_available = available;
437 niche.offset += offset;
438 largest_niche = Some(niche);
// Advance past this field; overflow here means the struct is too large.
442 offset = offset.checked_add(field.size, dl).ok_or(LayoutError::SizeOverflow(ty))?;
// `#[repr(align(N))]` can only raise the alignment, never lower it.
445 if let Some(repr_align) = repr.align {
446 align = align.max(AbiAndPrefAlign::new(repr_align));
449 debug!("univariant min_size: {:?}", offset);
450 let min_size = offset;
452 // As stated above, inverse_memory_index holds field indices by increasing offset.
453 // This makes it an already-sorted view of the offsets vec.
454 // To invert it, consider:
455 // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
456 // Field 5 would be the first element, so memory_index is i:
457 // Note: if we didn't optimize, it's already right.
460 if optimize { invert_mapping(&inverse_memory_index) } else { inverse_memory_index };
462 let size = min_size.align_to(align.abi);
463 let mut abi = Abi::Aggregate { sized };
465 // Unpack newtype ABIs and find scalar pairs.
466 if sized && size.bytes() > 0 {
467 // All other fields must be ZSTs.
468 let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());
// Peek at up to three non-ZST fields to classify the shape.
470 match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
471 // We have exactly one non-ZST field.
472 (Some((i, field)), None, None) => {
473 // Field fills the struct and it has a scalar or scalar pair ABI.
474 if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size
477 // For plain scalars, or vectors of them, we can't unpack
478 // newtypes for `#[repr(C)]`, as that affects C ABIs.
479 Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
482 // But scalar pairs are Rust-specific and get
483 // treated as aggregates by C ABIs anyway.
484 Abi::ScalarPair(..) => {
492 // Two non-ZST fields, and they're both scalars.
493 (Some((i, a)), Some((j, b)), None) => {
494 match (a.abi, b.abi) {
495 (Abi::Scalar(a), Abi::Scalar(b)) => {
496 // Order by the memory placement, not source order.
497 let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
// Re-derive the canonical pair layout and only adopt ScalarPair if it
// agrees exactly with the offsets/alignment computed above.
502 let pair = self.scalar_pair(a, b);
503 let pair_offsets = match pair.fields {
504 FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
505 assert_eq!(memory_index, &[0, 1]);
510 if offsets[i] == pair_offsets[0]
511 && offsets[j] == pair_offsets[1]
512 && align == pair.align
515 // We can use `ScalarPair` only when it matches our
516 // already computed layout (including `#[repr(C)]`).
// Any uninhabited field makes the whole aggregate uninhabited.
528 if fields.iter().any(|f| f.abi.is_uninhabited()) {
529 abi = Abi::Uninhabited;
533 variants: Variants::Single { index: VariantIdx::new(0) },
534 fields: FieldsShape::Arbitrary { offsets, memory_index },
542 fn layout_of_uncached(&self, ty: Ty<'tcx>) -> Result<Layout<'tcx>, LayoutError<'tcx>> {
544 let param_env = self.param_env;
545 let dl = self.data_layout();
546 let scalar_unit = |value: Primitive| {
547 let size = value.size(dl);
548 assert!(size.bits() <= 128);
549 Scalar::Initialized { value, valid_range: WrappingRange::full(size) }
552 |value: Primitive| tcx.intern_layout(LayoutS::scalar(self, scalar_unit(value)));
554 let univariant = |fields: &[TyAndLayout<'_>], repr: &ReprOptions, kind| {
555 Ok(tcx.intern_layout(self.univariant_uninterned(ty, fields, repr, kind)?))
557 debug_assert!(!ty.has_infer_types_or_consts());
559 Ok(match *ty.kind() {
561 ty::Bool => tcx.intern_layout(LayoutS::scalar(
563 Scalar::Initialized {
564 value: Int(I8, false),
565 valid_range: WrappingRange { start: 0, end: 1 },
568 ty::Char => tcx.intern_layout(LayoutS::scalar(
570 Scalar::Initialized {
571 value: Int(I32, false),
572 valid_range: WrappingRange { start: 0, end: 0x10FFFF },
575 ty::Int(ity) => scalar(Int(Integer::from_int_ty(dl, ity), true)),
576 ty::Uint(ity) => scalar(Int(Integer::from_uint_ty(dl, ity), false)),
577 ty::Float(fty) => scalar(match fty {
578 ty::FloatTy::F32 => F32,
579 ty::FloatTy::F64 => F64,
582 let mut ptr = scalar_unit(Pointer);
583 ptr.valid_range_mut().start = 1;
584 tcx.intern_layout(LayoutS::scalar(self, ptr))
588 ty::Never => tcx.intern_layout(LayoutS {
589 variants: Variants::Single { index: VariantIdx::new(0) },
590 fields: FieldsShape::Primitive,
591 abi: Abi::Uninhabited,
597 // Potentially-wide pointers.
598 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
599 let mut data_ptr = scalar_unit(Pointer);
600 if !ty.is_unsafe_ptr() {
601 data_ptr.valid_range_mut().start = 1;
604 let pointee = tcx.normalize_erasing_regions(param_env, pointee);
605 if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
606 return Ok(tcx.intern_layout(LayoutS::scalar(self, data_ptr)));
609 let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
610 let metadata = match unsized_part.kind() {
612 return Ok(tcx.intern_layout(LayoutS::scalar(self, data_ptr)));
614 ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)),
616 let mut vtable = scalar_unit(Pointer);
617 vtable.valid_range_mut().start = 1;
620 _ => return Err(LayoutError::Unknown(unsized_part)),
623 // Effectively a (ptr, meta) tuple.
624 tcx.intern_layout(self.scalar_pair(data_ptr, metadata))
627 ty::Dynamic(_, _, ty::DynStar) => {
628 let mut data = scalar_unit(Int(dl.ptr_sized_integer(), false));
629 data.valid_range_mut().start = 0;
630 let mut vtable = scalar_unit(Pointer);
631 vtable.valid_range_mut().start = 1;
632 tcx.intern_layout(self.scalar_pair(data, vtable))
635 // Arrays and slices.
636 ty::Array(element, mut count) => {
637 if count.has_projections() {
638 count = tcx.normalize_erasing_regions(param_env, count);
639 if count.has_projections() {
640 return Err(LayoutError::Unknown(ty));
644 let count = count.try_eval_usize(tcx, param_env).ok_or(LayoutError::Unknown(ty))?;
645 let element = self.layout_of(element)?;
647 element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;
650 if count != 0 && tcx.conservative_is_privately_uninhabited(param_env.and(ty)) {
653 Abi::Aggregate { sized: true }
656 let largest_niche = if count != 0 { element.largest_niche } else { None };
658 tcx.intern_layout(LayoutS {
659 variants: Variants::Single { index: VariantIdx::new(0) },
660 fields: FieldsShape::Array { stride: element.size, count },
663 align: element.align,
667 ty::Slice(element) => {
668 let element = self.layout_of(element)?;
669 tcx.intern_layout(LayoutS {
670 variants: Variants::Single { index: VariantIdx::new(0) },
671 fields: FieldsShape::Array { stride: element.size, count: 0 },
672 abi: Abi::Aggregate { sized: false },
674 align: element.align,
678 ty::Str => tcx.intern_layout(LayoutS {
679 variants: Variants::Single { index: VariantIdx::new(0) },
680 fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
681 abi: Abi::Aggregate { sized: false },
688 ty::FnDef(..) => univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?,
689 ty::Dynamic(_, _, ty::Dyn) | ty::Foreign(..) => {
690 let mut unit = self.univariant_uninterned(
693 &ReprOptions::default(),
694 StructKind::AlwaysSized,
697 Abi::Aggregate { ref mut sized } => *sized = false,
700 tcx.intern_layout(unit)
703 ty::Generator(def_id, substs, _) => self.generator_layout(ty, def_id, substs)?,
705 ty::Closure(_, ref substs) => {
706 let tys = substs.as_closure().upvar_tys();
708 &tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
709 &ReprOptions::default(),
710 StructKind::AlwaysSized,
716 if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };
719 &tys.iter().map(|k| self.layout_of(k)).collect::<Result<Vec<_>, _>>()?,
720 &ReprOptions::default(),
725 // SIMD vector types.
726 ty::Adt(def, substs) if def.repr().simd() => {
727 if !def.is_struct() {
728 // Should have yielded E0517 by now.
729 tcx.sess.delay_span_bug(
731 "#[repr(simd)] was applied to an ADT that is not a struct",
733 return Err(LayoutError::Unknown(ty));
736 // Supported SIMD vectors are homogeneous ADTs with at least one field:
738 // * #[repr(simd)] struct S(T, T, T, T);
739 // * #[repr(simd)] struct S { x: T, y: T, z: T, w: T }
740 // * #[repr(simd)] struct S([T; 4])
742 // where T is a primitive scalar (integer/float/pointer).
744 // SIMD vectors with zero fields are not supported.
745 // (should be caught by typeck)
746 if def.non_enum_variant().fields.is_empty() {
747 tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
750 // Type of the first ADT field:
751 let f0_ty = def.non_enum_variant().fields[0].ty(tcx, substs);
753 // Heterogeneous SIMD vectors are not supported:
754 // (should be caught by typeck)
755 for fi in &def.non_enum_variant().fields {
756 if fi.ty(tcx, substs) != f0_ty {
757 tcx.sess.fatal(&format!("monomorphising heterogeneous SIMD type `{}`", ty));
761 // The element type and number of elements of the SIMD vector
762 // are obtained from:
764 // * the element type and length of the single array field, if
765 // the first field is of array type, or
767 // * the homogeneous field type and the number of fields.
768 let (e_ty, e_len, is_array) = if let ty::Array(e_ty, _) = f0_ty.kind() {
769 // First ADT field is an array:
771 // SIMD vectors with multiple array fields are not supported:
772 // (should be caught by typeck)
773 if def.non_enum_variant().fields.len() != 1 {
774 tcx.sess.fatal(&format!(
775 "monomorphising SIMD type `{}` with more than one array field",
780 // Extract the number of elements from the layout of the array field:
781 let FieldsShape::Array { count, .. } = self.layout_of(f0_ty)?.layout.fields() else {
782 return Err(LayoutError::Unknown(ty));
785 (*e_ty, *count, true)
787 // First ADT field is not an array:
788 (f0_ty, def.non_enum_variant().fields.len() as _, false)
791 // SIMD vectors of zero length are not supported.
792 // Additionally, lengths are capped at 2^16 as a fixed maximum backends must
795 // Can't be caught in typeck if the array length is generic.
797 tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
798 } else if e_len > MAX_SIMD_LANES {
799 tcx.sess.fatal(&format!(
800 "monomorphising SIMD type `{}` of length greater than {}",
805 // Compute the ABI of the element type:
806 let e_ly = self.layout_of(e_ty)?;
807 let Abi::Scalar(e_abi) = e_ly.abi else {
808 // This error isn't caught in typeck, e.g., if
809 // the element type of the vector is generic.
810 tcx.sess.fatal(&format!(
811 "monomorphising SIMD type `{}` with a non-primitive-scalar \
812 (integer/float/pointer) element type `{}`",
817 // Compute the size and alignment of the vector:
818 let size = e_ly.size.checked_mul(e_len, dl).ok_or(LayoutError::SizeOverflow(ty))?;
819 let align = dl.vector_align(size);
820 let size = size.align_to(align.abi);
822 // Compute the placement of the vector fields:
823 let fields = if is_array {
824 FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] }
826 FieldsShape::Array { stride: e_ly.size, count: e_len }
829 tcx.intern_layout(LayoutS {
830 variants: Variants::Single { index: VariantIdx::new(0) },
832 abi: Abi::Vector { element: e_abi, count: e_len },
833 largest_niche: e_ly.largest_niche,
840 ty::Adt(def, substs) => {
841 // Cache the field layouts.
848 .map(|field| self.layout_of(field.ty(tcx, substs)))
849 .collect::<Result<Vec<_>, _>>()
851 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
854 if def.repr().pack.is_some() && def.repr().align.is_some() {
855 self.tcx.sess.delay_span_bug(
856 tcx.def_span(def.did()),
857 "union cannot be packed and aligned",
859 return Err(LayoutError::Unknown(ty));
863 if def.repr().pack.is_some() { dl.i8_align } else { dl.aggregate_align };
865 if let Some(repr_align) = def.repr().align {
866 align = align.max(AbiAndPrefAlign::new(repr_align));
869 let optimize = !def.repr().inhibit_union_abi_opt();
870 let mut size = Size::ZERO;
871 let mut abi = Abi::Aggregate { sized: true };
872 let index = VariantIdx::new(0);
873 for field in &variants[index] {
874 assert!(!field.is_unsized());
875 align = align.max(field.align);
877 // If all non-ZST fields have the same ABI, forward this ABI
878 if optimize && !field.is_zst() {
879 // Discard valid range information and allow undef
880 let field_abi = match field.abi {
881 Abi::Scalar(x) => Abi::Scalar(x.to_union()),
882 Abi::ScalarPair(x, y) => {
883 Abi::ScalarPair(x.to_union(), y.to_union())
885 Abi::Vector { element: x, count } => {
886 Abi::Vector { element: x.to_union(), count }
888 Abi::Uninhabited | Abi::Aggregate { .. } => {
889 Abi::Aggregate { sized: true }
893 if size == Size::ZERO {
894 // first non ZST: initialize 'abi'
896 } else if abi != field_abi {
897 // different fields have different ABI: reset to Aggregate
898 abi = Abi::Aggregate { sized: true };
902 size = cmp::max(size, field.size);
905 if let Some(pack) = def.repr().pack {
906 align = align.min(AbiAndPrefAlign::new(pack));
909 return Ok(tcx.intern_layout(LayoutS {
910 variants: Variants::Single { index },
911 fields: FieldsShape::Union(
912 NonZeroUsize::new(variants[index].len())
913 .ok_or(LayoutError::Unknown(ty))?,
918 size: size.align_to(align.abi),
922 // A variant is absent if it's uninhabited and only has ZST fields.
923 // Present uninhabited variants only require space for their fields,
924 // but *not* an encoding of the discriminant (e.g., a tag value).
925 // See issue #49298 for more details on the need to leave space
926 // for non-ZST uninhabited data (mostly partial initialization).
927 let absent = |fields: &[TyAndLayout<'_>]| {
928 let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
929 let is_zst = fields.iter().all(|f| f.is_zst());
930 uninhabited && is_zst
932 let (present_first, present_second) = {
933 let mut present_variants = variants
935 .filter_map(|(i, v)| if absent(v) { None } else { Some(i) });
936 (present_variants.next(), present_variants.next())
938 let present_first = match present_first {
939 Some(present_first) => present_first,
940 // Uninhabited because it has no variants, or only absent ones.
941 None if def.is_enum() => {
942 return Ok(tcx.layout_of(param_env.and(tcx.types.never))?.layout);
944 // If it's a struct, still compute a layout so that we can still compute the
946 None => VariantIdx::new(0),
949 let is_struct = !def.is_enum() ||
950 // Only one variant is present.
951 (present_second.is_none() &&
952 // Representation optimizations are allowed.
953 !def.repr().inhibit_enum_layout_opt());
955 // Struct, or univariant enum equivalent to a struct.
956 // (Typechecking will reject discriminant-sizing attrs.)
958 let v = present_first;
959 let kind = if def.is_enum() || variants[v].is_empty() {
960 StructKind::AlwaysSized
962 let param_env = tcx.param_env(def.did());
963 let last_field = def.variant(v).fields.last().unwrap();
965 tcx.type_of(last_field.did).is_sized(tcx.at(DUMMY_SP), param_env);
967 StructKind::MaybeUnsized
969 StructKind::AlwaysSized
973 let mut st = self.univariant_uninterned(ty, &variants[v], &def.repr(), kind)?;
974 st.variants = Variants::Single { index: v };
976 if def.is_unsafe_cell() {
977 let hide_niches = |scalar: &mut _| match scalar {
978 Scalar::Initialized { value, valid_range } => {
979 *valid_range = WrappingRange::full(value.size(dl))
981 // Already doesn't have any niches
982 Scalar::Union { .. } => {}
985 Abi::Uninhabited => {}
986 Abi::Scalar(scalar) => hide_niches(scalar),
987 Abi::ScalarPair(a, b) => {
991 Abi::Vector { element, count: _ } => hide_niches(element),
992 Abi::Aggregate { sized: _ } => {}
994 st.largest_niche = None;
995 return Ok(tcx.intern_layout(st));
998 let (start, end) = self.tcx.layout_scalar_valid_range(def.did());
1000 Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
1001 // the asserts ensure that we are not using the
1002 // `#[rustc_layout_scalar_valid_range(n)]`
1003 // attribute to widen the range of anything as that would probably
1004 // result in UB somewhere
1005 // FIXME(eddyb) the asserts are probably not needed,
1006 // as larger validity ranges would result in missed
1007 // optimizations, *not* wrongly assuming the inner
1008 // value is valid. e.g. unions enlarge validity ranges,
1009 // because the values may be uninitialized.
1010 if let Bound::Included(start) = start {
1011 // FIXME(eddyb) this might be incorrect - it doesn't
1012 // account for wrap-around (end < start) ranges.
1013 let valid_range = scalar.valid_range_mut();
1014 assert!(valid_range.start <= start);
1015 valid_range.start = start;
1017 if let Bound::Included(end) = end {
1018 // FIXME(eddyb) this might be incorrect - it doesn't
1019 // account for wrap-around (end < start) ranges.
1020 let valid_range = scalar.valid_range_mut();
1021 assert!(valid_range.end >= end);
1022 valid_range.end = end;
1025 // Update `largest_niche` if we have introduced a larger niche.
1026 let niche = Niche::from_scalar(dl, Size::ZERO, *scalar);
1027 if let Some(niche) = niche {
1028 match st.largest_niche {
1029 Some(largest_niche) => {
1030 // Replace the existing niche even if they're equal,
1031 // because this one is at a lower offset.
1032 if largest_niche.available(dl) <= niche.available(dl) {
1033 st.largest_niche = Some(niche);
1036 None => st.largest_niche = Some(niche),
1041 start == Bound::Unbounded && end == Bound::Unbounded,
1042 "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
1048 return Ok(tcx.intern_layout(st));
1051 // At this point, we have handled all unions and
1052 // structs. (We have also handled univariant enums
1053 // that allow representation optimization.)
1054 assert!(def.is_enum());
1056 // Until we've decided whether to use the tagged or
1057 // niche filling LayoutS, we don't want to intern the
1058 // variant layouts, so we can't store them in the
1059 // overall LayoutS. Store the overall LayoutS
1060 // and the variant LayoutSs here until then.
1061 struct TmpLayout<'tcx> {
1062 layout: LayoutS<'tcx>,
1063 variants: IndexVec<VariantIdx, LayoutS<'tcx>>,
1066 let calculate_niche_filling_layout =
1067 || -> Result<Option<TmpLayout<'tcx>>, LayoutError<'tcx>> {
1068 // The current code for niche-filling relies on variant indices
1069 // instead of actual discriminants, so enums with
1070 // explicit discriminants (RFC #2363) would misbehave.
1071 if def.repr().inhibit_enum_layout_opt()
1075 .any(|(i, v)| v.discr != ty::VariantDiscr::Relative(i.as_u32()))
1080 if variants.len() < 2 {
1084 let mut align = dl.aggregate_align;
1085 let mut variant_layouts = variants
1088 let mut st = self.univariant_uninterned(
1092 StructKind::AlwaysSized,
1094 st.variants = Variants::Single { index: j };
1096 align = align.max(st.align);
1100 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1102 let largest_variant_index = match variant_layouts
1104 .max_by_key(|(_i, layout)| layout.size.bytes())
1105 .map(|(i, _layout)| i)
1107 None => return Ok(None),
1111 let all_indices = VariantIdx::new(0)..=VariantIdx::new(variants.len() - 1);
1112 let needs_disc = |index: VariantIdx| {
1113 index != largest_variant_index && !absent(&variants[index])
1115 let niche_variants = all_indices.clone().find(|v| needs_disc(*v)).unwrap()
1116 ..=all_indices.rev().find(|v| needs_disc(*v)).unwrap();
1118 let count = niche_variants.size_hint().1.unwrap() as u128;
1120 // Find the field with the largest niche
1121 let (field_index, niche, (niche_start, niche_scalar)) = match variants
1122 [largest_variant_index]
1125 .filter_map(|(j, field)| Some((j, field.largest_niche?)))
1126 .max_by_key(|(_, niche)| niche.available(dl))
1127 .and_then(|(j, niche)| Some((j, niche, niche.reserve(self, count)?)))
1129 None => return Ok(None),
1133 let niche_offset = niche.offset
1134 + variant_layouts[largest_variant_index].fields.offset(field_index);
1135 let niche_size = niche.value.size(dl);
1136 let size = variant_layouts[largest_variant_index].size.align_to(align.abi);
1138 let all_variants_fit =
1139 variant_layouts.iter_enumerated_mut().all(|(i, layout)| {
1140 if i == largest_variant_index {
1144 layout.largest_niche = None;
1146 if layout.size <= niche_offset {
1147 // This variant will fit before the niche.
1151 // Determine if it'll fit after the niche.
1152 let this_align = layout.align.abi;
1153 let this_offset = (niche_offset + niche_size).align_to(this_align);
1155 if this_offset + layout.size > size {
1159 // It'll fit, but we need to make some adjustments.
1160 match layout.fields {
1161 FieldsShape::Arbitrary { ref mut offsets, .. } => {
1162 for (j, offset) in offsets.iter_mut().enumerate() {
1163 if !variants[i][j].is_zst() {
1164 *offset += this_offset;
1169 panic!("Layout of fields should be Arbitrary for variants")
1173 // It can't be a Scalar or ScalarPair because the offset isn't 0.
1174 if !layout.abi.is_uninhabited() {
1175 layout.abi = Abi::Aggregate { sized: true };
1177 layout.size += this_offset;
1182 if !all_variants_fit {
1186 let largest_niche = Niche::from_scalar(dl, niche_offset, niche_scalar);
1188 let others_zst = variant_layouts.iter_enumerated().all(|(i, layout)| {
1189 i == largest_variant_index || layout.size == Size::ZERO
1191 let same_size = size == variant_layouts[largest_variant_index].size;
1192 let same_align = align == variant_layouts[largest_variant_index].align;
1194 let abi = if variant_layouts.iter().all(|v| v.abi.is_uninhabited()) {
1196 } else if same_size && same_align && others_zst {
1197 match variant_layouts[largest_variant_index].abi {
1198 // When the total alignment and size match, we can use the
1199 // same ABI as the scalar variant with the reserved niche.
1200 Abi::Scalar(_) => Abi::Scalar(niche_scalar),
1201 Abi::ScalarPair(first, second) => {
1202 // Only the niche is guaranteed to be initialised,
1203 // so use union layouts for the other primitive.
1204 if niche_offset == Size::ZERO {
1205 Abi::ScalarPair(niche_scalar, second.to_union())
1207 Abi::ScalarPair(first.to_union(), niche_scalar)
1210 _ => Abi::Aggregate { sized: true },
1213 Abi::Aggregate { sized: true }
1216 let layout = LayoutS {
1217 variants: Variants::Multiple {
1219 tag_encoding: TagEncoding::Niche {
1220 untagged_variant: largest_variant_index,
1225 variants: IndexVec::new(),
1227 fields: FieldsShape::Arbitrary {
1228 offsets: vec![niche_offset],
1229 memory_index: vec![0],
1237 Ok(Some(TmpLayout { layout, variants: variant_layouts }))
1240 let niche_filling_layout = calculate_niche_filling_layout()?;
1242 let (mut min, mut max) = (i128::MAX, i128::MIN);
1243 let discr_type = def.repr().discr_type();
1244 let bits = Integer::from_attr(self, discr_type).size().bits();
1245 for (i, discr) in def.discriminants(tcx) {
1246 if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
1249 let mut x = discr.val as i128;
1250 if discr_type.is_signed() {
1251 // sign extend the raw representation to be an i128
1252 x = (x << (128 - bits)) >> (128 - bits);
1261 // We might have no inhabited variants, so pretend there's at least one.
1262 if (min, max) == (i128::MAX, i128::MIN) {
1266 assert!(min <= max, "discriminant range is {}...{}", min, max);
1267 let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr(), min, max);
1269 let mut align = dl.aggregate_align;
1270 let mut size = Size::ZERO;
1272 // We're interested in the smallest alignment, so start large.
1273 let mut start_align = Align::from_bytes(256).unwrap();
1274 assert_eq!(Integer::for_align(dl, start_align), None);
1276 // repr(C) on an enum tells us to make a (tag, union) layout,
1277 // so we need to grow the prefix alignment to be at least
1278 // the alignment of the union. (This value is used both for
1279 // determining the alignment of the overall enum, and the
1280 // determining the alignment of the payload after the tag.)
1281 let mut prefix_align = min_ity.align(dl).abi;
1283 for fields in &variants {
1284 for field in fields {
1285 prefix_align = prefix_align.max(field.align.abi);
1290 // Create the set of structs that represent each variant.
1291 let mut layout_variants = variants
1293 .map(|(i, field_layouts)| {
1294 let mut st = self.univariant_uninterned(
1298 StructKind::Prefixed(min_ity.size(), prefix_align),
1300 st.variants = Variants::Single { index: i };
1301 // Find the first field we can't move later
1302 // to make room for a larger discriminant.
1304 st.fields.index_by_increasing_offset().map(|j| field_layouts[j])
1306 if !field.is_zst() || field.align.abi.bytes() != 1 {
1307 start_align = start_align.min(field.align.abi);
1311 size = cmp::max(size, st.size);
1312 align = align.max(st.align);
1315 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1317 // Align the maximum variant size to the largest alignment.
1318 size = size.align_to(align.abi);
1320 if size.bytes() >= dl.obj_size_bound() {
1321 return Err(LayoutError::SizeOverflow(ty));
1324 let typeck_ity = Integer::from_attr(dl, def.repr().discr_type());
1325 if typeck_ity < min_ity {
1326 // It is a bug if Layout decided on a greater discriminant size than typeck for
1327 // some reason at this point (based on values discriminant can take on). Mostly
1328 // because this discriminant will be loaded, and then stored into variable of
1329 // type calculated by typeck. Consider such case (a bug): typeck decided on
1330 // byte-sized discriminant, but layout thinks we need a 16-bit to store all
1331 // discriminant values. That would be a bug, because then, in codegen, in order
1332 // to store this 16-bit discriminant into 8-bit sized temporary some of the
1333 // space necessary to represent would have to be discarded (or layout is wrong
1334 // on thinking it needs 16 bits)
1336 "layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
1340 // However, it is fine to make discr type however large (as an optimisation)
1341 // after this point – we’ll just truncate the value we load in codegen.
1344 // Check to see if we should use a different type for the
1345 // discriminant. We can safely use a type with the same size
1346 // as the alignment of the first field of each variant.
1347 // We increase the size of the discriminant to avoid LLVM copying
1348 // padding when it doesn't need to. This normally causes unaligned
1349 // load/stores and excessive memcpy/memset operations. By using a
1350 // bigger integer size, LLVM can be sure about its contents and
1351 // won't be so conservative.
1353 // Use the initial field alignment
1354 let mut ity = if def.repr().c() || def.repr().int.is_some() {
1357 Integer::for_align(dl, start_align).unwrap_or(min_ity)
1360 // If the alignment is not larger than the chosen discriminant size,
1361 // don't use the alignment as the final size.
1365 // Patch up the variants' first few fields.
1366 let old_ity_size = min_ity.size();
1367 let new_ity_size = ity.size();
1368 for variant in &mut layout_variants {
1369 match variant.fields {
1370 FieldsShape::Arbitrary { ref mut offsets, .. } => {
1372 if *i <= old_ity_size {
1373 assert_eq!(*i, old_ity_size);
1377 // We might be making the struct larger.
1378 if variant.size <= old_ity_size {
1379 variant.size = new_ity_size;
1387 let tag_mask = ity.size().unsigned_int_max();
1388 let tag = Scalar::Initialized {
1389 value: Int(ity, signed),
1390 valid_range: WrappingRange {
1391 start: (min as u128 & tag_mask),
1392 end: (max as u128 & tag_mask),
1395 let mut abi = Abi::Aggregate { sized: true };
1397 if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
1398 abi = Abi::Uninhabited;
1399 } else if tag.size(dl) == size {
1400 // Make sure we only use scalar layout when the enum is entirely its
1401 // own tag (i.e. it has no padding nor any non-ZST variant fields).
1402 abi = Abi::Scalar(tag);
1404 // Try to use a ScalarPair for all tagged enums.
1405 let mut common_prim = None;
1406 let mut common_prim_initialized_in_all_variants = true;
1407 for (field_layouts, layout_variant) in iter::zip(&variants, &layout_variants) {
1408 let FieldsShape::Arbitrary { ref offsets, .. } = layout_variant.fields else {
1412 iter::zip(field_layouts, offsets).filter(|p| !p.0.is_zst());
1413 let (field, offset) = match (fields.next(), fields.next()) {
1415 common_prim_initialized_in_all_variants = false;
1418 (Some(pair), None) => pair,
1424 let prim = match field.abi {
1425 Abi::Scalar(scalar) => {
1426 common_prim_initialized_in_all_variants &=
1427 matches!(scalar, Scalar::Initialized { .. });
1435 if let Some(pair) = common_prim {
1436 // This is pretty conservative. We could go fancier
1437 // by conflating things like i32 and u32, or even
1438 // realising that (u8, u8) could just cohabit with
1440 if pair != (prim, offset) {
1445 common_prim = Some((prim, offset));
1448 if let Some((prim, offset)) = common_prim {
1449 let prim_scalar = if common_prim_initialized_in_all_variants {
1452 // Common prim might be uninit.
1453 Scalar::Union { value: prim }
1455 let pair = self.scalar_pair(tag, prim_scalar);
1456 let pair_offsets = match pair.fields {
1457 FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
1458 assert_eq!(memory_index, &[0, 1]);
1463 if pair_offsets[0] == Size::ZERO
1464 && pair_offsets[1] == *offset
1465 && align == pair.align
1466 && size == pair.size
1468 // We can use `ScalarPair` only when it matches our
1469 // already computed layout (including `#[repr(C)]`).
1475 // If we pick a "clever" (by-value) ABI, we might have to adjust the ABI of the
1476 // variants to ensure they are consistent. This is because a downcast is
1477 // semantically a NOP, and thus should not affect layout.
1478 if matches!(abi, Abi::Scalar(..) | Abi::ScalarPair(..)) {
1479 for variant in &mut layout_variants {
1480 // We only do this for variants with fields; the others are not accessed anyway.
1481 // Also do not overwrite any already existing "clever" ABIs.
1482 if variant.fields.count() > 0
1483 && matches!(variant.abi, Abi::Aggregate { .. })
1486 // Also need to bump up the size and alignment, so that the entire value fits in here.
1487 variant.size = cmp::max(variant.size, size);
1488 variant.align.abi = cmp::max(variant.align.abi, align.abi);
1493 let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag);
1495 let tagged_layout = LayoutS {
1496 variants: Variants::Multiple {
1498 tag_encoding: TagEncoding::Direct,
1500 variants: IndexVec::new(),
1502 fields: FieldsShape::Arbitrary {
1503 offsets: vec![Size::ZERO],
1504 memory_index: vec![0],
1512 let tagged_layout = TmpLayout { layout: tagged_layout, variants: layout_variants };
1514 let mut best_layout = match (tagged_layout, niche_filling_layout) {
1516 // Pick the smaller layout; otherwise,
1517 // pick the layout with the larger niche; otherwise,
1518 // pick tagged as it has simpler codegen.
1520 let niche_size = |tmp_l: &TmpLayout<'_>| {
1521 tmp_l.layout.largest_niche.map_or(0, |n| n.available(dl))
1524 tl.layout.size.cmp(&nl.layout.size),
1525 niche_size(&tl).cmp(&niche_size(&nl)),
1528 (Equal, Less) => nl,
1535 // Now we can intern the variant layouts and store them in the enum layout.
1536 best_layout.layout.variants = match best_layout.layout.variants {
1537 Variants::Multiple { tag, tag_encoding, tag_field, .. } => Variants::Multiple {
1541 variants: best_layout
1544 .map(|layout| tcx.intern_layout(layout))
1550 tcx.intern_layout(best_layout.layout)
1553 // Types with no meaningful known layout.
1554 ty::Projection(_) | ty::Opaque(..) => {
1555 // NOTE(eddyb) `layout_of` query should've normalized these away,
1556 // if that was possible, so there's no reason to try again here.
1557 return Err(LayoutError::Unknown(ty));
1560 ty::Placeholder(..) | ty::GeneratorWitness(..) | ty::Infer(_) => {
1561 bug!("Layout::compute: unexpected type `{}`", ty)
1564 ty::Bound(..) | ty::Param(_) | ty::Error(_) => {
1565 return Err(LayoutError::Unknown(ty));
1571 /// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
1572 #[derive(Clone, Debug, PartialEq)]
1573 enum SavedLocalEligibility {
// Local is a field of exactly one variant, so it may share storage with
// locals assigned to other variants (see `generator_saved_local_eligibility`).
1575 Assigned(VariantIdx),
1576 // FIXME: Use newtype_index so we aren't wasting bytes
// Local must live in the generator prefix; `Some(idx)` is its promoted field
// index once the prefix order has been decided, `None` before that point.
1577 Ineligible(Option<u32>),
1580 // When laying out generators, we divide our saved local fields into two
1581 // categories: overlap-eligible and overlap-ineligible.
1583 // Those fields which are ineligible for overlap go in a "prefix" at the
1584 // beginning of the layout, and always have space reserved for them.
1586 // Overlap-eligible fields are only assigned to one variant, so we lay
1587 // those fields out for each variant and put them right after the
1590 // Finally, in the layout details, we point to the fields from the
1591 // variants they are assigned to. It is possible for some fields to be
1592 // included in multiple variants. No field ever "moves around" in the
1593 // layout; its offset is always the same.
1595 // Also included in the layout are the upvars and the discriminant.
1596 // These are included as fields on the "outer" layout; they are not part
1598 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
1599 /// Compute the eligibility and assignment of each local.
/// Returns the set of overlap-ineligible locals (these get "promoted" to the
/// generator's prefix) together with the final `SavedLocalEligibility` of
/// every saved local.
1600 fn generator_saved_local_eligibility(
1602 info: &GeneratorLayout<'tcx>,
1603 ) -> (BitSet<GeneratorSavedLocal>, IndexVec<GeneratorSavedLocal, SavedLocalEligibility>) {
1604 use SavedLocalEligibility::*;
// Every local starts out `Unassigned` and is refined by the passes below.
1606 let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> =
1607 IndexVec::from_elem_n(Unassigned, info.field_tys.len())
1609 // The saved locals not eligible for overlap. These will get
1610 // "promoted" to the prefix of our generator.
1611 let mut ineligible_locals = BitSet::new_empty(info.field_tys.len());
1613 // Figure out which of our saved locals are fields in only
1614 // one variant. The rest are deemed ineligible for overlap.
1615 for (variant_index, fields) in info.variant_fields.iter_enumerated() {
1616 for local in fields {
1617 match assignments[*local] {
1619 assignments[*local] = Assigned(variant_index);
1622 // We've already seen this local at another suspension
1623 // point, so it is no longer a candidate.
1625 "removing local {:?} in >1 variant ({:?}, {:?})",
1630 ineligible_locals.insert(*local);
1631 assignments[*local] = Ineligible(None);
1638 // Next, check every pair of eligible locals to see if they
1640 for local_a in info.storage_conflicts.rows() {
1641 let conflicts_a = info.storage_conflicts.count(local_a);
1642 if ineligible_locals.contains(local_a) {
1646 for local_b in info.storage_conflicts.iter(local_a) {
1647 // local_a and local_b are storage live at the same time, therefore they
1648 // cannot overlap in the generator layout. The only way to guarantee
1649 // this is if they are in the same variant, or one is ineligible
1650 // (which means it is stored in every variant).
1651 if ineligible_locals.contains(local_b)
1652 || assignments[local_a] == assignments[local_b]
1657 // If they conflict, we will choose one to make ineligible.
1658 // This is not always optimal; it's just a greedy heuristic that
1659 // seems to produce good results most of the time.
1660 let conflicts_b = info.storage_conflicts.count(local_b);
// Remove the local with the most conflicts, so that more of the
// remaining pairs stay overlappable.
1661 let (remove, other) =
1662 if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
1663 ineligible_locals.insert(remove);
1664 assignments[remove] = Ineligible(None);
1665 trace!("removing local {:?} due to conflict with {:?}", remove, other);
1669 // Count the number of variants in use. If only one of them, then it is
1670 // impossible to overlap any locals in our layout. In this case it's
1671 // always better to make the remaining locals ineligible, so we can
1672 // lay them out with the other locals in the prefix and eliminate
1673 // unnecessary padding bytes.
1675 let mut used_variants = BitSet::new_empty(info.variant_fields.len());
1676 for assignment in &assignments {
1677 if let Assigned(idx) = assignment {
1678 used_variants.insert(*idx);
1681 if used_variants.count() < 2 {
1682 for assignment in assignments.iter_mut() {
1683 *assignment = Ineligible(None);
1685 ineligible_locals.insert_all();
1689 // Write down the order of our locals that will be promoted to the prefix.
1691 for (idx, local) in ineligible_locals.iter().enumerate() {
1692 assignments[local] = Ineligible(Some(idx as u32));
1695 debug!("generator saved local assignments: {:?}", assignments);
1697 (ineligible_locals, assignments)
1700 /// Compute the full generator layout.
/// The resulting layout is a common prefix (the `prefix_tys`, the
/// discriminant tag, and the promoted overlap-ineligible locals) followed by
/// per-variant fields for the overlap-eligible locals of that variant.
1701 fn generator_layout(
1704 def_id: hir::def_id::DefId,
1705 substs: SubstsRef<'tcx>,
1706 ) -> Result<Layout<'tcx>, LayoutError<'tcx>> {
1707 use SavedLocalEligibility::*;
1709 let subst_field = |ty: Ty<'tcx>| EarlyBinder(ty).subst(tcx, substs);
1711 let Some(info) = tcx.generator_layout(def_id) else {
1712 return Err(LayoutError::Unknown(ty));
1714 let (ineligible_locals, assignments) = self.generator_saved_local_eligibility(&info);
1716 // Build a prefix layout, including "promoting" all ineligible
1717 // locals as part of the prefix. We compute the layout of all of
1718 // these fields at once to get optimal packing.
// The tag goes right after the `prefix_tys`, so its field index equals
// their count (see `tag_field: tag_index` in the final layout below).
1719 let tag_index = substs.as_generator().prefix_tys().count();
1721 // `info.variant_fields` already accounts for the reserved variants, so no need to add them.
1722 let max_discr = (info.variant_fields.len() - 1) as u128;
1723 let discr_int = Integer::fit_unsigned(max_discr);
1724 let discr_int_ty = discr_int.to_ty(tcx, false);
1725 let tag = Scalar::Initialized {
1726 value: Primitive::Int(discr_int, false),
1727 valid_range: WrappingRange { start: 0, end: max_discr },
1729 let tag_layout = self.tcx.intern_layout(LayoutS::scalar(self, tag));
1730 let tag_layout = TyAndLayout { ty: discr_int_ty, layout: tag_layout };
// Promoted locals are wrapped in `MaybeUninit` because they are not
// initialized in every variant.
1732 let promoted_layouts = ineligible_locals
1734 .map(|local| subst_field(info.field_tys[local]))
1735 .map(|ty| tcx.mk_maybe_uninit(ty))
1736 .map(|ty| self.layout_of(ty));
1737 let prefix_layouts = substs
1740 .map(|ty| self.layout_of(ty))
1741 .chain(iter::once(Ok(tag_layout)))
1742 .chain(promoted_layouts)
1743 .collect::<Result<Vec<_>, _>>()?;
1744 let prefix = self.univariant_uninterned(
1747 &ReprOptions::default(),
1748 StructKind::AlwaysSized,
1751 let (prefix_size, prefix_align) = (prefix.size, prefix.align);
1753 // Split the prefix layout into the "outer" fields (upvars and
1754 // discriminant) and the "promoted" fields. Promoted fields will
1755 // get included in each variant that requested them in
1757 debug!("prefix = {:#?}", prefix);
1758 let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
1759 FieldsShape::Arbitrary { mut offsets, memory_index } => {
1760 let mut inverse_memory_index = invert_mapping(&memory_index);
1762 // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
1763 // "outer" and "promoted" fields respectively.
1764 let b_start = (tag_index + 1) as u32;
1765 let offsets_b = offsets.split_off(b_start as usize);
1766 let offsets_a = offsets;
1768 // Disentangle the "a" and "b" components of `inverse_memory_index`
1769 // by preserving the order but keeping only one disjoint "half" each.
1770 // FIXME(eddyb) build a better abstraction for permutations, if possible.
1771 let inverse_memory_index_b: Vec<_> =
1772 inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
1773 inverse_memory_index.retain(|&i| i < b_start);
1774 let inverse_memory_index_a = inverse_memory_index;
1776 // Since `inverse_memory_index_{a,b}` each only refer to their
1777 // respective fields, they can be safely inverted
1778 let memory_index_a = invert_mapping(&inverse_memory_index_a);
1779 let memory_index_b = invert_mapping(&inverse_memory_index_b);
1782 FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
1783 (outer_fields, offsets_b, memory_index_b)
// Overall size/align, grown below to cover every variant.
1788 let mut size = prefix.size;
1789 let mut align = prefix.align;
1793 .map(|(index, variant_fields)| {
1794 // Only include overlap-eligible fields when we compute our variant layout.
1795 let variant_only_tys = variant_fields
1797 .filter(|local| match assignments[**local] {
1798 Unassigned => bug!(),
1799 Assigned(v) if v == index => true,
1800 Assigned(_) => bug!("assignment does not match variant"),
1801 Ineligible(_) => false,
1803 .map(|local| subst_field(info.field_tys[*local]));
1805 let mut variant = self.univariant_uninterned(
1808 .map(|ty| self.layout_of(ty))
1809 .collect::<Result<Vec<_>, _>>()?,
1810 &ReprOptions::default(),
1811 StructKind::Prefixed(prefix_size, prefix_align.abi),
1813 variant.variants = Variants::Single { index };
1815 let FieldsShape::Arbitrary { offsets, memory_index } = variant.fields else {
1819 // Now, stitch the promoted and variant-only fields back together in
1820 // the order they are mentioned by our GeneratorLayout.
1821 // Because we only use some subset (that can differ between variants)
1822 // of the promoted fields, we can't just pick those elements of the
1823 // `promoted_memory_index` (as we'd end up with gaps).
1824 // So instead, we build an "inverse memory_index", as if all of the
1825 // promoted fields were being used, but leave the elements not in the
1826 // subset as `INVALID_FIELD_IDX`, which we can filter out later to
1827 // obtain a valid (bijective) mapping.
1828 const INVALID_FIELD_IDX: u32 = !0;
1829 let mut combined_inverse_memory_index =
1830 vec![INVALID_FIELD_IDX; promoted_memory_index.len() + memory_index.len()];
1831 let mut offsets_and_memory_index = iter::zip(offsets, memory_index);
1832 let combined_offsets = variant_fields
1836 let (offset, memory_index) = match assignments[*local] {
1837 Unassigned => bug!(),
// Variant-only fields come after all promoted fields in memory
// order, hence the `promoted_memory_index.len()` shift.
1839 let (offset, memory_index) =
1840 offsets_and_memory_index.next().unwrap();
1841 (offset, promoted_memory_index.len() as u32 + memory_index)
1843 Ineligible(field_idx) => {
1844 let field_idx = field_idx.unwrap() as usize;
1845 (promoted_offsets[field_idx], promoted_memory_index[field_idx])
1848 combined_inverse_memory_index[memory_index as usize] = i as u32;
1853 // Remove the unused slots and invert the mapping to obtain the
1854 // combined `memory_index` (also see previous comment).
1855 combined_inverse_memory_index.retain(|&i| i != INVALID_FIELD_IDX);
1856 let combined_memory_index = invert_mapping(&combined_inverse_memory_index);
1858 variant.fields = FieldsShape::Arbitrary {
1859 offsets: combined_offsets,
1860 memory_index: combined_memory_index,
1863 size = size.max(variant.size);
1864 align = align.max(variant.align);
1865 Ok(tcx.intern_layout(variant))
1867 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1869 size = size.align_to(align.abi);
1872 if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi().is_uninhabited()) {
1875 Abi::Aggregate { sized: true }
1878 let layout = tcx.intern_layout(LayoutS {
1879 variants: Variants::Multiple {
1881 tag_encoding: TagEncoding::Direct,
1882 tag_field: tag_index,
1885 fields: outer_fields,
1887 largest_niche: prefix.largest_niche,
1891 debug!("generator layout ({:?}): {:#?}", ty, layout);
1895 /// This is invoked by the `layout_of` query to record the final
1896 /// layout of each type.
1898 fn record_layout_for_printing(&self, layout: TyAndLayout<'tcx>) {
1899 // If we are running with `-Zprint-type-sizes`, maybe record layouts
1900 // for dumping later.
// NOTE(review): the actual recording is outlined into a separate method,
// presumably to keep this flag check cheap on the common path — confirm.
1901 if self.tcx.sess.opts.unstable_opts.print_type_sizes {
1902 self.record_layout_for_printing_outlined(layout)
// Collects `-Zprint-type-sizes` statistics for `layout` into
// `sess.code_stats`. Only nominal types (ADTs and closures) are recorded.
1906 fn record_layout_for_printing_outlined(&self, layout: TyAndLayout<'tcx>) {
1907 // Ignore layouts that are done with non-empty environments or
1908 // non-monomorphic layouts, as the user only wants to see the stuff
1909 // resulting from the final codegen session.
1910 if layout.ty.has_param_types_or_consts() || !self.param_env.caller_bounds().is_empty() {
1914 // (delay format until we actually need it)
1915 let record = |kind, packed, opt_discr_size, variants| {
1916 let type_desc = format!("{:?}", layout.ty);
1917 self.tcx.sess.code_stats.record_type_size(
1928 let adt_def = match *layout.ty.kind() {
1929 ty::Adt(ref adt_def, _) => {
1930 debug!("print-type-size t: `{:?}` process adt", layout.ty);
1934 ty::Closure(..) => {
1935 debug!("print-type-size t: `{:?}` record closure", layout.ty);
1936 record(DataTypeKind::Closure, false, None, vec![]);
1941 debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
1946 let adt_kind = adt_def.adt_kind();
1947 let adt_packed = adt_def.repr().pack.is_some();
// Builds the per-variant report: one `FieldInfo` per field, plus the
// variant's size.
1949 let build_variant_info = |n: Option<Symbol>, flds: &[Symbol], layout: TyAndLayout<'tcx>| {
// `min_size` tracks the end of the furthest-extending field; it is used
// as the reported variant size when non-zero (see below).
1950 let mut min_size = Size::ZERO;
1951 let field_info: Vec<_> = flds
1955 let field_layout = layout.field(self, i);
1956 let offset = layout.fields.offset(i);
1957 let field_end = offset + field_layout.size;
1958 if min_size < field_end {
1959 min_size = field_end;
1963 offset: offset.bytes(),
1964 size: field_layout.size.bytes(),
1965 align: field_layout.align.abi.bytes(),
1972 kind: if layout.is_unsized() { SizeKind::Min } else { SizeKind::Exact },
1973 align: layout.align.abi.bytes(),
1974 size: if min_size.bytes() == 0 { layout.size.bytes() } else { min_size.bytes() },
1979 match layout.variants {
1980 Variants::Single { index } => {
1981 if !adt_def.variants().is_empty() && layout.fields != FieldsShape::Primitive {
1983 "print-type-size `{:#?}` variant {}",
1985 adt_def.variant(index).name
1987 let variant_def = &adt_def.variant(index);
1988 let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
1993 vec![build_variant_info(Some(variant_def.name), &fields, layout)],
1996 // (This case arises for *empty* enums; so give it
1998 record(adt_kind.into(), adt_packed, None, vec![]);
2002 Variants::Multiple { tag, ref tag_encoding, .. } => {
2004 "print-type-size `{:#?}` adt general variants def {}",
2006 adt_def.variants().len()
2008 let variant_infos: Vec<_> = adt_def
2011 .map(|(i, variant_def)| {
2012 let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
2014 Some(variant_def.name),
2016 layout.for_variant(self, i),
// Only a directly-encoded tag occupies dedicated bytes worth
// reporting; a niche-encoded tag borrows another field's bytes.
2023 match tag_encoding {
2024 TagEncoding::Direct => Some(tag.size(self)),
2034 /// Type size "skeleton", i.e., the only information determining a type's size.
2035 /// While this is conservative, (aside from constant sizes, only pointers,
2036 /// newtypes thereof and null pointer optimized enums are allowed), it is
2037 /// enough to statically check common use cases of transmute.
///
/// Produced by [`SizeSkeleton::compute`], which first tries a full
/// `layout_of` and only falls back to these shapes when that fails.
2038 #[derive(Copy, Clone, Debug)]
2039 pub enum SizeSkeleton<'tcx> {
2040 /// Any statically computable Layout.
2043 /// A potentially-fat pointer.
2045 /// If true, this pointer is never null.
2047 /// The type which determines the unsized metadata, if any,
2048 /// of this pointer. Either a type parameter or a projection
2049 /// depending on one, with regions erased.
2054 impl<'tcx> SizeSkeleton<'tcx> {
// Computes the size skeleton of a type: an exact size when the full layout
// is available, otherwise a `Pointer`/newtype/nullable-pointer-enum shape.
2058 param_env: ty::ParamEnv<'tcx>,
2059 ) -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
2060 debug_assert!(!ty.has_infer_types_or_consts());
2062 // First try computing a static layout.
// If it succeeds, the size is exact; otherwise keep the error and try to
// recover a skeleton from the type's shape below.
2063 let err = match tcx.layout_of(param_env.and(ty)) {
2065 return Ok(SizeSkeleton::Known(layout.size));
2071 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
2072 let non_zero = !ty.is_unsafe_ptr();
2073 let tail = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
2075 ty::Param(_) | ty::Projection(_) => {
2076 debug_assert!(tail.has_param_types_or_consts());
2077 Ok(SizeSkeleton::Pointer { non_zero, tail: tcx.erase_regions(tail) })
2080 "SizeSkeleton::compute({}): layout errored ({}), yet \
2081 tail `{}` is not a type parameter or a projection",
2089 ty::Adt(def, substs) => {
2090 // Only newtypes and enums w/ nullable pointer optimization.
2091 if def.is_union() || def.variants().is_empty() || def.variants().len() > 2 {
2095 // Get a zero-sized variant or a pointer newtype.
2096 let zero_or_ptr_variant = |i| {
2097 let i = VariantIdx::new(i);
// Recurse into each field; only zero-sized fields and (at most)
// one pointer skeleton are acceptable.
2099 def.variant(i).fields.iter().map(|field| {
2100 SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env)
2103 for field in fields {
2106 SizeSkeleton::Known(size) => {
2107 if size.bytes() > 0 {
2111 SizeSkeleton::Pointer { .. } => {
2122 let v0 = zero_or_ptr_variant(0)?;
// Newtype case: a single variant wrapping exactly one pointer.
2124 if def.variants().len() == 1 {
2125 if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
2126 return Ok(SizeSkeleton::Pointer {
// A `layout_scalar_valid_range` excluding 0 also makes the
// wrapped pointer non-null (e.g. range starting above zero).
2128 || match tcx.layout_scalar_valid_range(def.did()) {
2129 (Bound::Included(start), Bound::Unbounded) => start > 0,
2130 (Bound::Included(start), Bound::Included(end)) => {
2131 0 < start && start < end
2142 let v1 = zero_or_ptr_variant(1)?;
2143 // Nullable pointer enum optimization.
2145 (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None)
2146 | (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
2147 Ok(SizeSkeleton::Pointer { non_zero: false, tail })
2153 ty::Projection(_) | ty::Opaque(..) => {
2154 let normalized = tcx.normalize_erasing_regions(param_env, ty);
2155 if ty == normalized {
2158 SizeSkeleton::compute(normalized, tcx, param_env)
/// Whether the two skeletons are guaranteed to have the same size:
/// equal known sizes, or pointers governed by the same tail type.
2166 pub fn same_size(self, other: SizeSkeleton<'tcx>) -> bool {
2167 match (self, other) {
2168 (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
2169 (SizeSkeleton::Pointer { tail: a, .. }, SizeSkeleton::Pointer { tail: b, .. }) => {
/// Context capability: access to the `TyCtxt` (in addition to the data layout).
2177 pub trait HasTyCtxt<'tcx>: HasDataLayout {
2178 fn tcx(&self) -> TyCtxt<'tcx>;
/// Context capability: access to the `ParamEnv` used for layout queries.
2181 pub trait HasParamEnv<'tcx> {
2182 fn param_env(&self) -> ty::ParamEnv<'tcx>;
// `TyCtxt` itself can serve as a layout context: it supplies the target data
// layout, the target spec, and (trivially) a `TyCtxt`.
2185 impl<'tcx> HasDataLayout for TyCtxt<'tcx> {
2187 fn data_layout(&self) -> &TargetDataLayout {
2192 impl<'tcx> HasTargetSpec for TyCtxt<'tcx> {
2193 fn target_spec(&self) -> &Target {
2198 impl<'tcx> HasTyCtxt<'tcx> for TyCtxt<'tcx> {
2200 fn tcx(&self) -> TyCtxt<'tcx> {
// `TyCtxtAt` (a `TyCtxt` paired with a `Span`) offers the same context
// capabilities as `TyCtxt`.
2205 impl<'tcx> HasDataLayout for ty::query::TyCtxtAt<'tcx> {
2207 fn data_layout(&self) -> &TargetDataLayout {
2212 impl<'tcx> HasTargetSpec for ty::query::TyCtxtAt<'tcx> {
2213 fn target_spec(&self) -> &Target {
2218 impl<'tcx> HasTyCtxt<'tcx> for ty::query::TyCtxtAt<'tcx> {
2220 fn tcx(&self) -> TyCtxt<'tcx> {
// `LayoutCx` forwards data-layout and target-spec queries to its wrapped
// `tcx` field (see `self.tcx.data_layout()` / `self.tcx.target_spec()` below).
2225 impl<'tcx, C> HasParamEnv<'tcx> for LayoutCx<'tcx, C> {
// NOTE(review): body elided here; presumably returns the stored
// `param_env` field — confirm against the full source.
2226 fn param_env(&self) -> ty::ParamEnv<'tcx> {
2231 impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
2232 fn data_layout(&self) -> &TargetDataLayout {
2233 self.tcx.data_layout()
2237 impl<'tcx, T: HasTargetSpec> HasTargetSpec for LayoutCx<'tcx, T> {
2238 fn target_spec(&self) -> &Target {
2239 self.tcx.target_spec()
2243 impl<'tcx, T: HasTyCtxt<'tcx>> HasTyCtxt<'tcx> for LayoutCx<'tcx, T> {
2244 fn tcx(&self) -> TyCtxt<'tcx> {
/// Abstracts over returning either a bare `T` or a `Result<T, E>`
/// (see the two impls below); used by `LayoutOfHelpers::LayoutOfResult`.
2249 pub trait MaybeResult<T> {
2252 fn from(x: Result<T, Self::Error>) -> Self;
2253 fn to_result(self) -> Result<T, Self::Error>;
// Infallible case: a bare `T` *is* the value. The irrefutable `Ok(x)`
// argument pattern in `from` shows the error type is uninhabited here.
2256 impl<T> MaybeResult<T> for T {
2259 fn from(Ok(x): Result<T, Self::Error>) -> Self {
2262 fn to_result(self) -> Result<T, Self::Error> {
// Fallible case: `Result<T, E>` passes values through unchanged.
2267 impl<T, E> MaybeResult<T> for Result<T, E> {
2270 fn from(x: Result<T, Self::Error>) -> Self {
2273 fn to_result(self) -> Result<T, Self::Error> {
/// `rustc_target`'s `TyAndLayout` instantiated with this crate's interned `Ty<'tcx>`.
2278 pub type TyAndLayout<'tcx> = rustc_target::abi::TyAndLayout<'tcx, Ty<'tcx>>;
2280 /// Trait for contexts that want to be able to compute layouts of types.
2281 /// This automatically gives access to `LayoutOf`, through a blanket `impl`.
2282 pub trait LayoutOfHelpers<'tcx>: HasDataLayout + HasTyCtxt<'tcx> + HasParamEnv<'tcx> {
2283 /// The `TyAndLayout`-wrapping type (or `TyAndLayout` itself), which will be
2284 /// returned from `layout_of` (see also `handle_layout_err`).
2285 type LayoutOfResult: MaybeResult<TyAndLayout<'tcx>>;
2287 /// `Span` to use for `tcx.at(span)`, from `layout_of`.
2288 // FIXME(eddyb) perhaps make this mandatory to get contexts to track it better?
// NOTE(review): default body elided here; presumably returns `DUMMY_SP`
// (consistent with the `span.is_dummy()` fallback in `spanned_layout_of`) —
// confirm against the full source.
2290 fn layout_tcx_at_span(&self) -> Span {
2294 /// Helper used for `layout_of`, to adapt `tcx.layout_of(...)` into a
2295 /// `Self::LayoutOfResult` (which does not need to be a `Result<...>`).
2297 /// Most `impl`s, which propagate `LayoutError`s, should simply return `err`,
2298 /// but this hook allows e.g. codegen to return only `TyAndLayout` from its
2299 /// `cx.layout_of(...)`, without any `Result<...>` around it to deal with
2300 /// (and any `LayoutError`s are turned into fatal errors or ICEs).
2301 fn handle_layout_err(
2303 err: LayoutError<'tcx>,
2306 ) -> <Self::LayoutOfResult as MaybeResult<TyAndLayout<'tcx>>>::Error;
2309 /// Blanket extension trait for contexts that can compute layouts of types.
2310 pub trait LayoutOf<'tcx>: LayoutOfHelpers<'tcx> {
2311 /// Computes the layout of a type. Note that this implicitly
2312 /// executes in "reveal all" mode, and will normalize the input type.
// Convenience entry point: delegates to `spanned_layout_of` with no span.
2314 fn layout_of(&self, ty: Ty<'tcx>) -> Self::LayoutOfResult {
2315 self.spanned_layout_of(ty, DUMMY_SP)
2318 /// Computes the layout of a type, at `span`. Note that this implicitly
2319 /// executes in "reveal all" mode, and will normalize the input type.
2320 // FIXME(eddyb) avoid passing information like this, and instead add more
2321 // `TyCtxt::at`-like APIs to be able to do e.g. `cx.at(span).layout_of(ty)`.
2323 fn spanned_layout_of(&self, ty: Ty<'tcx>, span: Span) -> Self::LayoutOfResult {
// A dummy span falls back to whatever span the context tracks.
2324 let span = if !span.is_dummy() { span } else { self.layout_tcx_at_span() };
2325 let tcx = self.tcx().at(span);
// Run the `layout_of` query and route any error through the context's hook.
2328 tcx.layout_of(self.param_env().and(ty))
2329 .map_err(|err| self.handle_layout_err(err, span, ty)),
2334 impl<'tcx, C: LayoutOfHelpers<'tcx>> LayoutOf<'tcx> for C {}
// `LayoutCx` over a plain `TyCtxt` propagates `LayoutError`s as a `Result`
// and returns the error unchanged from the hook.
2336 impl<'tcx> LayoutOfHelpers<'tcx> for LayoutCx<'tcx, TyCtxt<'tcx>> {
2337 type LayoutOfResult = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
2340 fn handle_layout_err(&self, err: LayoutError<'tcx>, _: Span, _: Ty<'tcx>) -> LayoutError<'tcx> {
// Same as above for the span-carrying `TyCtxtAt` context; additionally overrides
// `layout_tcx_at_span` so errors can be reported at the stored span.
2345 impl<'tcx> LayoutOfHelpers<'tcx> for LayoutCx<'tcx, ty::query::TyCtxtAt<'tcx>> {
2346 type LayoutOfResult = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
2349 fn layout_tcx_at_span(&self) -> Span {
2354 fn handle_layout_err(&self, err: LayoutError<'tcx>, _: Span, _: Ty<'tcx>) -> LayoutError<'tcx> {
// Hooks that let `rustc_target`'s generic layout code introspect `Ty<'tcx>` values
// through any context providing a `TyCtxt` and a `ParamEnv`.
2359 impl<'tcx, C> TyAbiInterface<'tcx, C> for Ty<'tcx>
2361 C: HasTyCtxt<'tcx> + HasParamEnv<'tcx>,
// Projects an enum/generator layout onto one of its variants.
2363 fn ty_and_layout_for_variant(
2364 this: TyAndLayout<'tcx>,
2366 variant_index: VariantIdx,
2367 ) -> TyAndLayout<'tcx> {
2368 let layout = match this.variants {
2369 Variants::Single { index }
2370 // If all variants but one are uninhabited, the variant layout is the enum layout.
2371 if index == variant_index &&
2372 // Don't confuse variants of uninhabited enums with the enum itself.
2373 // For more details see https://github.com/rust-lang/rust/issues/69763.
2374 this.fields != FieldsShape::Primitive =>
// `Single { index }` with a *different* index: the requested variant is
// uninhabited, so synthesize an `Abi::Uninhabited` layout for it below.
2379 Variants::Single { index } => {
2381 let param_env = cx.param_env();
2383 // Deny calling for_variant more than once for non-Single enums.
2384 if let Ok(original_layout) = tcx.layout_of(param_env.and(this.ty)) {
2385 assert_eq!(original_layout.variants, Variants::Single { index });
2388 let fields = match this.ty.kind() {
2389 ty::Adt(def, _) if def.variants().is_empty() =>
2390 bug!("for_variant called on zero-variant enum"),
2391 ty::Adt(def, _) => def.variant(variant_index).fields.len(),
// Minimal placeholder layout: zero-size-ish, uninhabited, byte-aligned.
2394 tcx.intern_layout(LayoutS {
2395 variants: Variants::Single { index: variant_index },
2396 fields: match NonZeroUsize::new(fields) {
2397 Some(fields) => FieldsShape::Union(fields),
2398 None => FieldsShape::Arbitrary { offsets: vec![], memory_index: vec![] },
2400 abi: Abi::Uninhabited,
2401 largest_niche: None,
2402 align: tcx.data_layout.i8_align,
// Multi-variant case: the per-variant layouts were precomputed; just index.
2407 Variants::Multiple { ref variants, .. } => variants[variant_index],
2410 assert_eq!(*layout.variants(), Variants::Single { index: variant_index });
2412 TyAndLayout { ty: this.ty, layout }
// Computes the type-and-layout of field `i` of `this`. The inner helper first
// resolves the field to either a plain `Ty` (whose layout is then queried) or a
// ready-made `TyAndLayout` (used for synthesized fields like enum tags).
2415 fn ty_and_layout_field(this: TyAndLayout<'tcx>, cx: &C, i: usize) -> TyAndLayout<'tcx> {
2416 enum TyMaybeWithLayout<'tcx> {
2418 TyAndLayout(TyAndLayout<'tcx>),
2421 fn field_ty_or_layout<'tcx>(
2422 this: TyAndLayout<'tcx>,
2423 cx: &(impl HasTyCtxt<'tcx> + HasParamEnv<'tcx>),
2425 ) -> TyMaybeWithLayout<'tcx> {
// Builds a layout for a discriminant tag scalar on the fly.
2427 let tag_layout = |tag: Scalar| -> TyAndLayout<'tcx> {
2429 layout: tcx.intern_layout(LayoutS::scalar(cx, tag)),
2430 ty: tag.primitive().to_ty(tcx),
2434 match *this.ty.kind() {
// Types with no fields at all: asking for a field is a compiler bug.
2443 | ty::GeneratorWitness(..)
2445 | ty::Dynamic(_, _, ty::Dyn) => {
2446 bug!("TyAndLayout::field({:?}): not applicable", this)
2449 // Potentially-fat pointers.
2450 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
2451 assert!(i < this.fields.count());
2453 // Reuse the fat `*T` type as its own thin pointer data field.
2454 // This provides information about, e.g., DST struct pointees
2455 // (which may have no non-DST form), and will work as long
2456 // as the `Abi` or `FieldsShape` is checked by users.
2458 let nil = tcx.mk_unit();
2459 let unit_ptr_ty = if this.ty.is_unsafe_ptr() {
2462 tcx.mk_mut_ref(tcx.lifetimes.re_static, nil)
2465 // NOTE(eddyb) using an empty `ParamEnv`, and `unwrap`-ing
2466 // the `Result` should always work because the type is
2467 // always either `*mut ()` or `&'static mut ()`.
2468 return TyMaybeWithLayout::TyAndLayout(TyAndLayout {
2470 ..tcx.layout_of(ty::ParamEnv::reveal_all().and(unit_ptr_ty)).unwrap()
// Field 1 of a fat pointer is its metadata; its type depends on the unsized tail.
2474 match tcx.struct_tail_erasing_lifetimes(pointee, cx.param_env()).kind() {
2475 ty::Slice(_) | ty::Str => TyMaybeWithLayout::Ty(tcx.types.usize),
2476 ty::Dynamic(_, _, ty::Dyn) => {
// Vtable pointer modeled as `&'static [usize; 3]` for layout purposes.
2477 TyMaybeWithLayout::Ty(tcx.mk_imm_ref(
2478 tcx.lifetimes.re_static,
2479 tcx.mk_array(tcx.types.usize, 3),
2481 /* FIXME: use actual fn pointers
2482 Warning: naively computing the number of entries in the
2483 vtable by counting the methods on the trait + methods on
2484 all parent traits does not work, because some methods can
2485 be not object safe and thus excluded from the vtable.
2486 Increase this counter if you tried to implement this but
2487 failed to do it without duplicating a lot of code from
2488 other places in the compiler: 2
2490 tcx.mk_array(tcx.types.usize, 3),
2491 tcx.mk_array(Option<fn()>),
2495 _ => bug!("TyAndLayout::field({:?}): not applicable", this),
2499 // Arrays and slices.
2500 ty::Array(element, _) | ty::Slice(element) => TyMaybeWithLayout::Ty(element),
2501 ty::Str => TyMaybeWithLayout::Ty(tcx.types.u8),
2503 // Tuples, generators and closures.
// Closures delegate to their tupled upvars.
2504 ty::Closure(_, ref substs) => field_ty_or_layout(
2505 TyAndLayout { ty: substs.as_closure().tupled_upvars_ty(), ..this },
2510 ty::Generator(def_id, ref substs, _) => match this.variants {
// A single-variant generator: fields are that state's saved locals.
2511 Variants::Single { index } => TyMaybeWithLayout::Ty(
2514 .state_tys(def_id, tcx)
2515 .nth(index.as_usize())
2520 Variants::Multiple { tag, tag_field, .. } => {
// The discriminant field of a multi-state generator.
2522 return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
2524 TyMaybeWithLayout::Ty(substs.as_generator().prefix_tys().nth(i).unwrap())
2528 ty::Tuple(tys) => TyMaybeWithLayout::Ty(tys[i]),
2531 ty::Adt(def, substs) => {
2532 match this.variants {
2533 Variants::Single { index } => {
2534 TyMaybeWithLayout::Ty(def.variant(index).fields[i].ty(tcx, substs))
2537 // Discriminant field for enums (where applicable).
2538 Variants::Multiple { tag, .. } => {
2540 return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
2545 ty::Dynamic(_, _, ty::DynStar) => {
// `dyn*` is (data, vtable); both fields modeled with usize-shaped types.
2547 TyMaybeWithLayout::Ty(tcx.types.usize)
2549 // FIXME(dyn-star) same FIXME as above applies here too
2550 TyMaybeWithLayout::Ty(
2552 tcx.lifetimes.re_static,
2553 tcx.mk_array(tcx.types.usize, 3),
2557 bug!("no field {i} on dyn*")
// Types that should never reach layout (inference vars, params, errors, ...).
2563 | ty::Placeholder(..)
2567 | ty::Error(_) => bug!("TyAndLayout::field: unexpected type `{}`", this.ty),
// Resolve a plain `Ty` result through the layout query; a failure here is a
// compiler invariant violation (the parent layout already exists).
2571 match field_ty_or_layout(this, cx, i) {
2572 TyMaybeWithLayout::Ty(field_ty) => {
2573 cx.tcx().layout_of(cx.param_env().and(field_ty)).unwrap_or_else(|e| {
2575 "failed to get layout for `{}`: {},\n\
2576 despite it being a field (#{}) of an existing layout: {:#?}",
2584 TyMaybeWithLayout::TyAndLayout(field_layout) => field_layout,
// Describes the pointee (size, alignment, aliasing kind, address space) of a
// pointer found at byte `offset` inside `this`, if any. Used by codegen to emit
// attributes like `noalias`/`dereferenceable`.
2588 fn ty_and_layout_pointee_info_at(
2589 this: TyAndLayout<'tcx>,
2592 ) -> Option<PointeeInfo> {
2594 let param_env = cx.param_env();
// Function pointers live in the instruction address space on some targets.
2596 let addr_space_of_ty = |ty: Ty<'tcx>| {
2597 if ty.is_fn() { cx.data_layout().instruction_address_space } else { AddressSpace::DATA }
2600 let pointee_info = match *this.ty.kind() {
// Raw pointers at offset 0: size/align known, but no safety guarantees.
2601 ty::RawPtr(mt) if offset.bytes() == 0 => {
2602 tcx.layout_of(param_env.and(mt.ty)).ok().map(|layout| PointeeInfo {
2604 align: layout.align.abi,
2606 address_space: addr_space_of_ty(mt.ty),
2609 ty::FnPtr(fn_sig) if offset.bytes() == 0 => {
2610 tcx.layout_of(param_env.and(tcx.mk_fn_ptr(fn_sig))).ok().map(|layout| PointeeInfo {
2612 align: layout.align.abi,
2614 address_space: cx.data_layout().instruction_address_space,
// References at offset 0: classify the pointer kind from mutability and the
// pointee's Freeze/Unpin properties.
2617 ty::Ref(_, ty, mt) if offset.bytes() == 0 => {
2618 let address_space = addr_space_of_ty(ty);
2619 let kind = if tcx.sess.opts.optimize == OptLevel::No {
2620 // Use conservative pointer kind if not optimizing. This saves us the
2621 // Freeze/Unpin queries, and can save time in the codegen backend (noalias
2622 // attributes in LLVM have compile-time cost even in unoptimized builds).
2623 PointerKind::SharedMutable
2626 hir::Mutability::Not => {
// `&T` is `Frozen` only when `T: Freeze` (no interior mutability);
// the `Frozen` arm is elided in this excerpt.
2627 if ty.is_freeze(tcx.at(DUMMY_SP), cx.param_env()) {
2630 PointerKind::SharedMutable
2633 hir::Mutability::Mut => {
2634 // References to self-referential structures should not be considered
2635 // noalias, as another pointer to the structure can be obtained, that
2636 // is not based-on the original reference. We consider all !Unpin
2637 // types to be potentially self-referential here.
2638 if ty.is_unpin(tcx.at(DUMMY_SP), cx.param_env()) {
2639 PointerKind::UniqueBorrowed
2641 PointerKind::UniqueBorrowedPinned
2647 tcx.layout_of(param_env.and(ty)).ok().map(|layout| PointeeInfo {
2649 align: layout.align.abi,
// Not directly a pointer: recurse into the field that covers `offset`.
2656 let mut data_variant = match this.variants {
2657 // Within the discriminant field, only the niche itself is
2658 // always initialized, so we only check for a pointer at its
2661 // If the niche is a pointer, it's either valid (according
2662 // to its type), or null (which the niche field's scalar
2663 // validity range encodes). This allows using
2664 // `dereferenceable_or_null` for e.g., `Option<&T>`, and
2665 // this will continue to work as long as we don't start
2666 // using more niches than just null (e.g., the first page of
2667 // the address space, or unaligned pointers).
2668 Variants::Multiple {
2669 tag_encoding: TagEncoding::Niche { untagged_variant, .. },
2672 } if this.fields.offset(tag_field) == offset => {
2673 Some(this.for_variant(cx, untagged_variant))
2678 if let Some(variant) = data_variant {
2679 // We're not interested in any unions.
2680 if let FieldsShape::Union(_) = variant.fields {
2681 data_variant = None;
2685 let mut result = None;
2687 if let Some(variant) = data_variant {
// Scan fields for one that fully contains [offset, offset + ptr_size).
2688 let ptr_end = offset + Pointer.size(cx);
2689 for i in 0..variant.fields.count() {
2690 let field_start = variant.fields.offset(i);
2691 if field_start <= offset {
2692 let field = variant.field(cx, i);
2693 result = field.to_result().ok().and_then(|field| {
2694 if ptr_end <= field_start + field.size {
2695 // We found the right field, look inside it.
2697 field.pointee_info_at(cx, offset - field_start);
2703 if result.is_some() {
2710 // FIXME(eddyb) This should be for `ptr::Unique<T>`, not `Box<T>`.
2711 if let Some(ref mut pointee) = result {
2712 if let ty::Adt(def, _) = this.ty.kind() {
2713 if def.is_box() && offset.bytes() == 0 {
2714 pointee.safe = Some(PointerKind::UniqueOwned);
2724 "pointee_info_at (offset={:?}, type kind: {:?}) => {:?}",
// Simple type-kind predicates used by the target-agnostic ABI code.
2733 fn is_adt(this: TyAndLayout<'tcx>) -> bool {
2734 matches!(this.ty.kind(), ty::Adt(..))
2737 fn is_never(this: TyAndLayout<'tcx>) -> bool {
2738 this.ty.kind() == &ty::Never
2741 fn is_tuple(this: TyAndLayout<'tcx>) -> bool {
2742 matches!(this.ty.kind(), ty::Tuple(..))
// The unit type `()` is the zero-length tuple.
2745 fn is_unit(this: TyAndLayout<'tcx>) -> bool {
2746 matches!(this.ty.kind(), ty::Tuple(list) if list.len() == 0)
2750 impl<'tcx> ty::Instance<'tcx> {
2751 // NOTE(eddyb) this is private to avoid using it from outside of
2752 // `fn_abi_of_instance` - any other uses are either too high-level
2753 // for `Instance` (e.g. typeck would use `Ty::fn_sig` instead),
2754 // or should go through `FnAbi` instead, to avoid losing any
2755 // adjustments `fn_abi_of_instance` might be performing.
2756 #[tracing::instrument(level = "debug", skip(tcx, param_env))]
// Builds the ABI-relevant signature for this instance: plain `FnDef`s pass through
// (normalized), closures get their env prepended, and generators are rewritten to
// the `fn(Pin<&mut Self>, Resume) -> GeneratorState<Y, R>` shape.
2757 fn fn_sig_for_fn_abi(
2760 param_env: ty::ParamEnv<'tcx>,
2761 ) -> ty::PolyFnSig<'tcx> {
2762 let ty = self.ty(tcx, param_env);
2765 // HACK(davidtwco,eddyb): This is a workaround for polymorphization considering
2766 // parameters unused if they show up in the signature, but not in the `mir::Body`
2767 // (i.e. due to being inside a projection that got normalized, see
2768 // `src/test/ui/polymorphization/normalized_sig_types.rs`), and codegen not keeping
2769 // track of a polymorphization `ParamEnv` to allow normalizing later.
2771 // We normalize the `fn_sig` again after substituting at a later point.
2772 let mut sig = match *ty.kind() {
2773 ty::FnDef(def_id, substs) => tcx
2774 .bound_fn_sig(def_id)
2775 .map_bound(|fn_sig| {
2776 tcx.normalize_erasing_regions(tcx.param_env(def_id), fn_sig)
2778 .subst(tcx, substs),
2779 _ => unreachable!(),
2782 if let ty::InstanceDef::VTableShim(..) = self.def {
2783 // Modify `fn(self, ...)` to `fn(self: *mut Self, ...)`.
2784 sig = sig.map_bound(|mut sig| {
2785 let mut inputs_and_output = sig.inputs_and_output.to_vec();
2786 inputs_and_output[0] = tcx.mk_mut_ptr(inputs_and_output[0]);
2787 sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);
// Closures: prepend a late-bound environment parameter to the user-visible sig.
2793 ty::Closure(def_id, substs) => {
2794 let sig = substs.as_closure().sig();
2796 let bound_vars = tcx.mk_bound_variable_kinds(
2799 .chain(iter::once(ty::BoundVariableKind::Region(ty::BrEnv))),
2801 let br = ty::BoundRegion {
2802 var: ty::BoundVar::from_usize(bound_vars.len() - 1),
2803 kind: ty::BoundRegionKind::BrEnv,
2805 let env_region = ty::ReLateBound(ty::INNERMOST, br);
2806 let env_ty = tcx.closure_env_ty(def_id, substs, env_region).unwrap();
2808 let sig = sig.skip_binder();
2809 ty::Binder::bind_with_vars(
2811 iter::once(env_ty).chain(sig.inputs().iter().cloned()),
// Generators: callers see `fn(Pin<&mut G>, Resume) -> GeneratorState<Y, R>`.
2820 ty::Generator(_, substs, _) => {
2821 let sig = substs.as_generator().poly_sig();
2823 let bound_vars = tcx.mk_bound_variable_kinds(
2826 .chain(iter::once(ty::BoundVariableKind::Region(ty::BrEnv))),
2828 let br = ty::BoundRegion {
2829 var: ty::BoundVar::from_usize(bound_vars.len() - 1),
2830 kind: ty::BoundRegionKind::BrEnv,
2832 let env_region = ty::ReLateBound(ty::INNERMOST, br);
2833 let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty);
// Wrap the environment reference in `Pin<...>` via the lang item.
2835 let pin_did = tcx.require_lang_item(LangItem::Pin, None);
2836 let pin_adt_ref = tcx.adt_def(pin_did);
2837 let pin_substs = tcx.intern_substs(&[env_ty.into()]);
2838 let env_ty = tcx.mk_adt(pin_adt_ref, pin_substs);
// Return type is `GeneratorState<Yield, Return>`.
2840 let sig = sig.skip_binder();
2841 let state_did = tcx.require_lang_item(LangItem::GeneratorState, None);
2842 let state_adt_ref = tcx.adt_def(state_did);
2843 let state_substs = tcx.intern_substs(&[sig.yield_ty.into(), sig.return_ty.into()]);
2844 let ret_ty = tcx.mk_adt(state_adt_ref, state_substs);
2845 ty::Binder::bind_with_vars(
2847 [env_ty, sig.resume_ty].iter(),
2850 hir::Unsafety::Normal,
2851 rustc_target::spec::abi::Abi::Rust,
2856 _ => bug!("unexpected type {:?} in Instance::fn_sig", ty),
2861 /// Calculates whether a function's ABI can unwind or not.
2863 /// This takes two primary parameters:
2865 /// * `codegen_fn_attr_flags` - these are flags calculated as part of the
2866 /// codegen attrs for a defined function. For function pointers this set of
2867 /// flags is the empty set. This is only applicable for Rust-defined
2868 /// functions, and generally isn't needed except for small optimizations where
2869 /// we try to say a function which otherwise might look like it could unwind
2870 /// doesn't actually unwind (such as for intrinsics and such).
2872 /// * `abi` - this is the ABI that the function is defined with. This is the
2873 /// primary factor for determining whether a function can unwind or not.
2875 /// Note that in this case unwinding is not necessarily panicking in Rust. Rust
2876 /// panics are implemented with unwinds on most platform (when
2877 /// `-Cpanic=unwind`), but this also accounts for `-Cpanic=abort` build modes.
2878 /// Notably unwinding is disallowed for more non-Rust ABIs unless it's
2879 /// specifically in the name (e.g. `"C-unwind"`). Unwinding within each ABI is
2880 /// defined for each ABI individually, but it always corresponds to some form of
2881 /// stack-based unwinding (the exact mechanism of which varies
2882 /// platform-by-platform).
2884 /// Rust functions are classified whether or not they can unwind based on the
2885 /// active "panic strategy". In other words Rust functions are considered to
2886 /// unwind in `-Cpanic=unwind` mode and cannot unwind in `-Cpanic=abort` mode.
2887 /// Note that Rust supports intermingling panic=abort and panic=unwind code, but
2888 /// only if the final panic mode is panic=abort. In this scenario any code
2889 /// previously compiled assuming that a function can unwind is still correct, it
2890 /// just never happens to actually unwind at runtime.
2892 /// This function's answer to whether or not a function can unwind is quite
2893 /// impactful throughout the compiler. This affects things like:
2895 /// * Calling a function which can't unwind means codegen simply ignores any
2896 /// associated unwinding cleanup.
2897 /// * Calling a function which can unwind from a function which can't unwind
2898 /// causes the `abort_unwinding_calls` MIR pass to insert a landing pad that
2899 /// aborts the process.
2900 /// * This affects whether functions have the LLVM `nounwind` attribute, which
2901 /// affects various optimizations and codegen.
2903 /// FIXME: this is actually buggy with respect to Rust functions. Rust functions
2904 /// compiled with `-Cpanic=unwind` and referenced from another crate compiled
2905 /// with `-Cpanic=abort` will look like they can't unwind when in fact they
2906 /// might (from a foreign exception or similar).
2908 #[tracing::instrument(level = "debug", skip(tcx))]
2909 pub fn fn_can_unwind<'tcx>(tcx: TyCtxt<'tcx>, fn_def_id: Option<DefId>, abi: SpecAbi) -> bool {
// Defined-function special cases; each guard early-returns (returns elided in
// this excerpt — presumably `false` in each case; confirm against full source).
2910 if let Some(did) = fn_def_id {
2911 // Special attribute for functions which can't unwind.
2912 if tcx.codegen_fn_attrs(did).flags.contains(CodegenFnAttrFlags::NEVER_UNWIND) {
2916 // With `-C panic=abort`, all non-FFI functions are required to not unwind.
2918 // Note that this is true regardless ABI specified on the function -- a `extern "C-unwind"`
2919 // function defined in Rust is also required to abort.
2920 if tcx.sess.panic_strategy() == PanicStrategy::Abort && !tcx.is_foreign_item(did) {
2924 // With -Z panic-in-drop=abort, drop_in_place never unwinds.
2926 // This is not part of `codegen_fn_attrs` as it can differ between crates
2927 // and therefore cannot be computed in core.
2928 if tcx.sess.opts.unstable_opts.panic_in_drop == PanicStrategy::Abort {
2929 if Some(did) == tcx.lang_items().drop_in_place_fn() {
2935 // Otherwise if this isn't special then unwinding is generally determined by
2936 // the ABI of the itself. ABIs like `C` have variants which also
2937 // specifically allow unwinding (`C-unwind`), but not all platform-specific
2938 // ABIs have such an option. Otherwise the only other thing here is Rust
2939 // itself, and those ABIs are determined by the panic strategy configured
2940 // for this compilation.
2942 // Unfortunately at this time there's also another caveat. Rust [RFC
2943 // 2945][rfc] has been accepted and is in the process of being implemented
2944 // and stabilized. In this interim state we need to deal with historical
2945 // rustc behavior as well as plan for future rustc behavior.
2947 // Historically functions declared with `extern "C"` were marked at the
2948 // codegen layer as `nounwind`. This happened regardless of `panic=unwind`
2949 // or not. This is UB for functions in `panic=unwind` mode that then
2950 // actually panic and unwind. Note that this behavior is true for both
2951 // externally declared functions as well as Rust-defined function.
2953 // To fix this UB rustc would like to change in the future to catch unwinds
2954 // from function calls that may unwind within a Rust-defined `extern "C"`
2955 // function and forcibly abort the process, thereby respecting the
2956 // `nounwind` attribute emitted for `extern "C"`. This behavior change isn't
2957 // ready to roll out, so determining whether or not the `C` family of ABIs
2958 // unwinds is conditional not only on their definition but also whether the
2959 // `#![feature(c_unwind)]` feature gate is active.
2961 // Note that this means that unlike historical compilers rustc now, by
2962 // default, unconditionally thinks that the `C` ABI may unwind. This will
2963 // prevent some optimization opportunities, however, so we try to scope this
2964 // change and only assume that `C` unwinds with `panic=unwind` (as opposed
2965 // to `panic=abort`).
2967 // Eventually the check against `c_unwind` here will ideally get removed and
2968 // this'll be a little cleaner as it'll be a straightforward check of the
2971 // [rfc]: https://github.com/rust-lang/rfcs/blob/master/text/2945-c-unwind-abi.md
// C-family ABIs: the `unwind` flag (from the `-unwind` ABI variants) decides,
// with the pre-`c_unwind`-gate fallback described above.
2977 | Stdcall { unwind }
2978 | Fastcall { unwind }
2979 | Vectorcall { unwind }
2980 | Thiscall { unwind }
2983 | SysV64 { unwind } => {
2985 || (!tcx.features().c_unwind && tcx.sess.panic_strategy() == PanicStrategy::Unwind)
// Special platform/intrinsic ABIs never unwind.
2993 | AvrNonBlockingInterrupt
2994 | CCmseNonSecureCall
2998 | Unadjusted => false,
// Rust ABIs unwind exactly when compiling with `-Cpanic=unwind`.
2999 Rust | RustCall | RustCold => tcx.sess.panic_strategy() == PanicStrategy::Unwind,
// Maps a source-level ABI (`extern "..."`) to the target-level calling convention,
// after letting the target adjust the ABI (e.g. platform-specific remapping).
3004 pub fn conv_from_spec_abi(tcx: TyCtxt<'_>, abi: SpecAbi) -> Conv {
3005 use rustc_target::spec::abi::Abi::*;
3006 match tcx.sess.target.adjust_abi(abi) {
3007 RustIntrinsic | PlatformIntrinsic | Rust | RustCall => Conv::Rust,
3008 RustCold => Conv::RustCold,
3010 // It's the ABI's job to select this, not ours.
3011 System { .. } => bug!("system abi should be selected elsewhere"),
3012 EfiApi => bug!("eficall abi should be selected elsewhere"),
3014 Stdcall { .. } => Conv::X86Stdcall,
3015 Fastcall { .. } => Conv::X86Fastcall,
3016 Vectorcall { .. } => Conv::X86VectorCall,
3017 Thiscall { .. } => Conv::X86ThisCall,
3018 C { .. } => Conv::C,
3019 Unadjusted => Conv::C,
3020 Win64 { .. } => Conv::X86_64Win64,
3021 SysV64 { .. } => Conv::X86_64SysV,
3022 Aapcs { .. } => Conv::ArmAapcs,
3023 CCmseNonSecureCall => Conv::CCmseNonSecureCall,
3024 PtxKernel => Conv::PtxKernel,
3025 Msp430Interrupt => Conv::Msp430Intr,
3026 X86Interrupt => Conv::X86Intr,
3027 AmdGpuKernel => Conv::AmdGpuKernel,
3028 AvrInterrupt => Conv::AvrInterrupt,
3029 AvrNonBlockingInterrupt => Conv::AvrNonBlockingInterrupt,
3032 // These API constants ought to be more specific...
3033 Cdecl { .. } => Conv::C,
3037 /// Error produced by attempting to compute or adjust a `FnAbi`.
3038 #[derive(Copy, Clone, Debug, HashStable)]
3039 pub enum FnAbiError<'tcx> {
3040 /// Error produced by a `layout_of` call, while computing `FnAbi` initially.
3041 Layout(LayoutError<'tcx>),
3043 /// Error produced by attempting to adjust a `FnAbi`, for a "foreign" ABI.
3044 AdjustForForeignAbi(call::AdjustForForeignAbiError),
// Allows `?` on `layout_of` results inside `FnAbi` computation.
3047 impl<'tcx> From<LayoutError<'tcx>> for FnAbiError<'tcx> {
3048 fn from(err: LayoutError<'tcx>) -> Self {
// Allows `?` on foreign-ABI adjustment results inside `FnAbi` computation.
3053 impl From<call::AdjustForForeignAbiError> for FnAbiError<'_> {
3054 fn from(err: call::AdjustForForeignAbiError) -> Self {
3055 Self::AdjustForForeignAbi(err)
// `Display` simply forwards to the wrapped error's own formatting.
3059 impl<'tcx> fmt::Display for FnAbiError<'tcx> {
3060 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
3062 Self::Layout(err) => err.fmt(f),
3063 Self::AdjustForForeignAbi(err) => err.fmt(f),
3068 // FIXME(eddyb) maybe use something like this for an unified `fn_abi_of`, not
3069 // just for error handling.
// Records which kind of `fn_abi_of_*` request failed, for error reporting.
3071 pub enum FnAbiRequest<'tcx> {
3072 OfFnPtr { sig: ty::PolyFnSig<'tcx>, extra_args: &'tcx ty::List<Ty<'tcx>> },
3073 OfInstance { instance: ty::Instance<'tcx>, extra_args: &'tcx ty::List<Ty<'tcx>> },
3076 /// Trait for contexts that want to be able to compute `FnAbi`s.
3077 /// This automatically gives access to `FnAbiOf`, through a blanket `impl`.
// Mirrors `LayoutOfHelpers`: the associated type picks whether results are
// `Result`-wrapped, and the hook decides how errors surface.
3078 pub trait FnAbiOfHelpers<'tcx>: LayoutOfHelpers<'tcx> {
3079 /// The `&FnAbi`-wrapping type (or `&FnAbi` itself), which will be
3080 /// returned from `fn_abi_of_*` (see also `handle_fn_abi_err`).
3081 type FnAbiOfResult: MaybeResult<&'tcx FnAbi<'tcx, Ty<'tcx>>>;
3083 /// Helper used for `fn_abi_of_*`, to adapt `tcx.fn_abi_of_*(...)` into a
3084 /// `Self::FnAbiOfResult` (which does not need to be a `Result<...>`).
3086 /// Most `impl`s, which propagate `FnAbiError`s, should simply return `err`,
3087 /// but this hook allows e.g. codegen to return only `&FnAbi` from its
3088 /// `cx.fn_abi_of_*(...)`, without any `Result<...>` around it to deal with
3089 /// (and any `FnAbiError`s are turned into fatal errors or ICEs).
3090 fn handle_fn_abi_err(
3092 err: FnAbiError<'tcx>,
3094 fn_abi_request: FnAbiRequest<'tcx>,
3095 ) -> <Self::FnAbiOfResult as MaybeResult<&'tcx FnAbi<'tcx, Ty<'tcx>>>>::Error;
3098 /// Blanket extension trait for contexts that can compute `FnAbi`s.
3099 pub trait FnAbiOf<'tcx>: FnAbiOfHelpers<'tcx> {
3100 /// Compute a `FnAbi` suitable for indirect calls, i.e. to `fn` pointers.
3102 /// NB: this doesn't handle virtual calls - those should use `fn_abi_of_instance`
3103 /// instead, where the instance is an `InstanceDef::Virtual`.
3105 fn fn_abi_of_fn_ptr(
3107 sig: ty::PolyFnSig<'tcx>,
3108 extra_args: &'tcx ty::List<Ty<'tcx>>,
3109 ) -> Self::FnAbiOfResult {
3110 // FIXME(eddyb) get a better `span` here.
3111 let span = self.layout_tcx_at_span();
3112 let tcx = self.tcx().at(span);
// Run the query; failures are routed through the context's error hook.
3114 MaybeResult::from(tcx.fn_abi_of_fn_ptr(self.param_env().and((sig, extra_args))).map_err(
3115 |err| self.handle_fn_abi_err(err, span, FnAbiRequest::OfFnPtr { sig, extra_args }),
3119 /// Compute a `FnAbi` suitable for declaring/defining an `fn` instance, and for
3120 /// direct calls to an `fn`.
3122 /// NB: that includes virtual calls, which are represented by "direct calls"
3123 /// to an `InstanceDef::Virtual` instance (of `<dyn Trait as Trait>::fn`).
3125 #[tracing::instrument(level = "debug", skip(self))]
3126 fn fn_abi_of_instance(
3128 instance: ty::Instance<'tcx>,
3129 extra_args: &'tcx ty::List<Ty<'tcx>>,
3130 ) -> Self::FnAbiOfResult {
3131 // FIXME(eddyb) get a better `span` here.
3132 let span = self.layout_tcx_at_span();
3133 let tcx = self.tcx().at(span);
3136 tcx.fn_abi_of_instance(self.param_env().and((instance, extra_args))).map_err(|err| {
3137 // HACK(eddyb) at least for definitions of/calls to `Instance`s,
3138 // we can get some kind of span even if one wasn't provided.
3139 // However, we don't do this early in order to avoid calling
3140 // `def_span` unconditionally (which may have a perf penalty).
3141 let span = if !span.is_dummy() { span } else { tcx.def_span(instance.def_id()) };
3142 self.handle_fn_abi_err(err, span, FnAbiRequest::OfInstance { instance, extra_args })
3148 impl<'tcx, C: FnAbiOfHelpers<'tcx>> FnAbiOf<'tcx> for C {}
// Query provider (registered in `provide` at the top of this file): computes the
// `FnAbi` of a bare `fn` pointer — no caller-location, no def-id, never virtual.
3150 fn fn_abi_of_fn_ptr<'tcx>(
3152 query: ty::ParamEnvAnd<'tcx, (ty::PolyFnSig<'tcx>, &'tcx ty::List<Ty<'tcx>>)>,
3153 ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
3154 let (param_env, (sig, extra_args)) = query.into_parts();
3156 LayoutCx { tcx, param_env }.fn_abi_new_uncached(sig, extra_args, None, None, false)
// Query provider: computes the `FnAbi` for a concrete `Instance`, deriving its
// signature via `fn_sig_for_fn_abi` and threading through `#[track_caller]` and
// virtual-call (`InstanceDef::Virtual`) information.
3159 fn fn_abi_of_instance<'tcx>(
3161 query: ty::ParamEnvAnd<'tcx, (ty::Instance<'tcx>, &'tcx ty::List<Ty<'tcx>>)>,
3162 ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
3163 let (param_env, (instance, extra_args)) = query.into_parts();
3165 let sig = instance.fn_sig_for_fn_abi(tcx, param_env);
// `#[track_caller]` functions receive a hidden `&Location` argument.
3167 let caller_location = if instance.def.requires_caller_location(tcx) {
3168 Some(tcx.caller_location_ty())
3173 LayoutCx { tcx, param_env }.fn_abi_new_uncached(
3177 Some(instance.def_id()),
3178 matches!(instance.def, ty::InstanceDef::Virtual(..)),
3182 // Handle safe Rust thin and fat pointers.
// Decorates a scalar argument/return slot with LLVM-facing attributes (Zext,
// NoUndef, NonNull, NoAlias, ReadOnly, dereferenceable info) derived from Rust's
// type-level guarantees.
3183 pub fn adjust_for_rust_scalar<'tcx>(
3184 cx: LayoutCx<'tcx, TyCtxt<'tcx>>,
3185 attrs: &mut ArgAttributes,
3187 layout: TyAndLayout<'tcx>,
3191 // Booleans are always a noundef i1 that needs to be zero-extended.
3192 if scalar.is_bool() {
3193 attrs.ext(ArgExtension::Zext);
3194 attrs.set(ArgAttribute::NoUndef);
3198 // Scalars which have invalid values cannot be undef.
3199 if !scalar.is_always_valid(&cx) {
3200 attrs.set(ArgAttribute::NoUndef);
3203 // Only pointer types handled below.
3204 let Scalar::Initialized { value: Pointer, valid_range} = scalar else { return };
// A validity range excluding 0 means the pointer can never be null.
3206 if !valid_range.contains(0) {
3207 attrs.set(ArgAttribute::NonNull);
3210 if let Some(pointee) = layout.pointee_info_at(&cx, offset) {
3211 if let Some(kind) = pointee.safe {
3212 attrs.pointee_align = Some(pointee.align);
3214 // `Box` (`UniqueBorrowed`) are not necessarily dereferenceable
3215 // for the entire duration of the function as they can be deallocated
3216 // at any time. Same for shared mutable references. If LLVM had a
3217 // way to say "dereferenceable on entry" we could use it here.
3218 attrs.pointee_size = match kind {
3219 PointerKind::UniqueBorrowed
3220 | PointerKind::UniqueBorrowedPinned
3221 | PointerKind::Frozen => pointee.size,
3222 PointerKind::SharedMutable | PointerKind::UniqueOwned => Size::ZERO,
3225 // `Box`, `&T`, and `&mut T` cannot be undef.
3226 // Note that this only applies to the value of the pointer itself;
3227 // this attribute doesn't make it UB for the pointed-to data to be undef.
3228 attrs.set(ArgAttribute::NoUndef);
3230 // The aliasing rules for `Box<T>` are still not decided, but currently we emit
3231 // `noalias` for it. This can be turned off using an unstable flag.
3232 // See https://github.com/rust-lang/unsafe-code-guidelines/issues/326
3233 let noalias_for_box = cx.tcx.sess.opts.unstable_opts.box_noalias.unwrap_or(true);
3235 // `&mut` pointer parameters never alias other parameters,
3236 // or mutable global data
3238 // `&T` where `T` contains no `UnsafeCell<U>` is immutable,
3239 // and can be marked as both `readonly` and `noalias`, as
3240 // LLVM's definition of `noalias` is based solely on memory
3241 // dependencies rather than pointer equality
3243 // Due to past miscompiles in LLVM, we apply a separate NoAliasMutRef attribute
3244 // for UniqueBorrowed arguments, so that the codegen backend can decide whether
3245 // or not to actually emit the attribute. It can also be controlled with the
3246 // `-Zmutable-noalias` debugging option.
3247 let no_alias = match kind {
3248 PointerKind::SharedMutable
3249 | PointerKind::UniqueBorrowed
3250 | PointerKind::UniqueBorrowedPinned => false,
3251 PointerKind::UniqueOwned => noalias_for_box,
// Returned `&T` cannot get `noalias`: callers may hold other copies.
3252 PointerKind::Frozen => !is_return,
3255 attrs.set(ArgAttribute::NoAlias);
3258 if kind == PointerKind::Frozen && !is_return {
3259 attrs.set(ArgAttribute::ReadOnly);
3262 if kind == PointerKind::UniqueBorrowed && !is_return {
3263 attrs.set(ArgAttribute::NoAliasMutRef);
3269 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
3270 // FIXME(eddyb) perhaps group the signature/type-containing (or all of them?)
3271 // arguments of this method, into a separate `struct`.
3272 #[tracing::instrument(
3274 skip(self, caller_location, fn_def_id, force_thin_self_ptr)
// Builds the (uncached) `FnAbi` for a function signature: normalizes the
// signature, un-tuples the trailing "rust-call" argument pack, classifies the
// return value and each argument through the local `arg_of` closure, then
// applies `fn_abi_adjust_for_abi` and arena-allocates the result.
// NOTE(review): this chunk is elided in places (original line numbers jump),
// so some statements between the visible ones are missing from this view.
3276 fn fn_abi_new_uncached(
3278 sig: ty::PolyFnSig<'tcx>,
// Extra argument types appended after `sig.inputs()` (used for C-variadic
// call sites; must be empty for "rust-call" per the assert below).
3279 extra_args: &[Ty<'tcx>],
// `Some(location_ty)` when a `#[track_caller]` location argument must be
// appended after all other arguments (see the `.chain(caller_location)` below).
3280 caller_location: Option<Ty<'tcx>>,
3281 fn_def_id: Option<DefId>,
3282 // FIXME(eddyb) replace this with something typed, like an `enum`.
// `true` for virtual (trait-object) calls: the first argument (`self`) is
// passed as a thin data pointer instead of a fat pointer — see the
// `make_thin_self_ptr` call in `arg_of` below.
3283 force_thin_self_ptr: bool,
3284 ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
// Late-bound regions are irrelevant to ABI computation; erase them up front.
3285 let sig = self.tcx.normalize_erasing_late_bound_regions(self.param_env, sig);
// Map the source-level ABI string (`extern "..."`) to a calling convention.
3287 let conv = conv_from_spec_abi(self.tcx(), sig.abi);
3289 let mut inputs = sig.inputs();
3290 let extra_args = if sig.abi == RustCall {
// "rust-call" functions are never C-variadic and never receive extra args;
// instead their last declared input is a tuple that gets splatted into
// individual arguments.
3291 assert!(!sig.c_variadic && extra_args.is_empty());
3293 if let Some(input) = sig.inputs().last() {
3294 if let ty::Tuple(tupled_arguments) = input.kind() {
// Drop the tuple argument itself; its element types become the extra args.
3295 inputs = &sig.inputs()[0..sig.inputs().len() - 1];
// NOTE(review): the two string fragments below belong to elided bug/span_bug
// arms (non-tuple last argument, or no arguments at all) — the surrounding
// code is missing from this view.
3299 "argument to function with \"rust-call\" ABI \
3305 "argument to function with \"rust-call\" ABI \
3310 assert!(sig.c_variadic || extra_args.is_empty());
// Targets that, unusually, do NOT ignore zero-sized struct arguments; these
// flags feed the ZST `PassMode::Ignore` decision inside `arg_of` below.
3314 let target = &self.tcx.sess.target;
3315 let target_env_gnu_like = matches!(&target.env[..], "gnu" | "musl" | "uclibc");
3316 let win_x64_gnu = target.os == "windows" && target.arch == "x86_64" && target.env == "gnu";
3317 let linux_s390x_gnu_like =
3318 target.os == "linux" && target.arch == "s390x" && target_env_gnu_like;
3319 let linux_sparc64_gnu_like =
3320 target.os == "linux" && target.arch == "sparc64" && target_env_gnu_like;
3321 let linux_powerpc_gnu_like =
3322 target.os == "linux" && target.arch == "powerpc" && target_env_gnu_like;
// Rust-controlled ABIs get the Rust-specific treatment (scalar attributes,
// ZST ignoring); foreign ABIs must follow the platform's own rules.
3324 let rust_abi = matches!(sig.abi, RustIntrinsic | PlatformIntrinsic | Rust | RustCall);
// Classifies one argument (`arg_idx == Some(i)`) or the return value
// (`arg_idx == None`) into an `ArgAbi`.
3326 let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| -> Result<_, FnAbiError<'tcx>> {
3327 let span = tracing::debug_span!("arg_of");
3328 let _entered = span.enter();
3329 let is_return = arg_idx.is_none();
// `?` propagates layout errors (e.g. `SizeOverflow`) as `FnAbiError`.
3331 let layout = self.layout_of(ty)?;
3332 let layout = if force_thin_self_ptr && arg_idx == Some(0) {
3333 // Don't pass the vtable, it's not an argument of the virtual fn.
3334 // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
3335 // or `&/&mut dyn Trait` because this is special-cased elsewhere in codegen
3336 make_thin_self_ptr(self, layout)
// Per-scalar callback: attach Rust-specific attributes (noundef, noalias,
// readonly, ...) via `adjust_for_rust_scalar`.
3341 let mut arg = ArgAbi::new(self, layout, |layout, scalar, offset| {
3342 let mut attrs = ArgAttributes::new();
3343 adjust_for_rust_scalar(*self, &mut attrs, scalar, *layout, offset, is_return);
3347 if arg.layout.is_zst() {
3348 // For some forsaken reason, x86_64-pc-windows-gnu
3349 // doesn't ignore zero-sized struct arguments.
3350 // The same is true for {s390x,sparc64,powerpc}-unknown-linux-{gnu,musl,uclibc}.
3354 && !linux_s390x_gnu_like
3355 && !linux_sparc64_gnu_like
3356 && !linux_powerpc_gnu_like)
3358 arg.mode = PassMode::Ignore;
// Assemble the full ABI: return value first, then the (possibly untupled)
// inputs, the extra args, and finally the optional `#[track_caller]`
// location argument.
3365 let mut fn_abi = FnAbi {
3366 ret: arg_of(sig.output(), None)?,
3370 .chain(extra_args.iter().copied())
3371 .chain(caller_location)
3373 .map(|(i, ty)| arg_of(ty, Some(i)))
3374 .collect::<Result<_, _>>()?,
3375 c_variadic: sig.c_variadic,
// Only the declared (non-variadic) inputs count toward `fixed_count`.
3376 fixed_count: inputs.len() as u32,
3378 can_unwind: fn_can_unwind(self.tcx(), fn_def_id, sig.abi),
// Apply Rust/foreign ABI fixups in place before handing out the result.
3380 self.fn_abi_adjust_for_abi(&mut fn_abi, sig.abi)?;
3381 debug!("fn_abi_new_uncached = {:?}", fn_abi);
// Arena-allocate so callers get a `'tcx`-lived shared reference.
3382 Ok(self.tcx.arena.alloc(fn_abi))
3385 #[tracing::instrument(level = "trace", skip(self))]
// Post-processes a freshly built `FnAbi`: for Rust-controlled ABIs, makes
// unsized/large aggregates (and, on most targets, SIMD vectors) indirect and
// casts small aggregates to an integer register; for all other ABIs it
// defers to the target-specific `adjust_for_foreign_abi`.
3386 fn fn_abi_adjust_for_abi(
3388 fn_abi: &mut FnAbi<'tcx, Ty<'tcx>>,
3390 ) -> Result<(), FnAbiError<'tcx>> {
// `extern "unadjusted"` means: leave everything exactly as computed.
3391 if abi == SpecAbi::Unadjusted {
3395 if abi == SpecAbi::Rust
3396 || abi == SpecAbi::RustCall
3397 || abi == SpecAbi::RustIntrinsic
3398 || abi == SpecAbi::PlatformIntrinsic
// Applied uniformly to the return value and every argument (see the calls
// at the bottom of this branch).
3400 let fixup = |arg: &mut ArgAbi<'tcx, Ty<'tcx>>| {
// Already-ignored (ZST) arguments need no further adjustment.
3401 if arg.is_ignore() {
3405 match arg.layout.abi {
3406 Abi::Aggregate { .. } => {}
3408 // This is a fun case! The gist of what this is doing is
3409 // that we want callers and callees to always agree on the
3410 // ABI of how they pass SIMD arguments. If we were to *not*
3411 // make these arguments indirect then they'd be immediates
3412 // in LLVM, which means that they'd used whatever the
3413 // appropriate ABI is for the callee and the caller. That
3414 // means, for example, if the caller doesn't have AVX
3415 // enabled but the callee does, then passing an AVX argument
3416 // across this boundary would cause corrupt data to show up.
3418 // This problem is fixed by unconditionally passing SIMD
3419 // arguments through memory between callers and callees
3420 // which should get them all to agree on ABI regardless of
3421 // target feature sets. Some more information about this
3422 // issue can be found in #44367.
3424 // Note that the platform intrinsic ABI is exempt here as
3425 // that's how we connect up to LLVM and it's unstable
3426 // anyway, we control all calls to it in libstd.
3428 if abi != SpecAbi::PlatformIntrinsic
3429 && self.tcx.sess.target.simd_types_indirect =>
3431 arg.make_indirect();
3438 let size = arg.layout.size;
// Aggregates that are unsized or wider than a pointer go through memory.
3439 if arg.layout.is_unsized() || size > Pointer.size(self) {
3440 arg.make_indirect();
3442 // We want to pass small aggregates as immediates, but using
3443 // a LLVM aggregate type for this leads to bad optimizations,
3444 // so we pick an appropriately sized integer type instead.
3445 arg.cast_to(Reg { kind: RegKind::Integer, size });
3448 fixup(&mut fn_abi.ret);
3449 for arg in fn_abi.args.iter_mut() {
// Non-Rust ABIs: delegate to the target-specific foreign-ABI adjustment.
3453 fn_abi.adjust_for_foreign_abi(self, abi)?;
3460 #[tracing::instrument(level = "debug", skip(cx))]
3461 fn make_thin_self_ptr<'tcx>(
3462 cx: &(impl HasTyCtxt<'tcx> + HasParamEnv<'tcx>),
3463 layout: TyAndLayout<'tcx>,
3464 ) -> TyAndLayout<'tcx> {
3466 let fat_pointer_ty = if layout.is_unsized() {
3467 // unsized `self` is passed as a pointer to `self`
3468 // FIXME (mikeyhew) change this to use &own if it is ever added to the language
3469 tcx.mk_mut_ptr(layout.ty)
3472 Abi::ScalarPair(..) | Abi::Scalar(..) => (),
3473 _ => bug!("receiver type has unsupported layout: {:?}", layout),
3476 // In the case of Rc<Self>, we need to explicitly pass a *mut RcBox<Self>
3477 // with a Scalar (not ScalarPair) ABI. This is a hack that is understood
3478 // elsewhere in the compiler as a method on a `dyn Trait`.
3479 // To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes until we
3480 // get a built-in pointer type
3481 let mut fat_pointer_layout = layout;
3482 'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr()
3483 && !fat_pointer_layout.ty.is_region_ptr()
3485 for i in 0..fat_pointer_layout.fields.count() {
3486 let field_layout = fat_pointer_layout.field(cx, i);
3488 if !field_layout.is_zst() {
3489 fat_pointer_layout = field_layout;
3490 continue 'descend_newtypes;
3494 bug!("receiver has no non-zero-sized fields {:?}", fat_pointer_layout);
3497 fat_pointer_layout.ty
3500 // we now have a type like `*mut RcBox<dyn Trait>`
3501 // change its layout to that of `*mut ()`, a thin pointer, but keep the same type
3502 // this is understood as a special case elsewhere in the compiler
3503 let unit_ptr_ty = tcx.mk_mut_ptr(tcx.mk_unit());
3508 // NOTE(eddyb) using an empty `ParamEnv`, and `unwrap`-ing the `Result`
3509 // should always work because the type is always `*mut ()`.
3510 ..tcx.layout_of(ty::ParamEnv::reveal_all().and(unit_ptr_ty)).unwrap()