1 use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
2 use crate::mir::{GeneratorLayout, GeneratorSavedLocal};
3 use crate::ty::normalize_erasing_regions::NormalizationError;
5 self, layout_sanity_check::sanity_check_layout, subst::SubstsRef, EarlyBinder, ReprOptions, Ty,
9 use rustc_attr as attr;
10 use rustc_errors::{DiagnosticBuilder, Handler, IntoDiagnostic};
12 use rustc_hir::def_id::DefId;
13 use rustc_hir::lang_items::LangItem;
14 use rustc_index::bit_set::BitSet;
15 use rustc_index::vec::{Idx, IndexVec};
16 use rustc_session::{config::OptLevel, DataTypeKind, FieldInfo, SizeKind, VariantInfo};
17 use rustc_span::symbol::Symbol;
18 use rustc_span::{Span, DUMMY_SP};
19 use rustc_target::abi::call::{
20 ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, Conv, FnAbi, PassMode, Reg, RegKind,
22 use rustc_target::abi::*;
23 use rustc_target::spec::{abi::Abi as SpecAbi, HasTargetSpec, PanicStrategy, Target};
25 use std::cmp::{self, Ordering};
28 use std::num::NonZeroUsize;
31 use rand::{seq::SliceRandom, SeedableRng};
32 use rand_xoshiro::Xoshiro128StarStar;
/// Registers this module's queries (`layout_of`, `fn_abi_of_fn_ptr`,
/// `fn_abi_of_instance`) with the global query `Providers` table.
34 pub fn provide(providers: &mut ty::query::Providers) {
// NOTE(review): the assignment target (`*providers = ...`) appears elided in
// this view; the struct-update expression below presumably overwrites
// `*providers` — confirm against the full source.
36 ty::query::Providers { layout_of, fn_abi_of_fn_ptr, fn_abi_of_instance, ..*providers };
/// Extension methods for `rustc_target::abi::Integer`, converting between
/// abstract integer widths and the compiler's `Ty` / attribute / `ty::IntTy`
/// representations.
39 pub trait IntegerExt {
/// Converts this integer width plus a signedness flag to the matching
/// primitive `Ty` (e.g. `I32` + `false` -> `u32`).
40 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx>;
/// Maps an `attr::IntType` (from a `#[repr(...)]` attribute) to an
/// `Integer`, resolving `isize`/`usize` via the data layout.
41 fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
/// Maps a signed `ty::IntTy` to an `Integer`.
42 fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer;
/// Maps an unsigned `ty::UintTy` to an `Integer`.
43 fn from_uint_ty<C: HasDataLayout>(cx: &C, uty: ty::UintTy) -> Integer;
53 impl IntegerExt for Integer {
/// Maps `(width, signedness)` to the interned primitive type; exhaustive
/// over the five fixed widths times both signednesses.
55 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx> {
56 match (*self, signed) {
57 (I8, false) => tcx.types.u8,
58 (I16, false) => tcx.types.u16,
59 (I32, false) => tcx.types.u32,
60 (I64, false) => tcx.types.u64,
61 (I128, false) => tcx.types.u128,
62 (I8, true) => tcx.types.i8,
63 (I16, true) => tcx.types.i16,
64 (I32, true) => tcx.types.i32,
65 (I64, true) => tcx.types.i64,
66 (I128, true) => tcx.types.i128,
70 /// Gets the Integer type from an attr::IntType.
71 fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
72 let dl = cx.data_layout();
// Signed and unsigned variants of the same width map to the same `Integer`;
// signedness is tracked separately by callers (see `repr_discr` below).
75 attr::SignedInt(ast::IntTy::I8) | attr::UnsignedInt(ast::UintTy::U8) => I8,
76 attr::SignedInt(ast::IntTy::I16) | attr::UnsignedInt(ast::UintTy::U16) => I16,
77 attr::SignedInt(ast::IntTy::I32) | attr::UnsignedInt(ast::UintTy::U32) => I32,
78 attr::SignedInt(ast::IntTy::I64) | attr::UnsignedInt(ast::UintTy::U64) => I64,
79 attr::SignedInt(ast::IntTy::I128) | attr::UnsignedInt(ast::UintTy::U128) => I128,
// Pointer-sized integers depend on the target's data layout.
80 attr::SignedInt(ast::IntTy::Isize) | attr::UnsignedInt(ast::UintTy::Usize) => {
81 dl.ptr_sized_integer()
86 fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer {
89 ty::IntTy::I16 => I16,
90 ty::IntTy::I32 => I32,
91 ty::IntTy::I64 => I64,
92 ty::IntTy::I128 => I128,
93 ty::IntTy::Isize => cx.data_layout().ptr_sized_integer(),
96 fn from_uint_ty<C: HasDataLayout>(cx: &C, ity: ty::UintTy) -> Integer {
99 ty::UintTy::U16 => I16,
100 ty::UintTy::U32 => I32,
101 ty::UintTy::U64 => I64,
102 ty::UintTy::U128 => I128,
103 ty::UintTy::Usize => cx.data_layout().ptr_sized_integer(),
107 /// Finds the appropriate Integer type and signedness for the given
108 /// signed discriminant range and `#[repr]` attribute.
109 /// N.B.: `u128` values above `i128::MAX` will be treated as signed, but
110 /// that shouldn't affect anything, other than maybe debuginfo.
// NOTE(review): the parameter list of `repr_discr` (presumably `tcx`, `ty`,
// `repr`, `min`, `max`) is elided in this view — confirm against full source.
117 ) -> (Integer, bool) {
118 // Theoretically, negative values could be larger in unsigned representation
119 // than the unsigned representation of the signed minimum. However, if there
120 // are any negative values, the only valid unsigned representation is u128
121 // which can fit all i128 values, so the result remains unaffected.
122 let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
123 let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));
// An explicit `#[repr(<int>)]` pins the discriminant type; it is a bug (not
// a user error) for it to be too small, hence the bug message below.
125 if let Some(ity) = repr.int {
126 let discr = Integer::from_attr(&tcx, ity);
127 let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
130 "Integer::repr_discr: `#[repr]` hint too small for \
131 discriminant range of enum `{}",
135 return (discr, ity.is_signed());
138 let at_least = if repr.c() {
139 // This is usually I32, however it can be different on some platforms,
140 // notably hexagon and arm-none/thumb-none
141 tcx.data_layout().c_enum_min_size
143 // repr(Rust) enums try to be as small as possible
147 // If there are no negative values, we can use the unsigned fit.
149 (cmp::max(unsigned_fit, at_least), false)
151 (cmp::max(signed_fit, at_least), true)
/// Extension methods for `rustc_target::abi::Primitive`, converting layout
/// primitives back to compiler `Ty`s.
156 pub trait PrimitiveExt {
/// Converts this primitive to the matching `Ty` (ints, floats, or a raw
/// pointer type for `Pointer`).
157 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
/// Converts this primitive to an *integer* `Ty`; panics on floats (see impl).
158 fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
161 impl PrimitiveExt for Primitive {
163 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
165 Int(i, signed) => i.to_ty(tcx, signed),
166 F32 => tcx.types.f32,
167 F64 => tcx.types.f64,
// `Pointer` has no single source type; `*mut ()` is used as a stand-in.
168 Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
172 /// Return an *integer* type matching this primitive.
173 /// Useful in particular when dealing with enum discriminants.
175 fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
177 Int(i, signed) => i.to_ty(tcx, signed),
// Pointers are treated as `usize` when an integer view is needed.
178 Pointer => tcx.types.usize,
179 F32 | F64 => bug!("floats do not have an int type"),
184 /// The first half of a fat pointer.
186 /// - For a trait object, this is the address of the box.
187 /// - For a slice, this is the base address.
188 pub const FAT_PTR_ADDR: usize = 0;
190 /// The second half of a fat pointer.
192 /// - For a trait object, this is the address of the vtable.
193 /// - For a slice, this is the length.
194 pub const FAT_PTR_EXTRA: usize = 1;
196 /// The maximum supported number of lanes in a SIMD vector.
198 /// This value is selected based on backend support:
199 /// * LLVM does not appear to have a vector width limit.
200 /// * Cranelift stores the base-2 log of the lane count in a 4 bit integer.
// 1 << 0xF == 32768 lanes: the largest count whose base-2 log (15) fits in
// Cranelift's 4-bit field.
201 pub const MAX_SIMD_LANES: u64 = 1 << 0xF;
/// Reasons layout computation can fail for a type.
// NOTE(review): an `Unknown(Ty)` variant is referenced by the `Display` impl
// below but is not visible in this view — presumably elided; confirm against
// the full source.
203 #[derive(Copy, Clone, Debug, HashStable, TyEncodable, TyDecodable)]
204 pub enum LayoutError<'tcx> {
// The type's size would overflow the target's object size bound.
206 SizeOverflow(Ty<'tcx>),
// The type could not be normalized, so its layout cannot be determined.
207 NormalizationFailure(Ty<'tcx>, NormalizationError<'tcx>),
// A `LayoutError` surfaces as a fatal diagnostic (`!` marks the diagnostic
// level: emission does not return), using the `Display` text below.
210 impl<'a> IntoDiagnostic<'a, !> for LayoutError<'a> {
211 fn into_diagnostic(self, handler: &'a Handler) -> DiagnosticBuilder<'a, !> {
212 handler.struct_fatal(self.to_string())
// Human-readable messages for each failure mode; these strings are what the
// fatal diagnostic above ultimately shows the user.
216 impl<'tcx> fmt::Display for LayoutError<'tcx> {
217 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
219 LayoutError::Unknown(ty) => write!(f, "the type `{}` has an unknown layout", ty),
220 LayoutError::SizeOverflow(ty) => {
221 write!(f, "values of the type `{}` are too big for the current architecture", ty)
223 LayoutError::NormalizationFailure(t, e) => write!(
225 "unable to determine layout for `{}` because `{}` cannot be normalized",
227 e.get_type_for_failure()
/// Query provider: computes the layout of a type under a given `ParamEnv`.
/// Normalizes the type first, re-queries under the normalized type so the
/// result is cached for both spellings, then computes the layout uncached
/// and sanity-checks it.
// NOTE(review): the `fn layout_of(tcx: TyCtxt<'tcx>, ...)` signature line is
// elided in this view — confirm against the full source.
233 #[instrument(skip(tcx, query), level = "debug")]
236 query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
237 ) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
238 let (param_env, ty) = query.into_parts();
// Layout is a post-typeck property: reveal all opaque types/consts.
241 let param_env = param_env.with_reveal_all_normalized(tcx);
242 let unnormalized_ty = ty;
244 // FIXME: We might want to have two different versions of `layout_of`:
245 // One that can be called after typecheck has completed and can use
246 // `normalize_erasing_regions` here and another one that can be called
247 // before typecheck has completed and uses `try_normalize_erasing_regions`.
248 let ty = match tcx.try_normalize_erasing_regions(param_env, ty) {
250 Err(normalization_error) => {
251 return Err(LayoutError::NormalizationFailure(ty, normalization_error));
255 if ty != unnormalized_ty {
256 // Ensure this layout is also cached for the normalized type.
257 return tcx.layout_of(param_env.and(ty));
260 let cx = LayoutCx { tcx, param_env };
262 let layout = cx.layout_of_uncached(ty)?;
263 let layout = TyAndLayout { ty, layout };
// Records layout statistics when `-Z print-type-sizes` style reporting is on.
265 cx.record_layout_for_printing(layout);
// Debug-time invariant checks on the produced layout.
267 sanity_check_layout(&cx, &layout);
/// Context bundle for layout computation: the compiler context `C` plus the
/// parameter environment to resolve generic bounds under.
// NOTE(review): the `pub tcx: C` field appears elided in this view (the
// `layout_of` body constructs `LayoutCx { tcx, param_env }`) — confirm.
272 #[derive(Clone, Copy)]
273 pub struct LayoutCx<'tcx, C> {
275 pub param_env: ty::ParamEnv<'tcx>,
// Classifies how a univariant layout may be laid out / extended.
// NOTE(review): the `enum StructKind` header line and the `AlwaysSized` /
// `MaybeUnsized` variant declarations appear elided in this view (they are
// referenced throughout `univariant_uninterned`) — confirm against source.
278 #[derive(Copy, Clone, Debug)]
280 /// A tuple, closure, or univariant which cannot be coerced to unsized.
282 /// A univariant, the last field of which may be coerced to unsized.
284 /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
285 Prefixed(Size, Align),
// Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
// This is used to go between `memory_index` (source field order to memory order)
// and `inverse_memory_index` (memory order to source field order).
// See also `FieldsShape::Arbitrary::memory_index` for more details.
// FIXME(eddyb) build a better abstraction for permutations, if possible.
fn invert_mapping(map: &[u32]) -> Vec<u32> {
    let mut inverse = vec![0; map.len()];
    for i in 0..map.len() {
        // `map` is a permutation of 0..len, so `map[i]` is in bounds.
        inverse[map[i] as usize] = i as u32;
    }
    // Return the inverted permutation. (The source as reviewed was truncated
    // here, missing this return expression and the closing brace.)
    inverse
}
301 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
/// Builds the canonical layout for a pair of scalars `(a, b)`: `b` is placed
/// at `a`'s size rounded up to `b`'s alignment, and the whole pair gets the
/// max of both alignments (at least the aggregate alignment).
302 fn scalar_pair(&self, a: Scalar, b: Scalar) -> LayoutS<'tcx> {
303 let dl = self.data_layout();
304 let b_align = b.align(dl);
305 let align = a.align(dl).max(b_align).max(dl.aggregate_align);
306 let b_offset = a.size(dl).align_to(b_align.abi);
307 let size = (b_offset + b.size(dl)).align_to(align.abi);
309 // HACK(nox): We iter on `b` and then `a` because `max_by_key`
310 // returns the last maximum.
311 let largest_niche = Niche::from_scalar(dl, b_offset, b)
313 .chain(Niche::from_scalar(dl, Size::ZERO, a))
314 .max_by_key(|niche| niche.available(dl))
317 variants: Variants::Single { index: VariantIdx::new(0) },
318 fields: FieldsShape::Arbitrary {
// Field 0 (`a`) at offset 0, field 1 (`b`) at `b_offset`; source order
// equals memory order here, hence the identity `memory_index`.
319 offsets: vec![Size::ZERO, b_offset],
320 memory_index: vec![0, 1],
322 abi: Abi::ScalarPair(a, b),
/// Computes the (not-yet-interned) layout of a single-variant aggregate:
/// optionally reorders fields for size, assigns offsets and alignment,
/// tracks the largest niche, and tries to upgrade the ABI to `Scalar` /
/// `ScalarPair` when the field arrangement permits.
// NOTE(review): several parameter lines (`ty`, `repr`, `kind`) and a number
// of interior lines are elided in this view — comments below describe only
// what the visible lines establish.
329 fn univariant_uninterned(
332 fields: &[TyAndLayout<'_>],
335 ) -> Result<LayoutS<'tcx>, LayoutError<'tcx>> {
336 let dl = self.data_layout();
337 let pack = repr.pack;
// `#[repr(packed)]` + `#[repr(align)]` together are rejected earlier;
// reaching here with both is a compiler bug, reported non-fatally.
338 if pack.is_some() && repr.align.is_some() {
339 self.tcx.sess.delay_span_bug(DUMMY_SP, "struct cannot be packed and aligned");
340 return Err(LayoutError::Unknown(ty));
343 let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };
// Start with the identity ordering: memory order == source order.
345 let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();
347 let optimize = !repr.inhibit_struct_field_reordering_opt();
// A MaybeUnsized struct must keep its last (possibly-unsized) field last,
// so only the prefix of the ordering is eligible for reordering.
350 if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
351 let optimizing = &mut inverse_memory_index[..end];
352 let field_align = |f: &TyAndLayout<'_>| {
353 if let Some(pack) = pack { f.align.abi.min(pack) } else { f.align.abi }
356 // If `-Z randomize-layout` was enabled for the type definition we can shuffle
357 // the field ordering to try and catch some code making assumptions about layouts
358 // we don't guarantee
359 if repr.can_randomize_type_layout() {
360 // `ReprOptions.layout_seed` is a deterministic seed that we can use to
361 // randomize field ordering with
362 let mut rng = Xoshiro128StarStar::seed_from_u64(repr.field_shuffle_seed);
364 // Shuffle the ordering of the fields
365 optimizing.shuffle(&mut rng);
367 // Otherwise we just leave things alone and actually optimize the type's fields
370 StructKind::AlwaysSized | StructKind::MaybeUnsized => {
371 optimizing.sort_by_key(|&x| {
372 // Place ZSTs first to avoid "interesting offsets",
373 // especially with only one or two non-ZST fields.
374 let f = &fields[x as usize];
// Sort key: non-ZSTs after ZSTs, then by *descending* alignment.
375 (!f.is_zst(), cmp::Reverse(field_align(f)))
379 StructKind::Prefixed(..) => {
380 // Sort in ascending alignment so that the layout stays optimal
381 // regardless of the prefix
382 optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
386 // FIXME(Kixiron): We can always shuffle fields within a given alignment class
387 // regardless of the status of `-Z randomize-layout`
391 // inverse_memory_index holds field indices by increasing memory offset.
392 // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
393 // We now write field offsets to the corresponding offset slot;
394 // field 5 with offset 0 puts 0 in offsets[5].
395 // At the bottom of this function, we invert `inverse_memory_index` to
396 // produce `memory_index` (see `invert_mapping`).
398 let mut sized = true;
399 let mut offsets = vec![Size::ZERO; fields.len()];
400 let mut offset = Size::ZERO;
401 let mut largest_niche = None;
402 let mut largest_niche_available = 0;
// A prefix (e.g. an enum tag) reserves space and alignment before field 0.
404 if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
406 if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
407 align = align.max(AbiAndPrefAlign::new(prefix_align));
408 offset = prefix_size.align_to(prefix_align);
// Walk fields in memory order, accumulating offsets/alignment/niches.
411 for &i in &inverse_memory_index {
412 let field = fields[i as usize];
414 self.tcx.sess.delay_span_bug(
417 "univariant: field #{} of `{}` comes after unsized field",
424 if field.is_unsized() {
428 // Invariant: offset < dl.obj_size_bound() <= 1<<61
429 let field_align = if let Some(pack) = pack {
430 field.align.min(AbiAndPrefAlign::new(pack))
434 offset = offset.align_to(field_align.abi);
435 align = align.max(field_align);
437 debug!("univariant offset: {:?} field: {:#?}", offset, field);
438 offsets[i as usize] = offset;
// Track the best niche over all fields, translating its offset into
// the struct's coordinate space.
440 if let Some(mut niche) = field.largest_niche {
441 let available = niche.available(dl);
442 if available > largest_niche_available {
443 largest_niche_available = available;
444 niche.offset += offset;
445 largest_niche = Some(niche);
// Overflow here means the struct exceeds the target's object size bound.
449 offset = offset.checked_add(field.size, dl).ok_or(LayoutError::SizeOverflow(ty))?;
452 if let Some(repr_align) = repr.align {
453 align = align.max(AbiAndPrefAlign::new(repr_align));
456 debug!("univariant min_size: {:?}", offset);
457 let min_size = offset;
459 // As stated above, inverse_memory_index holds field indices by increasing offset.
460 // This makes it an already-sorted view of the offsets vec.
461 // To invert it, consider:
462 // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
463 // Field 5 would be the first element, so memory_index is i:
464 // Note: if we didn't optimize, it's already right.
467 if optimize { invert_mapping(&inverse_memory_index) } else { inverse_memory_index };
469 let size = min_size.align_to(align.abi);
470 let mut abi = Abi::Aggregate { sized };
472 // Unpack newtype ABIs and find scalar pairs.
473 if sized && size.bytes() > 0 {
474 // All other fields must be ZSTs.
475 let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());
// Peek at up to three non-ZST fields to distinguish the 1- and 2-field cases.
477 match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
478 // We have exactly one non-ZST field.
479 (Some((i, field)), None, None) => {
480 // Field fills the struct and it has a scalar or scalar pair ABI.
481 if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size
484 // For plain scalars, or vectors of them, we can't unpack
485 // newtypes for `#[repr(C)]`, as that affects C ABIs.
486 Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
489 // But scalar pairs are Rust-specific and get
490 // treated as aggregates by C ABIs anyway.
491 Abi::ScalarPair(..) => {
499 // Two non-ZST fields, and they're both scalars.
500 (Some((i, a)), Some((j, b)), None) => {
501 match (a.abi, b.abi) {
502 (Abi::Scalar(a), Abi::Scalar(b)) => {
503 // Order by the memory placement, not source order.
504 let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
// Build the canonical pair layout and only adopt `ScalarPair`
// if our computed offsets/alignment match it exactly.
509 let pair = self.scalar_pair(a, b);
510 let pair_offsets = match pair.fields {
511 FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
512 assert_eq!(memory_index, &[0, 1]);
517 if offsets[i] == pair_offsets[0]
518 && offsets[j] == pair_offsets[1]
519 && align == pair.align
522 // We can use `ScalarPair` only when it matches our
523 // already computed layout (including `#[repr(C)]`).
// Any uninhabited field makes the whole aggregate uninhabited.
535 if fields.iter().any(|f| f.abi.is_uninhabited()) {
536 abi = Abi::Uninhabited;
540 variants: Variants::Single { index: VariantIdx::new(0) },
541 fields: FieldsShape::Arbitrary { offsets, memory_index },
549 fn layout_of_uncached(&self, ty: Ty<'tcx>) -> Result<Layout<'tcx>, LayoutError<'tcx>> {
551 let param_env = self.param_env;
552 let dl = self.data_layout();
553 let scalar_unit = |value: Primitive| {
554 let size = value.size(dl);
555 assert!(size.bits() <= 128);
556 Scalar::Initialized { value, valid_range: WrappingRange::full(size) }
559 |value: Primitive| tcx.intern_layout(LayoutS::scalar(self, scalar_unit(value)));
561 let univariant = |fields: &[TyAndLayout<'_>], repr: &ReprOptions, kind| {
562 Ok(tcx.intern_layout(self.univariant_uninterned(ty, fields, repr, kind)?))
564 debug_assert!(!ty.has_infer_types_or_consts());
566 Ok(match *ty.kind() {
568 ty::Bool => tcx.intern_layout(LayoutS::scalar(
570 Scalar::Initialized {
571 value: Int(I8, false),
572 valid_range: WrappingRange { start: 0, end: 1 },
575 ty::Char => tcx.intern_layout(LayoutS::scalar(
577 Scalar::Initialized {
578 value: Int(I32, false),
579 valid_range: WrappingRange { start: 0, end: 0x10FFFF },
582 ty::Int(ity) => scalar(Int(Integer::from_int_ty(dl, ity), true)),
583 ty::Uint(ity) => scalar(Int(Integer::from_uint_ty(dl, ity), false)),
584 ty::Float(fty) => scalar(match fty {
585 ty::FloatTy::F32 => F32,
586 ty::FloatTy::F64 => F64,
589 let mut ptr = scalar_unit(Pointer);
590 ptr.valid_range_mut().start = 1;
591 tcx.intern_layout(LayoutS::scalar(self, ptr))
595 ty::Never => tcx.intern_layout(LayoutS {
596 variants: Variants::Single { index: VariantIdx::new(0) },
597 fields: FieldsShape::Primitive,
598 abi: Abi::Uninhabited,
604 // Potentially-wide pointers.
605 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
606 let mut data_ptr = scalar_unit(Pointer);
607 if !ty.is_unsafe_ptr() {
608 data_ptr.valid_range_mut().start = 1;
611 let pointee = tcx.normalize_erasing_regions(param_env, pointee);
612 if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
613 return Ok(tcx.intern_layout(LayoutS::scalar(self, data_ptr)));
616 let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
617 let metadata = match unsized_part.kind() {
619 return Ok(tcx.intern_layout(LayoutS::scalar(self, data_ptr)));
621 ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)),
623 let mut vtable = scalar_unit(Pointer);
624 vtable.valid_range_mut().start = 1;
627 _ => return Err(LayoutError::Unknown(unsized_part)),
630 // Effectively a (ptr, meta) tuple.
631 tcx.intern_layout(self.scalar_pair(data_ptr, metadata))
634 ty::Dynamic(_, _, ty::DynStar) => {
635 let mut data = scalar_unit(Int(dl.ptr_sized_integer(), false));
636 data.valid_range_mut().start = 0;
637 let mut vtable = scalar_unit(Pointer);
638 vtable.valid_range_mut().start = 1;
639 tcx.intern_layout(self.scalar_pair(data, vtable))
642 // Arrays and slices.
643 ty::Array(element, mut count) => {
644 if count.has_projections() {
645 count = tcx.normalize_erasing_regions(param_env, count);
646 if count.has_projections() {
647 return Err(LayoutError::Unknown(ty));
651 let count = count.try_eval_usize(tcx, param_env).ok_or(LayoutError::Unknown(ty))?;
652 let element = self.layout_of(element)?;
654 element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;
657 if count != 0 && tcx.conservative_is_privately_uninhabited(param_env.and(ty)) {
660 Abi::Aggregate { sized: true }
663 let largest_niche = if count != 0 { element.largest_niche } else { None };
665 tcx.intern_layout(LayoutS {
666 variants: Variants::Single { index: VariantIdx::new(0) },
667 fields: FieldsShape::Array { stride: element.size, count },
670 align: element.align,
674 ty::Slice(element) => {
675 let element = self.layout_of(element)?;
676 tcx.intern_layout(LayoutS {
677 variants: Variants::Single { index: VariantIdx::new(0) },
678 fields: FieldsShape::Array { stride: element.size, count: 0 },
679 abi: Abi::Aggregate { sized: false },
681 align: element.align,
685 ty::Str => tcx.intern_layout(LayoutS {
686 variants: Variants::Single { index: VariantIdx::new(0) },
687 fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
688 abi: Abi::Aggregate { sized: false },
695 ty::FnDef(..) => univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?,
696 ty::Dynamic(_, _, ty::Dyn) | ty::Foreign(..) => {
697 let mut unit = self.univariant_uninterned(
700 &ReprOptions::default(),
701 StructKind::AlwaysSized,
704 Abi::Aggregate { ref mut sized } => *sized = false,
707 tcx.intern_layout(unit)
710 ty::Generator(def_id, substs, _) => self.generator_layout(ty, def_id, substs)?,
712 ty::Closure(_, ref substs) => {
713 let tys = substs.as_closure().upvar_tys();
715 &tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
716 &ReprOptions::default(),
717 StructKind::AlwaysSized,
723 if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };
726 &tys.iter().map(|k| self.layout_of(k)).collect::<Result<Vec<_>, _>>()?,
727 &ReprOptions::default(),
732 // SIMD vector types.
733 ty::Adt(def, substs) if def.repr().simd() => {
734 if !def.is_struct() {
735 // Should have yielded E0517 by now.
736 tcx.sess.delay_span_bug(
738 "#[repr(simd)] was applied to an ADT that is not a struct",
740 return Err(LayoutError::Unknown(ty));
743 // Supported SIMD vectors are homogeneous ADTs with at least one field:
745 // * #[repr(simd)] struct S(T, T, T, T);
746 // * #[repr(simd)] struct S { x: T, y: T, z: T, w: T }
747 // * #[repr(simd)] struct S([T; 4])
749 // where T is a primitive scalar (integer/float/pointer).
751 // SIMD vectors with zero fields are not supported.
752 // (should be caught by typeck)
753 if def.non_enum_variant().fields.is_empty() {
754 tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
757 // Type of the first ADT field:
758 let f0_ty = def.non_enum_variant().fields[0].ty(tcx, substs);
760 // Heterogeneous SIMD vectors are not supported:
761 // (should be caught by typeck)
762 for fi in &def.non_enum_variant().fields {
763 if fi.ty(tcx, substs) != f0_ty {
764 tcx.sess.fatal(&format!("monomorphising heterogeneous SIMD type `{}`", ty));
768 // The element type and number of elements of the SIMD vector
769 // are obtained from:
771 // * the element type and length of the single array field, if
772 // the first field is of array type, or
774 // * the homogeneous field type and the number of fields.
775 let (e_ty, e_len, is_array) = if let ty::Array(e_ty, _) = f0_ty.kind() {
776 // First ADT field is an array:
778 // SIMD vectors with multiple array fields are not supported:
779 // (should be caught by typeck)
780 if def.non_enum_variant().fields.len() != 1 {
781 tcx.sess.fatal(&format!(
782 "monomorphising SIMD type `{}` with more than one array field",
787 // Extract the number of elements from the layout of the array field:
788 let FieldsShape::Array { count, .. } = self.layout_of(f0_ty)?.layout.fields() else {
789 return Err(LayoutError::Unknown(ty));
792 (*e_ty, *count, true)
794 // First ADT field is not an array:
795 (f0_ty, def.non_enum_variant().fields.len() as _, false)
798 // SIMD vectors of zero length are not supported.
799 // Additionally, lengths are capped at 2^16 as a fixed maximum backends must
802 // Can't be caught in typeck if the array length is generic.
804 tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
805 } else if e_len > MAX_SIMD_LANES {
806 tcx.sess.fatal(&format!(
807 "monomorphising SIMD type `{}` of length greater than {}",
812 // Compute the ABI of the element type:
813 let e_ly = self.layout_of(e_ty)?;
814 let Abi::Scalar(e_abi) = e_ly.abi else {
815 // This error isn't caught in typeck, e.g., if
816 // the element type of the vector is generic.
817 tcx.sess.fatal(&format!(
818 "monomorphising SIMD type `{}` with a non-primitive-scalar \
819 (integer/float/pointer) element type `{}`",
824 // Compute the size and alignment of the vector:
825 let size = e_ly.size.checked_mul(e_len, dl).ok_or(LayoutError::SizeOverflow(ty))?;
826 let align = dl.vector_align(size);
827 let size = size.align_to(align.abi);
829 // Compute the placement of the vector fields:
830 let fields = if is_array {
831 FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] }
833 FieldsShape::Array { stride: e_ly.size, count: e_len }
836 tcx.intern_layout(LayoutS {
837 variants: Variants::Single { index: VariantIdx::new(0) },
839 abi: Abi::Vector { element: e_abi, count: e_len },
840 largest_niche: e_ly.largest_niche,
847 ty::Adt(def, substs) => {
848 // Cache the field layouts.
855 .map(|field| self.layout_of(field.ty(tcx, substs)))
856 .collect::<Result<Vec<_>, _>>()
858 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
861 if def.repr().pack.is_some() && def.repr().align.is_some() {
862 self.tcx.sess.delay_span_bug(
863 tcx.def_span(def.did()),
864 "union cannot be packed and aligned",
866 return Err(LayoutError::Unknown(ty));
870 if def.repr().pack.is_some() { dl.i8_align } else { dl.aggregate_align };
872 if let Some(repr_align) = def.repr().align {
873 align = align.max(AbiAndPrefAlign::new(repr_align));
876 let optimize = !def.repr().inhibit_union_abi_opt();
877 let mut size = Size::ZERO;
878 let mut abi = Abi::Aggregate { sized: true };
879 let index = VariantIdx::new(0);
880 for field in &variants[index] {
881 assert!(!field.is_unsized());
882 align = align.max(field.align);
884 // If all non-ZST fields have the same ABI, forward this ABI
885 if optimize && !field.is_zst() {
886 // Discard valid range information and allow undef
887 let field_abi = match field.abi {
888 Abi::Scalar(x) => Abi::Scalar(x.to_union()),
889 Abi::ScalarPair(x, y) => {
890 Abi::ScalarPair(x.to_union(), y.to_union())
892 Abi::Vector { element: x, count } => {
893 Abi::Vector { element: x.to_union(), count }
895 Abi::Uninhabited | Abi::Aggregate { .. } => {
896 Abi::Aggregate { sized: true }
900 if size == Size::ZERO {
901 // first non ZST: initialize 'abi'
903 } else if abi != field_abi {
904 // different fields have different ABI: reset to Aggregate
905 abi = Abi::Aggregate { sized: true };
909 size = cmp::max(size, field.size);
912 if let Some(pack) = def.repr().pack {
913 align = align.min(AbiAndPrefAlign::new(pack));
916 return Ok(tcx.intern_layout(LayoutS {
917 variants: Variants::Single { index },
918 fields: FieldsShape::Union(
919 NonZeroUsize::new(variants[index].len())
920 .ok_or(LayoutError::Unknown(ty))?,
925 size: size.align_to(align.abi),
929 // A variant is absent if it's uninhabited and only has ZST fields.
930 // Present uninhabited variants only require space for their fields,
931 // but *not* an encoding of the discriminant (e.g., a tag value).
932 // See issue #49298 for more details on the need to leave space
933 // for non-ZST uninhabited data (mostly partial initialization).
934 let absent = |fields: &[TyAndLayout<'_>]| {
935 let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
936 let is_zst = fields.iter().all(|f| f.is_zst());
937 uninhabited && is_zst
939 let (present_first, present_second) = {
940 let mut present_variants = variants
942 .filter_map(|(i, v)| if absent(v) { None } else { Some(i) });
943 (present_variants.next(), present_variants.next())
945 let present_first = match present_first {
946 Some(present_first) => present_first,
947 // Uninhabited because it has no variants, or only absent ones.
948 None if def.is_enum() => {
949 return Ok(tcx.layout_of(param_env.and(tcx.types.never))?.layout);
951 // If it's a struct, still compute a layout so that we can still compute the
953 None => VariantIdx::new(0),
956 let is_struct = !def.is_enum() ||
957 // Only one variant is present.
958 (present_second.is_none() &&
959 // Representation optimizations are allowed.
960 !def.repr().inhibit_enum_layout_opt());
962 // Struct, or univariant enum equivalent to a struct.
963 // (Typechecking will reject discriminant-sizing attrs.)
965 let v = present_first;
966 let kind = if def.is_enum() || variants[v].is_empty() {
967 StructKind::AlwaysSized
969 let param_env = tcx.param_env(def.did());
970 let last_field = def.variant(v).fields.last().unwrap();
972 tcx.type_of(last_field.did).is_sized(tcx.at(DUMMY_SP), param_env);
974 StructKind::MaybeUnsized
976 StructKind::AlwaysSized
980 let mut st = self.univariant_uninterned(ty, &variants[v], &def.repr(), kind)?;
981 st.variants = Variants::Single { index: v };
983 if def.is_unsafe_cell() {
984 let hide_niches = |scalar: &mut _| match scalar {
985 Scalar::Initialized { value, valid_range } => {
986 *valid_range = WrappingRange::full(value.size(dl))
988 // Already doesn't have any niches
989 Scalar::Union { .. } => {}
992 Abi::Uninhabited => {}
993 Abi::Scalar(scalar) => hide_niches(scalar),
994 Abi::ScalarPair(a, b) => {
998 Abi::Vector { element, count: _ } => hide_niches(element),
999 Abi::Aggregate { sized: _ } => {}
1001 st.largest_niche = None;
1002 return Ok(tcx.intern_layout(st));
1005 let (start, end) = self.tcx.layout_scalar_valid_range(def.did());
1007 Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
1008 // the asserts ensure that we are not using the
1009 // `#[rustc_layout_scalar_valid_range(n)]`
1010 // attribute to widen the range of anything as that would probably
1011 // result in UB somewhere
1012 // FIXME(eddyb) the asserts are probably not needed,
1013 // as larger validity ranges would result in missed
1014 // optimizations, *not* wrongly assuming the inner
1015 // value is valid. e.g. unions enlarge validity ranges,
1016 // because the values may be uninitialized.
1017 if let Bound::Included(start) = start {
1018 // FIXME(eddyb) this might be incorrect - it doesn't
1019 // account for wrap-around (end < start) ranges.
1020 let valid_range = scalar.valid_range_mut();
1021 assert!(valid_range.start <= start);
1022 valid_range.start = start;
1024 if let Bound::Included(end) = end {
1025 // FIXME(eddyb) this might be incorrect - it doesn't
1026 // account for wrap-around (end < start) ranges.
1027 let valid_range = scalar.valid_range_mut();
1028 assert!(valid_range.end >= end);
1029 valid_range.end = end;
1032 // Update `largest_niche` if we have introduced a larger niche.
1033 let niche = Niche::from_scalar(dl, Size::ZERO, *scalar);
1034 if let Some(niche) = niche {
1035 match st.largest_niche {
1036 Some(largest_niche) => {
1037 // Replace the existing niche even if they're equal,
1038 // because this one is at a lower offset.
1039 if largest_niche.available(dl) <= niche.available(dl) {
1040 st.largest_niche = Some(niche);
1043 None => st.largest_niche = Some(niche),
1048 start == Bound::Unbounded && end == Bound::Unbounded,
1049 "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
1055 return Ok(tcx.intern_layout(st));
1058 // At this point, we have handled all unions and
1059 // structs. (We have also handled univariant enums
1060 // that allow representation optimization.)
1061 assert!(def.is_enum());
1063 // Until we've decided whether to use the tagged or
1064 // niche filling LayoutS, we don't want to intern the
1065 // variant layouts, so we can't store them in the
1066 // overall LayoutS. Store the overall LayoutS
1067 // and the variant LayoutSs here until then.
1068 struct TmpLayout<'tcx> {
1069 layout: LayoutS<'tcx>,
1070 variants: IndexVec<VariantIdx, LayoutS<'tcx>>,
1073 let calculate_niche_filling_layout =
1074 || -> Result<Option<TmpLayout<'tcx>>, LayoutError<'tcx>> {
1075 // The current code for niche-filling relies on variant indices
1076 // instead of actual discriminants, so enums with
1077 // explicit discriminants (RFC #2363) would misbehave.
1078 if def.repr().inhibit_enum_layout_opt()
1082 .any(|(i, v)| v.discr != ty::VariantDiscr::Relative(i.as_u32()))
1087 if variants.len() < 2 {
1091 let mut align = dl.aggregate_align;
1092 let mut variant_layouts = variants
1095 let mut st = self.univariant_uninterned(
1099 StructKind::AlwaysSized,
1101 st.variants = Variants::Single { index: j };
1103 align = align.max(st.align);
1107 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1109 let largest_variant_index = match variant_layouts
1111 .max_by_key(|(_i, layout)| layout.size.bytes())
1112 .map(|(i, _layout)| i)
1114 None => return Ok(None),
1118 let all_indices = VariantIdx::new(0)..=VariantIdx::new(variants.len() - 1);
1119 let needs_disc = |index: VariantIdx| {
1120 index != largest_variant_index && !absent(&variants[index])
1122 let niche_variants = all_indices.clone().find(|v| needs_disc(*v)).unwrap()
1123 ..=all_indices.rev().find(|v| needs_disc(*v)).unwrap();
1125 let count = niche_variants.size_hint().1.unwrap() as u128;
1127 // Find the field with the largest niche
1128 let (field_index, niche, (niche_start, niche_scalar)) = match variants
1129 [largest_variant_index]
1132 .filter_map(|(j, field)| Some((j, field.largest_niche?)))
1133 .max_by_key(|(_, niche)| niche.available(dl))
1134 .and_then(|(j, niche)| Some((j, niche, niche.reserve(self, count)?)))
1136 None => return Ok(None),
1140 let niche_offset = niche.offset
1141 + variant_layouts[largest_variant_index].fields.offset(field_index);
1142 let niche_size = niche.value.size(dl);
1143 let size = variant_layouts[largest_variant_index].size.align_to(align.abi);
1145 let all_variants_fit =
1146 variant_layouts.iter_enumerated_mut().all(|(i, layout)| {
1147 if i == largest_variant_index {
1151 layout.largest_niche = None;
1153 if layout.size <= niche_offset {
1154 // This variant will fit before the niche.
1158 // Determine if it'll fit after the niche.
1159 let this_align = layout.align.abi;
1160 let this_offset = (niche_offset + niche_size).align_to(this_align);
1162 if this_offset + layout.size > size {
1166 // It'll fit, but we need to make some adjustments.
1167 match layout.fields {
1168 FieldsShape::Arbitrary { ref mut offsets, .. } => {
1169 for (j, offset) in offsets.iter_mut().enumerate() {
1170 if !variants[i][j].is_zst() {
1171 *offset += this_offset;
1176 panic!("Layout of fields should be Arbitrary for variants")
1180 // It can't be a Scalar or ScalarPair because the offset isn't 0.
1181 if !layout.abi.is_uninhabited() {
1182 layout.abi = Abi::Aggregate { sized: true };
1184 layout.size += this_offset;
1189 if !all_variants_fit {
1193 let largest_niche = Niche::from_scalar(dl, niche_offset, niche_scalar);
1195 let others_zst = variant_layouts.iter_enumerated().all(|(i, layout)| {
1196 i == largest_variant_index || layout.size == Size::ZERO
1198 let same_size = size == variant_layouts[largest_variant_index].size;
1199 let same_align = align == variant_layouts[largest_variant_index].align;
1201 let abi = if variant_layouts.iter().all(|v| v.abi.is_uninhabited()) {
1203 } else if same_size && same_align && others_zst {
1204 match variant_layouts[largest_variant_index].abi {
1205 // When the total alignment and size match, we can use the
1206 // same ABI as the scalar variant with the reserved niche.
1207 Abi::Scalar(_) => Abi::Scalar(niche_scalar),
1208 Abi::ScalarPair(first, second) => {
1209 // Only the niche is guaranteed to be initialised,
1210 // so use union layouts for the other primitive.
1211 if niche_offset == Size::ZERO {
1212 Abi::ScalarPair(niche_scalar, second.to_union())
1214 Abi::ScalarPair(first.to_union(), niche_scalar)
1217 _ => Abi::Aggregate { sized: true },
1220 Abi::Aggregate { sized: true }
1223 let layout = LayoutS {
1224 variants: Variants::Multiple {
1226 tag_encoding: TagEncoding::Niche {
1227 untagged_variant: largest_variant_index,
1232 variants: IndexVec::new(),
1234 fields: FieldsShape::Arbitrary {
1235 offsets: vec![niche_offset],
1236 memory_index: vec![0],
1244 Ok(Some(TmpLayout { layout, variants: variant_layouts }))
1247 let niche_filling_layout = calculate_niche_filling_layout()?;
1249 let (mut min, mut max) = (i128::MAX, i128::MIN);
1250 let discr_type = def.repr().discr_type();
1251 let bits = Integer::from_attr(self, discr_type).size().bits();
1252 for (i, discr) in def.discriminants(tcx) {
1253 if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
1256 let mut x = discr.val as i128;
1257 if discr_type.is_signed() {
1258 // sign extend the raw representation to be an i128
1259 x = (x << (128 - bits)) >> (128 - bits);
1268 // We might have no inhabited variants, so pretend there's at least one.
1269 if (min, max) == (i128::MAX, i128::MIN) {
1273 assert!(min <= max, "discriminant range is {}...{}", min, max);
1274 let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr(), min, max);
1276 let mut align = dl.aggregate_align;
1277 let mut size = Size::ZERO;
1279 // We're interested in the smallest alignment, so start large.
1280 let mut start_align = Align::from_bytes(256).unwrap();
1281 assert_eq!(Integer::for_align(dl, start_align), None);
1283 // repr(C) on an enum tells us to make a (tag, union) layout,
1284 // so we need to grow the prefix alignment to be at least
1285 // the alignment of the union. (This value is used both for
1286 // determining the alignment of the overall enum, and the
1287 // determining the alignment of the payload after the tag.)
1288 let mut prefix_align = min_ity.align(dl).abi;
1290 for fields in &variants {
1291 for field in fields {
1292 prefix_align = prefix_align.max(field.align.abi);
1297 // Create the set of structs that represent each variant.
1298 let mut layout_variants = variants
1300 .map(|(i, field_layouts)| {
1301 let mut st = self.univariant_uninterned(
1305 StructKind::Prefixed(min_ity.size(), prefix_align),
1307 st.variants = Variants::Single { index: i };
1308 // Find the first field we can't move later
1309 // to make room for a larger discriminant.
1311 st.fields.index_by_increasing_offset().map(|j| field_layouts[j])
1313 if !field.is_zst() || field.align.abi.bytes() != 1 {
1314 start_align = start_align.min(field.align.abi);
1318 size = cmp::max(size, st.size);
1319 align = align.max(st.align);
1322 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1324 // Align the maximum variant size to the largest alignment.
1325 size = size.align_to(align.abi);
1327 if size.bytes() >= dl.obj_size_bound() {
1328 return Err(LayoutError::SizeOverflow(ty));
1331 let typeck_ity = Integer::from_attr(dl, def.repr().discr_type());
1332 if typeck_ity < min_ity {
1333 // It is a bug if Layout decided on a greater discriminant size than typeck for
1334 // some reason at this point (based on values discriminant can take on). Mostly
1335 // because this discriminant will be loaded, and then stored into variable of
1336 // type calculated by typeck. Consider such case (a bug): typeck decided on
1337 // byte-sized discriminant, but layout thinks we need a 16-bit to store all
1338 // discriminant values. That would be a bug, because then, in codegen, in order
1339 // to store this 16-bit discriminant into 8-bit sized temporary some of the
1340 // space necessary to represent would have to be discarded (or layout is wrong
1341 // on thinking it needs 16 bits)
1343 "layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
1347 // However, it is fine to make discr type however large (as an optimisation)
1348 // after this point – we’ll just truncate the value we load in codegen.
1351 // Check to see if we should use a different type for the
1352 // discriminant. We can safely use a type with the same size
1353 // as the alignment of the first field of each variant.
1354 // We increase the size of the discriminant to avoid LLVM copying
1355 // padding when it doesn't need to. This normally causes unaligned
1356 // load/stores and excessive memcpy/memset operations. By using a
1357 // bigger integer size, LLVM can be sure about its contents and
1358 // won't be so conservative.
1360 // Use the initial field alignment
1361 let mut ity = if def.repr().c() || def.repr().int.is_some() {
1364 Integer::for_align(dl, start_align).unwrap_or(min_ity)
1367 // If the alignment is not larger than the chosen discriminant size,
1368 // don't use the alignment as the final size.
1372 // Patch up the variants' first few fields.
1373 let old_ity_size = min_ity.size();
1374 let new_ity_size = ity.size();
1375 for variant in &mut layout_variants {
1376 match variant.fields {
1377 FieldsShape::Arbitrary { ref mut offsets, .. } => {
1379 if *i <= old_ity_size {
1380 assert_eq!(*i, old_ity_size);
1384 // We might be making the struct larger.
1385 if variant.size <= old_ity_size {
1386 variant.size = new_ity_size;
1394 let tag_mask = ity.size().unsigned_int_max();
1395 let tag = Scalar::Initialized {
1396 value: Int(ity, signed),
1397 valid_range: WrappingRange {
1398 start: (min as u128 & tag_mask),
1399 end: (max as u128 & tag_mask),
1402 let mut abi = Abi::Aggregate { sized: true };
1404 if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
1405 abi = Abi::Uninhabited;
1406 } else if tag.size(dl) == size {
1407 // Make sure we only use scalar layout when the enum is entirely its
1408 // own tag (i.e. it has no padding nor any non-ZST variant fields).
1409 abi = Abi::Scalar(tag);
1411 // Try to use a ScalarPair for all tagged enums.
1412 let mut common_prim = None;
1413 let mut common_prim_initialized_in_all_variants = true;
1414 for (field_layouts, layout_variant) in iter::zip(&variants, &layout_variants) {
1415 let FieldsShape::Arbitrary { ref offsets, .. } = layout_variant.fields else {
1419 iter::zip(field_layouts, offsets).filter(|p| !p.0.is_zst());
1420 let (field, offset) = match (fields.next(), fields.next()) {
1422 common_prim_initialized_in_all_variants = false;
1425 (Some(pair), None) => pair,
1431 let prim = match field.abi {
1432 Abi::Scalar(scalar) => {
1433 common_prim_initialized_in_all_variants &=
1434 matches!(scalar, Scalar::Initialized { .. });
1442 if let Some(pair) = common_prim {
1443 // This is pretty conservative. We could go fancier
1444 // by conflating things like i32 and u32, or even
1445 // realising that (u8, u8) could just cohabit with
1447 if pair != (prim, offset) {
1452 common_prim = Some((prim, offset));
1455 if let Some((prim, offset)) = common_prim {
1456 let prim_scalar = if common_prim_initialized_in_all_variants {
1459 // Common prim might be uninit.
1460 Scalar::Union { value: prim }
1462 let pair = self.scalar_pair(tag, prim_scalar);
1463 let pair_offsets = match pair.fields {
1464 FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
1465 assert_eq!(memory_index, &[0, 1]);
1470 if pair_offsets[0] == Size::ZERO
1471 && pair_offsets[1] == *offset
1472 && align == pair.align
1473 && size == pair.size
1475 // We can use `ScalarPair` only when it matches our
1476 // already computed layout (including `#[repr(C)]`).
1482 // If we pick a "clever" (by-value) ABI, we might have to adjust the ABI of the
1483 // variants to ensure they are consistent. This is because a downcast is
1484 // semantically a NOP, and thus should not affect layout.
1485 if matches!(abi, Abi::Scalar(..) | Abi::ScalarPair(..)) {
1486 for variant in &mut layout_variants {
1487 // We only do this for variants with fields; the others are not accessed anyway.
1488 // Also do not overwrite any already existing "clever" ABIs.
1489 if variant.fields.count() > 0
1490 && matches!(variant.abi, Abi::Aggregate { .. })
1493 // Also need to bump up the size and alignment, so that the entire value fits in here.
1494 variant.size = cmp::max(variant.size, size);
1495 variant.align.abi = cmp::max(variant.align.abi, align.abi);
1500 let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag);
1502 let tagged_layout = LayoutS {
1503 variants: Variants::Multiple {
1505 tag_encoding: TagEncoding::Direct,
1507 variants: IndexVec::new(),
1509 fields: FieldsShape::Arbitrary {
1510 offsets: vec![Size::ZERO],
1511 memory_index: vec![0],
1519 let tagged_layout = TmpLayout { layout: tagged_layout, variants: layout_variants };
1521 let mut best_layout = match (tagged_layout, niche_filling_layout) {
1523 // Pick the smaller layout; otherwise,
1524 // pick the layout with the larger niche; otherwise,
1525 // pick tagged as it has simpler codegen.
1527 let niche_size = |tmp_l: &TmpLayout<'_>| {
1528 tmp_l.layout.largest_niche.map_or(0, |n| n.available(dl))
1531 tl.layout.size.cmp(&nl.layout.size),
1532 niche_size(&tl).cmp(&niche_size(&nl)),
1535 (Equal, Less) => nl,
1542 // Now we can intern the variant layouts and store them in the enum layout.
1543 best_layout.layout.variants = match best_layout.layout.variants {
1544 Variants::Multiple { tag, tag_encoding, tag_field, .. } => Variants::Multiple {
1548 variants: best_layout
1551 .map(|layout| tcx.intern_layout(layout))
1557 tcx.intern_layout(best_layout.layout)
1560 // Types with no meaningful known layout.
1561 ty::Projection(_) | ty::Opaque(..) => {
1562 // NOTE(eddyb) `layout_of` query should've normalized these away,
1563 // if that was possible, so there's no reason to try again here.
1564 return Err(LayoutError::Unknown(ty));
1567 ty::Placeholder(..) | ty::GeneratorWitness(..) | ty::Infer(_) => {
1568 bug!("Layout::compute: unexpected type `{}`", ty)
1571 ty::Bound(..) | ty::Param(_) | ty::Error(_) => {
1572 return Err(LayoutError::Unknown(ty));
1578 /// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
1579 #[derive(Clone, Debug, PartialEq)]
1580 enum SavedLocalEligibility {
1582 Assigned(VariantIdx),
1583 // FIXME: Use newtype_index so we aren't wasting bytes
1584 Ineligible(Option<u32>),
1587 // When laying out generators, we divide our saved local fields into two
1588 // categories: overlap-eligible and overlap-ineligible.
1590 // Those fields which are ineligible for overlap go in a "prefix" at the
1591 // beginning of the layout, and always have space reserved for them.
1593 // Overlap-eligible fields are only assigned to one variant, so we lay
1594 // those fields out for each variant and put them right after the
1597 // Finally, in the layout details, we point to the fields from the
1598 // variants they are assigned to. It is possible for some fields to be
1599 // included in multiple variants. No field ever "moves around" in the
1600 // layout; its offset is always the same.
1602 // Also included in the layout are the upvars and the discriminant.
1603 // These are included as fields on the "outer" layout; they are not part
1605 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
1606 /// Compute the eligibility and assignment of each local.
1607 fn generator_saved_local_eligibility(
1609 info: &GeneratorLayout<'tcx>,
1610 ) -> (BitSet<GeneratorSavedLocal>, IndexVec<GeneratorSavedLocal, SavedLocalEligibility>) {
1611 use SavedLocalEligibility::*;
1613 let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> =
1614 IndexVec::from_elem_n(Unassigned, info.field_tys.len());
1616 // The saved locals not eligible for overlap. These will get
1617 // "promoted" to the prefix of our generator.
1618 let mut ineligible_locals = BitSet::new_empty(info.field_tys.len());
1620 // Figure out which of our saved locals are fields in only
1621 // one variant. The rest are deemed ineligible for overlap.
1622 for (variant_index, fields) in info.variant_fields.iter_enumerated() {
1623 for local in fields {
1624 match assignments[*local] {
1626 assignments[*local] = Assigned(variant_index);
1629 // We've already seen this local at another suspension
1630 // point, so it is no longer a candidate.
1632 "removing local {:?} in >1 variant ({:?}, {:?})",
1637 ineligible_locals.insert(*local);
1638 assignments[*local] = Ineligible(None);
1645 // Next, check every pair of eligible locals to see if they
1647 for local_a in info.storage_conflicts.rows() {
1648 let conflicts_a = info.storage_conflicts.count(local_a);
1649 if ineligible_locals.contains(local_a) {
1653 for local_b in info.storage_conflicts.iter(local_a) {
1654 // local_a and local_b are storage live at the same time, therefore they
1655 // cannot overlap in the generator layout. The only way to guarantee
1656 // this is if they are in the same variant, or one is ineligible
1657 // (which means it is stored in every variant).
1658 if ineligible_locals.contains(local_b)
1659 || assignments[local_a] == assignments[local_b]
1664 // If they conflict, we will choose one to make ineligible.
1665 // This is not always optimal; it's just a greedy heuristic that
1666 // seems to produce good results most of the time.
1667 let conflicts_b = info.storage_conflicts.count(local_b);
1668 let (remove, other) =
1669 if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
1670 ineligible_locals.insert(remove);
1671 assignments[remove] = Ineligible(None);
1672 trace!("removing local {:?} due to conflict with {:?}", remove, other);
1676 // Count the number of variants in use. If only one of them, then it is
1677 // impossible to overlap any locals in our layout. In this case it's
1678 // always better to make the remaining locals ineligible, so we can
1679 // lay them out with the other locals in the prefix and eliminate
1680 // unnecessary padding bytes.
1682 let mut used_variants = BitSet::new_empty(info.variant_fields.len());
1683 for assignment in &assignments {
1684 if let Assigned(idx) = assignment {
1685 used_variants.insert(*idx);
1688 if used_variants.count() < 2 {
1689 for assignment in assignments.iter_mut() {
1690 *assignment = Ineligible(None);
1692 ineligible_locals.insert_all();
1696 // Write down the order of our locals that will be promoted to the prefix.
1698 for (idx, local) in ineligible_locals.iter().enumerate() {
1699 assignments[local] = Ineligible(Some(idx as u32));
1702 debug!("generator saved local assignments: {:?}", assignments);
1704 (ineligible_locals, assignments)
1707 /// Compute the full generator layout.
1708 fn generator_layout(
1711 def_id: hir::def_id::DefId,
1712 substs: SubstsRef<'tcx>,
1713 ) -> Result<Layout<'tcx>, LayoutError<'tcx>> {
1714 use SavedLocalEligibility::*;
1716 let subst_field = |ty: Ty<'tcx>| EarlyBinder(ty).subst(tcx, substs);
1718 let Some(info) = tcx.generator_layout(def_id) else {
1719 return Err(LayoutError::Unknown(ty));
1721 let (ineligible_locals, assignments) = self.generator_saved_local_eligibility(&info);
1723 // Build a prefix layout, including "promoting" all ineligible
1724 // locals as part of the prefix. We compute the layout of all of
1725 // these fields at once to get optimal packing.
1726 let tag_index = substs.as_generator().prefix_tys().count();
1728 // `info.variant_fields` already accounts for the reserved variants, so no need to add them.
1729 let max_discr = (info.variant_fields.len() - 1) as u128;
1730 let discr_int = Integer::fit_unsigned(max_discr);
1731 let discr_int_ty = discr_int.to_ty(tcx, false);
1732 let tag = Scalar::Initialized {
1733 value: Primitive::Int(discr_int, false),
1734 valid_range: WrappingRange { start: 0, end: max_discr },
1736 let tag_layout = self.tcx.intern_layout(LayoutS::scalar(self, tag));
1737 let tag_layout = TyAndLayout { ty: discr_int_ty, layout: tag_layout };
1739 let promoted_layouts = ineligible_locals
1741 .map(|local| subst_field(info.field_tys[local]))
1742 .map(|ty| tcx.mk_maybe_uninit(ty))
1743 .map(|ty| self.layout_of(ty));
1744 let prefix_layouts = substs
1747 .map(|ty| self.layout_of(ty))
1748 .chain(iter::once(Ok(tag_layout)))
1749 .chain(promoted_layouts)
1750 .collect::<Result<Vec<_>, _>>()?;
1751 let prefix = self.univariant_uninterned(
1754 &ReprOptions::default(),
1755 StructKind::AlwaysSized,
1758 let (prefix_size, prefix_align) = (prefix.size, prefix.align);
1760 // Split the prefix layout into the "outer" fields (upvars and
1761 // discriminant) and the "promoted" fields. Promoted fields will
1762 // get included in each variant that requested them in
1764 debug!("prefix = {:#?}", prefix);
1765 let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
1766 FieldsShape::Arbitrary { mut offsets, memory_index } => {
1767 let mut inverse_memory_index = invert_mapping(&memory_index);
1769 // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
1770 // "outer" and "promoted" fields respectively.
1771 let b_start = (tag_index + 1) as u32;
1772 let offsets_b = offsets.split_off(b_start as usize);
1773 let offsets_a = offsets;
1775 // Disentangle the "a" and "b" components of `inverse_memory_index`
1776 // by preserving the order but keeping only one disjoint "half" each.
1777 // FIXME(eddyb) build a better abstraction for permutations, if possible.
1778 let inverse_memory_index_b: Vec<_> =
1779 inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
1780 inverse_memory_index.retain(|&i| i < b_start);
1781 let inverse_memory_index_a = inverse_memory_index;
1783 // Since `inverse_memory_index_{a,b}` each only refer to their
1784 // respective fields, they can be safely inverted
1785 let memory_index_a = invert_mapping(&inverse_memory_index_a);
1786 let memory_index_b = invert_mapping(&inverse_memory_index_b);
1789 FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
1790 (outer_fields, offsets_b, memory_index_b)
1795 let mut size = prefix.size;
1796 let mut align = prefix.align;
1800 .map(|(index, variant_fields)| {
1801 // Only include overlap-eligible fields when we compute our variant layout.
1802 let variant_only_tys = variant_fields
1804 .filter(|local| match assignments[**local] {
1805 Unassigned => bug!(),
1806 Assigned(v) if v == index => true,
1807 Assigned(_) => bug!("assignment does not match variant"),
1808 Ineligible(_) => false,
1810 .map(|local| subst_field(info.field_tys[*local]));
1812 let mut variant = self.univariant_uninterned(
1815 .map(|ty| self.layout_of(ty))
1816 .collect::<Result<Vec<_>, _>>()?,
1817 &ReprOptions::default(),
1818 StructKind::Prefixed(prefix_size, prefix_align.abi),
1820 variant.variants = Variants::Single { index };
1822 let FieldsShape::Arbitrary { offsets, memory_index } = variant.fields else {
1826 // Now, stitch the promoted and variant-only fields back together in
1827 // the order they are mentioned by our GeneratorLayout.
1828 // Because we only use some subset (that can differ between variants)
1829 // of the promoted fields, we can't just pick those elements of the
1830 // `promoted_memory_index` (as we'd end up with gaps).
1831 // So instead, we build an "inverse memory_index", as if all of the
1832 // promoted fields were being used, but leave the elements not in the
1833 // subset as `INVALID_FIELD_IDX`, which we can filter out later to
1834 // obtain a valid (bijective) mapping.
1835 const INVALID_FIELD_IDX: u32 = !0;
1836 let mut combined_inverse_memory_index =
1837 vec![INVALID_FIELD_IDX; promoted_memory_index.len() + memory_index.len()];
1838 let mut offsets_and_memory_index = iter::zip(offsets, memory_index);
1839 let combined_offsets = variant_fields
1843 let (offset, memory_index) = match assignments[*local] {
1844 Unassigned => bug!(),
1846 let (offset, memory_index) =
1847 offsets_and_memory_index.next().unwrap();
1848 (offset, promoted_memory_index.len() as u32 + memory_index)
1850 Ineligible(field_idx) => {
1851 let field_idx = field_idx.unwrap() as usize;
1852 (promoted_offsets[field_idx], promoted_memory_index[field_idx])
1855 combined_inverse_memory_index[memory_index as usize] = i as u32;
1860 // Remove the unused slots and invert the mapping to obtain the
1861 // combined `memory_index` (also see previous comment).
1862 combined_inverse_memory_index.retain(|&i| i != INVALID_FIELD_IDX);
1863 let combined_memory_index = invert_mapping(&combined_inverse_memory_index);
1865 variant.fields = FieldsShape::Arbitrary {
1866 offsets: combined_offsets,
1867 memory_index: combined_memory_index,
1870 size = size.max(variant.size);
1871 align = align.max(variant.align);
1872 Ok(tcx.intern_layout(variant))
1874 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1876 size = size.align_to(align.abi);
1879 if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi().is_uninhabited()) {
1882 Abi::Aggregate { sized: true }
1885 let layout = tcx.intern_layout(LayoutS {
1886 variants: Variants::Multiple {
1888 tag_encoding: TagEncoding::Direct,
1889 tag_field: tag_index,
1892 fields: outer_fields,
1894 largest_niche: prefix.largest_niche,
1898 debug!("generator layout ({:?}): {:#?}", ty, layout);
1902 /// This is invoked by the `layout_of` query to record the final
1903 /// layout of each type.
1905 fn record_layout_for_printing(&self, layout: TyAndLayout<'tcx>) {
1906 // If we are running with `-Zprint-type-sizes`, maybe record layouts
1907 // for dumping later.
1908 if self.tcx.sess.opts.unstable_opts.print_type_sizes {
1909 self.record_layout_for_printing_outlined(layout)
1913 fn record_layout_for_printing_outlined(&self, layout: TyAndLayout<'tcx>) {
1914 // Ignore layouts that are done with non-empty environments or
1915 // non-monomorphic layouts, as the user only wants to see the stuff
1916 // resulting from the final codegen session.
1917 if layout.ty.has_param_types_or_consts() || !self.param_env.caller_bounds().is_empty() {
1921 // (delay format until we actually need it)
1922 let record = |kind, packed, opt_discr_size, variants| {
1923 let type_desc = format!("{:?}", layout.ty);
1924 self.tcx.sess.code_stats.record_type_size(
1935 let adt_def = match *layout.ty.kind() {
1936 ty::Adt(ref adt_def, _) => {
1937 debug!("print-type-size t: `{:?}` process adt", layout.ty);
1941 ty::Closure(..) => {
1942 debug!("print-type-size t: `{:?}` record closure", layout.ty);
1943 record(DataTypeKind::Closure, false, None, vec![]);
1948 debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
1953 let adt_kind = adt_def.adt_kind();
1954 let adt_packed = adt_def.repr().pack.is_some();
1956 let build_variant_info = |n: Option<Symbol>, flds: &[Symbol], layout: TyAndLayout<'tcx>| {
1957 let mut min_size = Size::ZERO;
1958 let field_info: Vec<_> = flds
1962 let field_layout = layout.field(self, i);
1963 let offset = layout.fields.offset(i);
1964 let field_end = offset + field_layout.size;
1965 if min_size < field_end {
1966 min_size = field_end;
1970 offset: offset.bytes(),
1971 size: field_layout.size.bytes(),
1972 align: field_layout.align.abi.bytes(),
1979 kind: if layout.is_unsized() { SizeKind::Min } else { SizeKind::Exact },
1980 align: layout.align.abi.bytes(),
1981 size: if min_size.bytes() == 0 { layout.size.bytes() } else { min_size.bytes() },
1986 match layout.variants {
1987 Variants::Single { index } => {
1988 if !adt_def.variants().is_empty() && layout.fields != FieldsShape::Primitive {
1990 "print-type-size `{:#?}` variant {}",
1992 adt_def.variant(index).name
1994 let variant_def = &adt_def.variant(index);
1995 let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
2000 vec![build_variant_info(Some(variant_def.name), &fields, layout)],
2003 // (This case arises for *empty* enums; so give it
2005 record(adt_kind.into(), adt_packed, None, vec![]);
2009 Variants::Multiple { tag, ref tag_encoding, .. } => {
2011 "print-type-size `{:#?}` adt general variants def {}",
2013 adt_def.variants().len()
2015 let variant_infos: Vec<_> = adt_def
2018 .map(|(i, variant_def)| {
2019 let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
2021 Some(variant_def.name),
2023 layout.for_variant(self, i),
2030 match tag_encoding {
2031 TagEncoding::Direct => Some(tag.size(self)),
2041 /// Type size "skeleton", i.e., the only information determining a type's size.
2042 /// While this is conservative, (aside from constant sizes, only pointers,
2043 /// newtypes thereof and null pointer optimized enums are allowed), it is
2044 /// enough to statically check common use cases of transmute.
2045 #[derive(Copy, Clone, Debug)]
2046 pub enum SizeSkeleton<'tcx> {
2047 /// Any statically computable Layout.
2050 /// A potentially-fat pointer.
2052 /// If true, this pointer is never null.
2054 /// The type which determines the unsized metadata, if any,
2055 /// of this pointer. Either a type parameter or a projection
2056 /// depending on one, with regions erased.
2061 impl<'tcx> SizeSkeleton<'tcx> {
2065 param_env: ty::ParamEnv<'tcx>,
2066 ) -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
2067 debug_assert!(!ty.has_infer_types_or_consts());
2069 // First try computing a static layout.
2070 let err = match tcx.layout_of(param_env.and(ty)) {
2072 return Ok(SizeSkeleton::Known(layout.size));
2078 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
2079 let non_zero = !ty.is_unsafe_ptr();
2080 let tail = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
2082 ty::Param(_) | ty::Projection(_) => {
2083 debug_assert!(tail.has_param_types_or_consts());
2084 Ok(SizeSkeleton::Pointer { non_zero, tail: tcx.erase_regions(tail) })
2087 "SizeSkeleton::compute({}): layout errored ({}), yet \
2088 tail `{}` is not a type parameter or a projection",
2096 ty::Adt(def, substs) => {
2097 // Only newtypes and enums w/ nullable pointer optimization.
2098 if def.is_union() || def.variants().is_empty() || def.variants().len() > 2 {
2102 // Get a zero-sized variant or a pointer newtype.
2103 let zero_or_ptr_variant = |i| {
2104 let i = VariantIdx::new(i);
2106 def.variant(i).fields.iter().map(|field| {
2107 SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env)
2110 for field in fields {
2113 SizeSkeleton::Known(size) => {
2114 if size.bytes() > 0 {
2118 SizeSkeleton::Pointer { .. } => {
2129 let v0 = zero_or_ptr_variant(0)?;
2131 if def.variants().len() == 1 {
2132 if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
2133 return Ok(SizeSkeleton::Pointer {
2135 || match tcx.layout_scalar_valid_range(def.did()) {
2136 (Bound::Included(start), Bound::Unbounded) => start > 0,
2137 (Bound::Included(start), Bound::Included(end)) => {
2138 0 < start && start < end
2149 let v1 = zero_or_ptr_variant(1)?;
2150 // Nullable pointer enum optimization.
2152 (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None)
2153 | (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
2154 Ok(SizeSkeleton::Pointer { non_zero: false, tail })
2160 ty::Projection(_) | ty::Opaque(..) => {
2161 let normalized = tcx.normalize_erasing_regions(param_env, ty);
2162 if ty == normalized {
2165 SizeSkeleton::compute(normalized, tcx, param_env)
2173 pub fn same_size(self, other: SizeSkeleton<'tcx>) -> bool {
2174 match (self, other) {
2175 (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
2176 (SizeSkeleton::Pointer { tail: a, .. }, SizeSkeleton::Pointer { tail: b, .. }) => {
2184 pub trait HasTyCtxt<'tcx>: HasDataLayout {
2185 fn tcx(&self) -> TyCtxt<'tcx>;
2188 pub trait HasParamEnv<'tcx> {
2189 fn param_env(&self) -> ty::ParamEnv<'tcx>;
2192 impl<'tcx> HasDataLayout for TyCtxt<'tcx> {
2194 fn data_layout(&self) -> &TargetDataLayout {
2199 impl<'tcx> HasTargetSpec for TyCtxt<'tcx> {
2200 fn target_spec(&self) -> &Target {
2205 impl<'tcx> HasTyCtxt<'tcx> for TyCtxt<'tcx> {
2207 fn tcx(&self) -> TyCtxt<'tcx> {
2212 impl<'tcx> HasDataLayout for ty::query::TyCtxtAt<'tcx> {
2214 fn data_layout(&self) -> &TargetDataLayout {
2219 impl<'tcx> HasTargetSpec for ty::query::TyCtxtAt<'tcx> {
2220 fn target_spec(&self) -> &Target {
2225 impl<'tcx> HasTyCtxt<'tcx> for ty::query::TyCtxtAt<'tcx> {
2227 fn tcx(&self) -> TyCtxt<'tcx> {
2232 impl<'tcx, C> HasParamEnv<'tcx> for LayoutCx<'tcx, C> {
2233 fn param_env(&self) -> ty::ParamEnv<'tcx> {
2238 impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
2239 fn data_layout(&self) -> &TargetDataLayout {
2240 self.tcx.data_layout()
2244 impl<'tcx, T: HasTargetSpec> HasTargetSpec for LayoutCx<'tcx, T> {
2245 fn target_spec(&self) -> &Target {
2246 self.tcx.target_spec()
2250 impl<'tcx, T: HasTyCtxt<'tcx>> HasTyCtxt<'tcx> for LayoutCx<'tcx, T> {
2251 fn tcx(&self) -> TyCtxt<'tcx> {
2256 pub trait MaybeResult<T> {
2259 fn from(x: Result<T, Self::Error>) -> Self;
2260 fn to_result(self) -> Result<T, Self::Error>;
2263 impl<T> MaybeResult<T> for T {
2266 fn from(Ok(x): Result<T, Self::Error>) -> Self {
2269 fn to_result(self) -> Result<T, Self::Error> {
2274 impl<T, E> MaybeResult<T> for Result<T, E> {
2277 fn from(x: Result<T, Self::Error>) -> Self {
2280 fn to_result(self) -> Result<T, Self::Error> {
2285 pub type TyAndLayout<'tcx> = rustc_target::abi::TyAndLayout<'tcx, Ty<'tcx>>;
2287 /// Trait for contexts that want to be able to compute layouts of types.
2288 /// This automatically gives access to `LayoutOf`, through a blanket `impl`.
2289 pub trait LayoutOfHelpers<'tcx>: HasDataLayout + HasTyCtxt<'tcx> + HasParamEnv<'tcx> {
2290 /// The `TyAndLayout`-wrapping type (or `TyAndLayout` itself), which will be
2291 /// returned from `layout_of` (see also `handle_layout_err`).
2292 type LayoutOfResult: MaybeResult<TyAndLayout<'tcx>>;
2294 /// `Span` to use for `tcx.at(span)`, from `layout_of`.
2295 // FIXME(eddyb) perhaps make this mandatory to get contexts to track it better?
// NOTE(review): the default body of `layout_tcx_at_span` is elided in this
// view (likely returns `DUMMY_SP` — confirm against the full file).
2297 fn layout_tcx_at_span(&self) -> Span {
2301 /// Helper used for `layout_of`, to adapt `tcx.layout_of(...)` into a
2302 /// `Self::LayoutOfResult` (which does not need to be a `Result<...>`).
2304 /// Most `impl`s, which propagate `LayoutError`s, should simply return `err`,
2305 /// but this hook allows e.g. codegen to return only `TyAndLayout` from its
2306 /// `cx.layout_of(...)`, without any `Result<...>` around it to deal with
2307 /// (and any `LayoutError`s are turned into fatal errors or ICEs).
2308 fn handle_layout_err(
2310 err: LayoutError<'tcx>,
2313 ) -> <Self::LayoutOfResult as MaybeResult<TyAndLayout<'tcx>>>::Error;
2316 /// Blanket extension trait for contexts that can compute layouts of types.
2317 pub trait LayoutOf<'tcx>: LayoutOfHelpers<'tcx> {
2318 /// Computes the layout of a type. Note that this implicitly
2319 /// executes in "reveal all" mode, and will normalize the input type.
2321 fn layout_of(&self, ty: Ty<'tcx>) -> Self::LayoutOfResult {
// Delegate with a dummy span; `spanned_layout_of` substitutes the
// context's tracked span for dummy ones.
2322 self.spanned_layout_of(ty, DUMMY_SP)
2325 /// Computes the layout of a type, at `span`. Note that this implicitly
2326 /// executes in "reveal all" mode, and will normalize the input type.
2327 // FIXME(eddyb) avoid passing information like this, and instead add more
2328 // `TyCtxt::at`-like APIs to be able to do e.g. `cx.at(span).layout_of(ty)`.
2330 fn spanned_layout_of(&self, ty: Ty<'tcx>, span: Span) -> Self::LayoutOfResult {
2331 let span = if !span.is_dummy() { span } else { self.layout_tcx_at_span() };
2332 let tcx = self.tcx().at(span);
// Run the `layout_of` query and let `handle_layout_err` adapt any
// error into `Self::LayoutOfResult`'s error type.
2335 tcx.layout_of(self.param_env().and(ty))
2336 .map_err(|err| self.handle_layout_err(err, span, ty)),
2341 impl<'tcx, C: LayoutOfHelpers<'tcx>> LayoutOf<'tcx> for C {}
// `LayoutCx` over a plain `TyCtxt` propagates layout errors verbatim.
2343 impl<'tcx> LayoutOfHelpers<'tcx> for LayoutCx<'tcx, TyCtxt<'tcx>> {
2344 type LayoutOfResult = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
2347 fn handle_layout_err(&self, err: LayoutError<'tcx>, _: Span, _: Ty<'tcx>) -> LayoutError<'tcx> {
// `LayoutCx` over a span-carrying `TyCtxtAt` additionally supplies the
// tracked span via `layout_tcx_at_span` (its body is elided in this view).
2352 impl<'tcx> LayoutOfHelpers<'tcx> for LayoutCx<'tcx, ty::query::TyCtxtAt<'tcx>> {
2353 type LayoutOfResult = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
2356 fn layout_tcx_at_span(&self) -> Span {
2361 fn handle_layout_err(&self, err: LayoutError<'tcx>, _: Span, _: Ty<'tcx>) -> LayoutError<'tcx> {
// Glue between this crate's `Ty` and the target-agnostic ABI machinery in
// `rustc_target`: given a `TyAndLayout`, answer structural questions
// (variant layout, field types/layouts, pointee info, type predicates).
2366 impl<'tcx, C> TyAbiInterface<'tcx, C> for Ty<'tcx>
2368 C: HasTyCtxt<'tcx> + HasParamEnv<'tcx>,
// Project the layout of one variant out of `this` (an enum/generator).
2370 fn ty_and_layout_for_variant(
2371 this: TyAndLayout<'tcx>,
2373 variant_index: VariantIdx,
2374 ) -> TyAndLayout<'tcx> {
2375 let layout = match this.variants {
2376 Variants::Single { index }
2377 // If all variants but one are uninhabited, the variant layout is the enum layout.
2378 if index == variant_index &&
2379 // Don't confuse variants of uninhabited enums with the enum itself.
2380 // For more details see https://github.com/rust-lang/rust/issues/69763.
2381 this.fields != FieldsShape::Primitive =>
// `Single` with a *different* index: the requested variant must be
// uninhabited; synthesize an uninhabited layout for it below.
2386 Variants::Single { index } => {
2388 let param_env = cx.param_env();
2390 // Deny calling for_variant more than once for non-Single enums.
2391 if let Ok(original_layout) = tcx.layout_of(param_env.and(this.ty)) {
2392 assert_eq!(original_layout.variants, Variants::Single { index });
2395 let fields = match this.ty.kind() {
2396 ty::Adt(def, _) if def.variants().is_empty() =>
2397 bug!("for_variant called on zero-variant enum"),
2398 ty::Adt(def, _) => def.variant(variant_index).fields.len(),
// Build a minimal uninhabited layout: right field count, no ABI,
// byte alignment (further fields of `LayoutS` are elided here).
2401 tcx.intern_layout(LayoutS {
2402 variants: Variants::Single { index: variant_index },
2403 fields: match NonZeroUsize::new(fields) {
2404 Some(fields) => FieldsShape::Union(fields),
2405 None => FieldsShape::Arbitrary { offsets: vec![], memory_index: vec![] },
2407 abi: Abi::Uninhabited,
2408 largest_niche: None,
2409 align: tcx.data_layout.i8_align,
2414 Variants::Multiple { ref variants, .. } => variants[variant_index],
2417 assert_eq!(*layout.variants(), Variants::Single { index: variant_index });
2419 TyAndLayout { ty: this.ty, layout }
// Compute the layout of field `i` of `this`, first resolving it to
// either a type (laid out via the query) or a ready-made layout.
2422 fn ty_and_layout_field(this: TyAndLayout<'tcx>, cx: &C, i: usize) -> TyAndLayout<'tcx> {
2423 enum TyMaybeWithLayout<'tcx> {
2425 TyAndLayout(TyAndLayout<'tcx>),
2428 fn field_ty_or_layout<'tcx>(
2429 this: TyAndLayout<'tcx>,
2430 cx: &(impl HasTyCtxt<'tcx> + HasParamEnv<'tcx>),
2432 ) -> TyMaybeWithLayout<'tcx> {
// Helper for enum/generator tag fields: pair the tag scalar's
// primitive type with a scalar layout built directly from it.
2434 let tag_layout = |tag: Scalar| -> TyAndLayout<'tcx> {
2436 layout: tcx.intern_layout(LayoutS::scalar(cx, tag)),
2437 ty: tag.primitive().to_ty(tcx),
2441 match *this.ty.kind() {
// Types with no fields to project (more arms elided above this line).
2450 | ty::GeneratorWitness(..)
2452 | ty::Dynamic(_, _, ty::Dyn) => {
2453 bug!("TyAndLayout::field({:?}): not applicable", this)
2456 // Potentially-fat pointers.
2457 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
2458 assert!(i < this.fields.count());
2460 // Reuse the fat `*T` type as its own thin pointer data field.
2461 // This provides information about, e.g., DST struct pointees
2462 // (which may have no non-DST form), and will work as long
2463 // as the `Abi` or `FieldsShape` is checked by users.
2465 let nil = tcx.mk_unit();
2466 let unit_ptr_ty = if this.ty.is_unsafe_ptr() {
2469 tcx.mk_mut_ref(tcx.lifetimes.re_static, nil)
2472 // NOTE(eddyb) using an empty `ParamEnv`, and `unwrap`-ing
2473 // the `Result` should always work because the type is
2474 // always either `*mut ()` or `&'static mut ()`.
2475 return TyMaybeWithLayout::TyAndLayout(TyAndLayout {
2477 ..tcx.layout_of(ty::ParamEnv::reveal_all().and(unit_ptr_ty)).unwrap()
// Field 1 of a fat pointer: its metadata, determined by the
// pointee's unsized tail.
2481 match tcx.struct_tail_erasing_lifetimes(pointee, cx.param_env()).kind() {
2482 ty::Slice(_) | ty::Str => TyMaybeWithLayout::Ty(tcx.types.usize),
2483 ty::Dynamic(_, _, ty::Dyn) => {
// Vtable pointer modeled as `&'static [usize; 3]` for now.
2484 TyMaybeWithLayout::Ty(tcx.mk_imm_ref(
2485 tcx.lifetimes.re_static,
2486 tcx.mk_array(tcx.types.usize, 3),
2488 /* FIXME: use actual fn pointers
2489 Warning: naively computing the number of entries in the
2490 vtable by counting the methods on the trait + methods on
2491 all parent traits does not work, because some methods can
2492 be not object safe and thus excluded from the vtable.
2493 Increase this counter if you tried to implement this but
2494 failed to do it without duplicating a lot of code from
2495 other places in the compiler: 2
2497 tcx.mk_array(tcx.types.usize, 3),
2498 tcx.mk_array(Option<fn()>),
2502 _ => bug!("TyAndLayout::field({:?}): not applicable", this),
2506 // Arrays and slices.
2507 ty::Array(element, _) | ty::Slice(element) => TyMaybeWithLayout::Ty(element),
2508 ty::Str => TyMaybeWithLayout::Ty(tcx.types.u8),
2510 // Tuples, generators and closures.
2511 ty::Closure(_, ref substs) => field_ty_or_layout(
// Closure fields are its captured upvars, projected via the
// synthetic upvar tuple.
2512 TyAndLayout { ty: substs.as_closure().tupled_upvars_ty(), ..this },
2517 ty::Generator(def_id, ref substs, _) => match this.variants {
2518 Variants::Single { index } => TyMaybeWithLayout::Ty(
2521 .state_tys(def_id, tcx)
2522 .nth(index.as_usize())
2527 Variants::Multiple { tag, tag_field, .. } => {
// Field at the tag position is the discriminant itself.
2529 return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
2531 TyMaybeWithLayout::Ty(substs.as_generator().prefix_tys().nth(i).unwrap())
2535 ty::Tuple(tys) => TyMaybeWithLayout::Ty(tys[i]),
2538 ty::Adt(def, substs) => {
2539 match this.variants {
2540 Variants::Single { index } => {
2541 TyMaybeWithLayout::Ty(def.variant(index).fields[i].ty(tcx, substs))
2544 // Discriminant field for enums (where applicable).
2545 Variants::Multiple { tag, .. } => {
2547 return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
2552 ty::Dynamic(_, _, ty::DynStar) => {
// `dyn*`: field 0 is the usize-sized data word,
// field 1 the vtable pointer (modeled as below).
2554 TyMaybeWithLayout::Ty(tcx.types.usize)
2556 // FIXME(dyn-star) same FIXME as above applies here too
2557 TyMaybeWithLayout::Ty(
2559 tcx.lifetimes.re_static,
2560 tcx.mk_array(tcx.types.usize, 3),
2564 bug!("no field {i} on dyn*")
// Inference/placeholder/error types should never reach layout.
2570 | ty::Placeholder(..)
2574 | ty::Error(_) => bug!("TyAndLayout::field: unexpected type `{}`", this.ty),
2578 match field_ty_or_layout(this, cx, i) {
2579 TyMaybeWithLayout::Ty(field_ty) => {
// A field type resolved above must itself be layout-computable;
// failure here is a compiler invariant violation.
2580 cx.tcx().layout_of(cx.param_env().and(field_ty)).unwrap_or_else(|e| {
2582 "failed to get layout for `{}`: {},\n\
2583 despite it being a field (#{}) of an existing layout: {:#?}",
2591 TyMaybeWithLayout::TyAndLayout(field_layout) => field_layout,
// Describe the pointer (if any) found at byte `offset` inside `this`:
// size/alignment of the pointee and how "safe" the pointer is
// (drives LLVM attributes like `noalias`/`dereferenceable`).
2595 fn ty_and_layout_pointee_info_at(
2596 this: TyAndLayout<'tcx>,
2599 ) -> Option<PointeeInfo> {
2601 let param_env = cx.param_env();
2603 let addr_space_of_ty = |ty: Ty<'tcx>| {
2604 if ty.is_fn() { cx.data_layout().instruction_address_space } else { AddressSpace::DATA }
2607 let pointee_info = match *this.ty.kind() {
2608 ty::RawPtr(mt) if offset.bytes() == 0 => {
2609 tcx.layout_of(param_env.and(mt.ty)).ok().map(|layout| PointeeInfo {
2611 align: layout.align.abi,
2613 address_space: addr_space_of_ty(mt.ty),
2616 ty::FnPtr(fn_sig) if offset.bytes() == 0 => {
2617 tcx.layout_of(param_env.and(tcx.mk_fn_ptr(fn_sig))).ok().map(|layout| PointeeInfo {
2619 align: layout.align.abi,
2621 address_space: cx.data_layout().instruction_address_space,
2624 ty::Ref(_, ty, mt) if offset.bytes() == 0 => {
2625 let address_space = addr_space_of_ty(ty);
2626 let kind = if tcx.sess.opts.optimize == OptLevel::No {
2627 // Use conservative pointer kind if not optimizing. This saves us the
2628 // Freeze/Unpin queries, and can save time in the codegen backend (noalias
2629 // attributes in LLVM have compile-time cost even in unoptimized builds).
2630 PointerKind::SharedMutable
2633 hir::Mutability::Not => {
2634 if ty.is_freeze(tcx.at(DUMMY_SP), cx.param_env()) {
2637 PointerKind::SharedMutable
2640 hir::Mutability::Mut => {
2641 // References to self-referential structures should not be considered
2642 // noalias, as another pointer to the structure can be obtained, that
2643 // is not based-on the original reference. We consider all !Unpin
2644 // types to be potentially self-referential here.
2645 if ty.is_unpin(tcx.at(DUMMY_SP), cx.param_env()) {
2646 PointerKind::UniqueBorrowed
2648 PointerKind::UniqueBorrowedPinned
2654 tcx.layout_of(param_env.and(ty)).ok().map(|layout| PointeeInfo {
2656 align: layout.align.abi,
// Non-pointer at offset 0: recurse into the field that covers
// `offset`, first picking which variant's fields to scan.
2663 let mut data_variant = match this.variants {
2664 // Within the discriminant field, only the niche itself is
2665 // always initialized, so we only check for a pointer at its
2668 // If the niche is a pointer, it's either valid (according
2669 // to its type), or null (which the niche field's scalar
2670 // validity range encodes). This allows using
2671 // `dereferenceable_or_null` for e.g., `Option<&T>`, and
2672 // this will continue to work as long as we don't start
2673 // using more niches than just null (e.g., the first page of
2674 // the address space, or unaligned pointers).
2675 Variants::Multiple {
2676 tag_encoding: TagEncoding::Niche { untagged_variant, .. },
2679 } if this.fields.offset(tag_field) == offset => {
2680 Some(this.for_variant(cx, untagged_variant))
2685 if let Some(variant) = data_variant {
2686 // We're not interested in any unions.
2687 if let FieldsShape::Union(_) = variant.fields {
2688 data_variant = None;
2692 let mut result = None;
2694 if let Some(variant) = data_variant {
2695 let ptr_end = offset + Pointer.size(cx);
2696 for i in 0..variant.fields.count() {
2697 let field_start = variant.fields.offset(i);
2698 if field_start <= offset {
2699 let field = variant.field(cx, i);
2700 result = field.to_result().ok().and_then(|field| {
2701 if ptr_end <= field_start + field.size {
2702 // We found the right field, look inside it.
2704 field.pointee_info_at(cx, offset - field_start);
2710 if result.is_some() {
2717 // FIXME(eddyb) This should be for `ptr::Unique<T>`, not `Box<T>`.
2718 if let Some(ref mut pointee) = result {
2719 if let ty::Adt(def, _) = this.ty.kind() {
2720 if def.is_box() && offset.bytes() == 0 {
2721 pointee.safe = Some(PointerKind::UniqueOwned);
2731 "pointee_info_at (offset={:?}, type kind: {:?}) => {:?}",
// Simple structural predicates used by the ABI layer.
2740 fn is_adt(this: TyAndLayout<'tcx>) -> bool {
2741 matches!(this.ty.kind(), ty::Adt(..))
2744 fn is_never(this: TyAndLayout<'tcx>) -> bool {
2745 this.ty.kind() == &ty::Never
2748 fn is_tuple(this: TyAndLayout<'tcx>) -> bool {
2749 matches!(this.ty.kind(), ty::Tuple(..))
2752 fn is_unit(this: TyAndLayout<'tcx>) -> bool {
2753 matches!(this.ty.kind(), ty::Tuple(list) if list.len() == 0)
2757 impl<'tcx> ty::Instance<'tcx> {
2758 // NOTE(eddyb) this is private to avoid using it from outside of
2759 // `fn_abi_of_instance` - any other uses are either too high-level
2760 // for `Instance` (e.g. typeck would use `Ty::fn_sig` instead),
2761 // or should go through `FnAbi` instead, to avoid losing any
2762 // adjustments `fn_abi_of_instance` might be performing.
// Builds the `PolyFnSig` used for ABI computation of this instance:
// `FnDef`s get their normalized signature, vtable shims get a raw-ptr
// `self`, and closures/generators get signatures synthesized from
// their captured-state types.
2763 #[tracing::instrument(level = "debug", skip(tcx, param_env))]
2764 fn fn_sig_for_fn_abi(
2767 param_env: ty::ParamEnv<'tcx>,
2768 ) -> ty::PolyFnSig<'tcx> {
2769 let ty = self.ty(tcx, param_env);
2772 // HACK(davidtwco,eddyb): This is a workaround for polymorphization considering
2773 // parameters unused if they show up in the signature, but not in the `mir::Body`
2774 // (i.e. due to being inside a projection that got normalized, see
2775 // `src/test/ui/polymorphization/normalized_sig_types.rs`), and codegen not keeping
2776 // track of a polymorphization `ParamEnv` to allow normalizing later.
2778 // We normalize the `fn_sig` again after substituting at a later point.
2779 let mut sig = match *ty.kind() {
2780 ty::FnDef(def_id, substs) => tcx
2781 .bound_fn_sig(def_id)
2782 .map_bound(|fn_sig| {
2783 tcx.normalize_erasing_regions(tcx.param_env(def_id), fn_sig)
2785 .subst(tcx, substs),
2786 _ => unreachable!(),
2789 if let ty::InstanceDef::VTableShim(..) = self.def {
2790 // Modify `fn(self, ...)` to `fn(self: *mut Self, ...)`.
2791 sig = sig.map_bound(|mut sig| {
2792 let mut inputs_and_output = sig.inputs_and_output.to_vec();
2793 inputs_and_output[0] = tcx.mk_mut_ptr(inputs_and_output[0]);
2794 sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);
// Closures: prepend the (late-bound) environment type to the
// user-visible signature.
2800 ty::Closure(def_id, substs) => {
2801 let sig = substs.as_closure().sig();
2803 let bound_vars = tcx.mk_bound_variable_kinds(
2806 .chain(iter::once(ty::BoundVariableKind::Region(ty::BrEnv))),
2808 let br = ty::BoundRegion {
2809 var: ty::BoundVar::from_usize(bound_vars.len() - 1),
2810 kind: ty::BoundRegionKind::BrEnv,
2812 let env_region = ty::ReLateBound(ty::INNERMOST, br);
2813 let env_ty = tcx.closure_env_ty(def_id, substs, env_region).unwrap();
2815 let sig = sig.skip_binder();
2816 ty::Binder::bind_with_vars(
2818 iter::once(env_ty).chain(sig.inputs().iter().cloned()),
// Generators: synthesize `fn(Pin<&mut G>, Resume) -> GeneratorState<Y, R>`.
2827 ty::Generator(_, substs, _) => {
2828 let sig = substs.as_generator().poly_sig();
2830 let bound_vars = tcx.mk_bound_variable_kinds(
2833 .chain(iter::once(ty::BoundVariableKind::Region(ty::BrEnv))),
2835 let br = ty::BoundRegion {
2836 var: ty::BoundVar::from_usize(bound_vars.len() - 1),
2837 kind: ty::BoundRegionKind::BrEnv,
2839 let env_region = ty::ReLateBound(ty::INNERMOST, br);
2840 let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty);
2842 let pin_did = tcx.require_lang_item(LangItem::Pin, None);
2843 let pin_adt_ref = tcx.adt_def(pin_did);
2844 let pin_substs = tcx.intern_substs(&[env_ty.into()]);
2845 let env_ty = tcx.mk_adt(pin_adt_ref, pin_substs);
2847 let sig = sig.skip_binder();
2848 let state_did = tcx.require_lang_item(LangItem::GeneratorState, None);
2849 let state_adt_ref = tcx.adt_def(state_did);
2850 let state_substs = tcx.intern_substs(&[sig.yield_ty.into(), sig.return_ty.into()]);
2851 let ret_ty = tcx.mk_adt(state_adt_ref, state_substs);
2852 ty::Binder::bind_with_vars(
2854 [env_ty, sig.resume_ty].iter(),
2857 hir::Unsafety::Normal,
2858 rustc_target::spec::abi::Abi::Rust,
2863 _ => bug!("unexpected type {:?} in Instance::fn_sig", ty),
2868 /// Calculates whether a function's ABI can unwind or not.
2870 /// This takes two primary parameters:
2872 /// * `codegen_fn_attr_flags` - these are flags calculated as part of the
2873 ///   codegen attrs for a defined function. For function pointers this set of
2874 ///   flags is the empty set. This is only applicable for Rust-defined
2875 ///   functions, and generally isn't needed except for small optimizations where
2876 ///   we try to say a function which otherwise might look like it could unwind
2877 ///   doesn't actually unwind (such as for intrinsics and such).
2879 /// * `abi` - this is the ABI that the function is defined with. This is the
2880 ///   primary factor for determining whether a function can unwind or not.
2882 /// Note that in this case unwinding is not necessarily panicking in Rust. Rust
2883 /// panics are implemented with unwinds on most platform (when
2884 /// `-Cpanic=unwind`), but this also accounts for `-Cpanic=abort` build modes.
2885 /// Notably unwinding is disallowed for more non-Rust ABIs unless it's
2886 /// specifically in the name (e.g. `"C-unwind"`). Unwinding within each ABI is
2887 /// defined for each ABI individually, but it always corresponds to some form of
2888 /// stack-based unwinding (the exact mechanism of which varies
2889 /// platform-by-platform).
2891 /// Rust functions are classified whether or not they can unwind based on the
2892 /// active "panic strategy". In other words Rust functions are considered to
2893 /// unwind in `-Cpanic=unwind` mode and cannot unwind in `-Cpanic=abort` mode.
2894 /// Note that Rust supports intermingling panic=abort and panic=unwind code, but
2895 /// only if the final panic mode is panic=abort. In this scenario any code
2896 /// previously compiled assuming that a function can unwind is still correct, it
2897 /// just never happens to actually unwind at runtime.
2899 /// This function's answer to whether or not a function can unwind is quite
2900 /// impactful throughout the compiler. This affects things like:
2902 /// * Calling a function which can't unwind means codegen simply ignores any
2903 ///   associated unwinding cleanup.
2904 /// * Calling a function which can unwind from a function which can't unwind
2905 ///   causes the `abort_unwinding_calls` MIR pass to insert a landing pad that
2906 ///   aborts the process.
2907 /// * This affects whether functions have the LLVM `nounwind` attribute, which
2908 ///   affects various optimizations and codegen.
2910 /// FIXME: this is actually buggy with respect to Rust functions. Rust functions
2911 /// compiled with `-Cpanic=unwind` and referenced from another crate compiled
2912 /// with `-Cpanic=abort` will look like they can't unwind when in fact they
2913 /// might (from a foreign exception or similar).
2915 #[tracing::instrument(level = "debug", skip(tcx))]
2916 pub fn fn_can_unwind<'tcx>(tcx: TyCtxt<'tcx>, fn_def_id: Option<DefId>, abi: SpecAbi) -> bool {
2917 if let Some(did) = fn_def_id {
2918 // Special attribute for functions which can't unwind.
2919 if tcx.codegen_fn_attrs(did).flags.contains(CodegenFnAttrFlags::NEVER_UNWIND) {
2923 // With `-C panic=abort`, all non-FFI functions are required to not unwind.
2925 // Note that this is true regardless ABI specified on the function -- a `extern "C-unwind"`
2926 // function defined in Rust is also required to abort.
2927 if tcx.sess.panic_strategy() == PanicStrategy::Abort && !tcx.is_foreign_item(did) {
2931 // With -Z panic-in-drop=abort, drop_in_place never unwinds.
2933 // This is not part of `codegen_fn_attrs` as it can differ between crates
2934 // and therefore cannot be computed in core.
2935 if tcx.sess.opts.unstable_opts.panic_in_drop == PanicStrategy::Abort {
2936 if Some(did) == tcx.lang_items().drop_in_place_fn() {
2942 // Otherwise if this isn't special then unwinding is generally determined by
2943 // the ABI of the itself. ABIs like `C` have variants which also
2944 // specifically allow unwinding (`C-unwind`), but not all platform-specific
2945 // ABIs have such an option. Otherwise the only other thing here is Rust
2946 // itself, and those ABIs are determined by the panic strategy configured
2947 // for this compilation.
2949 // Unfortunately at this time there's also another caveat. Rust [RFC
2950 // 2945][rfc] has been accepted and is in the process of being implemented
2951 // and stabilized. In this interim state we need to deal with historical
2952 // rustc behavior as well as plan for future rustc behavior.
2954 // Historically functions declared with `extern "C"` were marked at the
2955 // codegen layer as `nounwind`. This happened regardless of `panic=unwind`
2956 // or not. This is UB for functions in `panic=unwind` mode that then
2957 // actually panic and unwind. Note that this behavior is true for both
2958 // externally declared functions as well as Rust-defined function.
2960 // To fix this UB rustc would like to change in the future to catch unwinds
2961 // from function calls that may unwind within a Rust-defined `extern "C"`
2962 // function and forcibly abort the process, thereby respecting the
2963 // `nounwind` attribute emitted for `extern "C"`. This behavior change isn't
2964 // ready to roll out, so determining whether or not the `C` family of ABIs
2965 // unwinds is conditional not only on their definition but also whether the
2966 // `#![feature(c_unwind)]` feature gate is active.
2968 // Note that this means that unlike historical compilers rustc now, by
2969 // default, unconditionally thinks that the `C` ABI may unwind. This will
2970 // prevent some optimization opportunities, however, so we try to scope this
2971 // change and only assume that `C` unwinds with `panic=unwind` (as opposed
2972 // to `panic=abort`).
2974 // Eventually the check against `c_unwind` here will ideally get removed and
2975 // this'll be a little cleaner as it'll be a straightforward check of the
// (match over `abi` begins in elided lines; visible arms below.)
2978 // [rfc]: https://github.com/rust-lang/rfcs/blob/master/text/2945-c-unwind-abi.md
2984 | Stdcall { unwind }
2985 | Fastcall { unwind }
2986 | Vectorcall { unwind }
2987 | Thiscall { unwind }
2990 | SysV64 { unwind } => {
// `-unwind` ABI variants unwind explicitly; the plain variants
// unwind only pre-`c_unwind` under `panic=unwind` (see above).
2992 || (!tcx.features().c_unwind && tcx.sess.panic_strategy() == PanicStrategy::Unwind)
3000 | AvrNonBlockingInterrupt
3001 | CCmseNonSecureCall
3005 | Unadjusted => false,
3006 Rust | RustCall | RustCold => tcx.sess.panic_strategy() == PanicStrategy::Unwind,
// Maps a source-level `SpecAbi` (after target-specific adjustment via
// `adjust_abi`) to the low-level calling convention `Conv` used by the
// ABI-computation layer. `System`/`EfiApi` must already have been
// resolved by `adjust_abi`, hence the `bug!`s.
3011 pub fn conv_from_spec_abi(tcx: TyCtxt<'_>, abi: SpecAbi) -> Conv {
3012 use rustc_target::spec::abi::Abi::*;
3013 match tcx.sess.target.adjust_abi(abi) {
3014 RustIntrinsic | PlatformIntrinsic | Rust | RustCall => Conv::Rust,
3015 RustCold => Conv::RustCold,
3017 // It's the ABI's job to select this, not ours.
3018 System { .. } => bug!("system abi should be selected elsewhere"),
3019 EfiApi => bug!("eficall abi should be selected elsewhere"),
3021 Stdcall { .. } => Conv::X86Stdcall,
3022 Fastcall { .. } => Conv::X86Fastcall,
3023 Vectorcall { .. } => Conv::X86VectorCall,
3024 Thiscall { .. } => Conv::X86ThisCall,
3025 C { .. } => Conv::C,
3026 Unadjusted => Conv::C,
3027 Win64 { .. } => Conv::X86_64Win64,
3028 SysV64 { .. } => Conv::X86_64SysV,
3029 Aapcs { .. } => Conv::ArmAapcs,
3030 CCmseNonSecureCall => Conv::CCmseNonSecureCall,
3031 PtxKernel => Conv::PtxKernel,
3032 Msp430Interrupt => Conv::Msp430Intr,
3033 X86Interrupt => Conv::X86Intr,
3034 AmdGpuKernel => Conv::AmdGpuKernel,
3035 AvrInterrupt => Conv::AvrInterrupt,
3036 AvrNonBlockingInterrupt => Conv::AvrNonBlockingInterrupt,
3039 // These API constants ought to be more specific...
3040 Cdecl { .. } => Conv::C,
3044 /// Error produced by attempting to compute or adjust a `FnAbi`.
3045 #[derive(Copy, Clone, Debug, HashStable)]
3046 pub enum FnAbiError<'tcx> {
3047 /// Error produced by a `layout_of` call, while computing `FnAbi` initially.
3048 Layout(LayoutError<'tcx>),
3050 /// Error produced by attempting to adjust a `FnAbi`, for a "foreign" ABI.
3051 AdjustForForeignAbi(call::AdjustForForeignAbiError),
// Allow `?` to lift `LayoutError`s into `FnAbiError` (body elided here;
// presumably wraps `err` in `Self::Layout`).
3054 impl<'tcx> From<LayoutError<'tcx>> for FnAbiError<'tcx> {
3055 fn from(err: LayoutError<'tcx>) -> Self {
// Allow `?` to lift foreign-ABI adjustment errors into `FnAbiError`.
3060 impl From<call::AdjustForForeignAbiError> for FnAbiError<'_> {
3061 fn from(err: call::AdjustForForeignAbiError) -> Self {
3062 Self::AdjustForForeignAbi(err)
// User-facing rendering simply defers to the wrapped error's `Display`.
3066 impl<'tcx> fmt::Display for FnAbiError<'tcx> {
3067 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
3069 Self::Layout(err) => err.fmt(f),
3070 Self::AdjustForForeignAbi(err) => err.fmt(f),
// `FnAbi` errors are unconditionally fatal: the `!` level means emitting
// this diagnostic never returns control to the caller.
3075 impl<'tcx> IntoDiagnostic<'tcx, !> for FnAbiError<'tcx> {
3076 fn into_diagnostic(self, handler: &'tcx Handler) -> DiagnosticBuilder<'tcx, !> {
3077 handler.struct_fatal(self.to_string())
3081 // FIXME(eddyb) maybe use something like this for an unified `fn_abi_of`, not
3082 // just for error handling.
// Describes which `fn_abi_of_*` entry point failed, for error reporting.
3084 pub enum FnAbiRequest<'tcx> {
3085 OfFnPtr { sig: ty::PolyFnSig<'tcx>, extra_args: &'tcx ty::List<Ty<'tcx>> },
3086 OfInstance { instance: ty::Instance<'tcx>, extra_args: &'tcx ty::List<Ty<'tcx>> },
3089 /// Trait for contexts that want to be able to compute `FnAbi`s.
3090 /// This automatically gives access to `FnAbiOf`, through a blanket `impl`.
3091 pub trait FnAbiOfHelpers<'tcx>: LayoutOfHelpers<'tcx> {
3092 /// The `&FnAbi`-wrapping type (or `&FnAbi` itself), which will be
3093 /// returned from `fn_abi_of_*` (see also `handle_fn_abi_err`).
3094 type FnAbiOfResult: MaybeResult<&'tcx FnAbi<'tcx, Ty<'tcx>>>;
3096 /// Helper used for `fn_abi_of_*`, to adapt `tcx.fn_abi_of_*(...)` into a
3097 /// `Self::FnAbiOfResult` (which does not need to be a `Result<...>`).
3099 /// Most `impl`s, which propagate `FnAbiError`s, should simply return `err`,
3100 /// but this hook allows e.g. codegen to return only `&FnAbi` from its
3101 /// `cx.fn_abi_of_*(...)`, without any `Result<...>` around it to deal with
3102 /// (and any `FnAbiError`s are turned into fatal errors or ICEs).
3103 fn handle_fn_abi_err(
3105 err: FnAbiError<'tcx>,
3107 fn_abi_request: FnAbiRequest<'tcx>,
3108 ) -> <Self::FnAbiOfResult as MaybeResult<&'tcx FnAbi<'tcx, Ty<'tcx>>>>::Error;
3111 /// Blanket extension trait for contexts that can compute `FnAbi`s.
3112 pub trait FnAbiOf<'tcx>: FnAbiOfHelpers<'tcx> {
3113 /// Compute a `FnAbi` suitable for indirect calls, i.e. to `fn` pointers.
3115 /// NB: this doesn't handle virtual calls - those should use `fn_abi_of_instance`
3116 /// instead, where the instance is an `InstanceDef::Virtual`.
3118 fn fn_abi_of_fn_ptr(
3120 sig: ty::PolyFnSig<'tcx>,
3121 extra_args: &'tcx ty::List<Ty<'tcx>>,
3122 ) -> Self::FnAbiOfResult {
3123 // FIXME(eddyb) get a better `span` here.
3124 let span = self.layout_tcx_at_span();
3125 let tcx = self.tcx().at(span);
// Run the query; any error is adapted by `handle_fn_abi_err`.
3127 MaybeResult::from(tcx.fn_abi_of_fn_ptr(self.param_env().and((sig, extra_args))).map_err(
3128 |err| self.handle_fn_abi_err(err, span, FnAbiRequest::OfFnPtr { sig, extra_args }),
3132 /// Compute a `FnAbi` suitable for declaring/defining an `fn` instance, and for
3133 /// direct calls to an `fn`.
3135 /// NB: that includes virtual calls, which are represented by "direct calls"
3136 /// to an `InstanceDef::Virtual` instance (of `<dyn Trait as Trait>::fn`).
3138 #[tracing::instrument(level = "debug", skip(self))]
3139 fn fn_abi_of_instance(
3141 instance: ty::Instance<'tcx>,
3142 extra_args: &'tcx ty::List<Ty<'tcx>>,
3143 ) -> Self::FnAbiOfResult {
3144 // FIXME(eddyb) get a better `span` here.
3145 let span = self.layout_tcx_at_span();
3146 let tcx = self.tcx().at(span);
3149 tcx.fn_abi_of_instance(self.param_env().and((instance, extra_args))).map_err(|err| {
3150 // HACK(eddyb) at least for definitions of/calls to `Instance`s,
3151 // we can get some kind of span even if one wasn't provided.
3152 // However, we don't do this early in order to avoid calling
3153 // `def_span` unconditionally (which may have a perf penalty).
3154 let span = if !span.is_dummy() { span } else { tcx.def_span(instance.def_id()) };
3155 self.handle_fn_abi_err(err, span, FnAbiRequest::OfInstance { instance, extra_args })
3161 impl<'tcx, C: FnAbiOfHelpers<'tcx>> FnAbiOf<'tcx> for C {}
// Query provider: compute the `FnAbi` of a bare `fn` pointer signature.
// No def-id, no caller-location, and no thin-self forcing apply here
// (hence `None, None, false`); contrast with `fn_abi_of_instance`.
3163 fn fn_abi_of_fn_ptr<'tcx>(
3165 query: ty::ParamEnvAnd<'tcx, (ty::PolyFnSig<'tcx>, &'tcx ty::List<Ty<'tcx>>)>,
3166 ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
3167 let (param_env, (sig, extra_args)) = query.into_parts();
3169 LayoutCx { tcx, param_env }.fn_abi_new_uncached(sig, extra_args, None, None, false)
// Query provider: compute the `FnAbi` of a specific `Instance`, deriving
// its signature via `fn_sig_for_fn_abi`, threading through an implicit
// `#[track_caller]` location argument when required, and forcing a thin
// `self` pointer for virtual (vtable) instances.
3172 fn fn_abi_of_instance<'tcx>(
3174 query: ty::ParamEnvAnd<'tcx, (ty::Instance<'tcx>, &'tcx ty::List<Ty<'tcx>>)>,
3175 ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
3176 let (param_env, (instance, extra_args)) = query.into_parts();
3178 let sig = instance.fn_sig_for_fn_abi(tcx, param_env);
3180 let caller_location = if instance.def.requires_caller_location(tcx) {
3181 Some(tcx.caller_location_ty())
3186 LayoutCx { tcx, param_env }.fn_abi_new_uncached(
3190 Some(instance.def_id()),
3191 matches!(instance.def, ty::InstanceDef::Virtual(..)),
3195 // Handle safe Rust thin and fat pointers.
// Decorates a scalar argument/return slot with LLVM-facing attributes
// (`zeroext`, `noundef`, `nonnull`, `noalias`, `readonly`, alignment and
// dereferenceable-size info) based on its type-level guarantees.
// `is_return` (declared in an elided parameter line) relaxes attributes
// that only hold for parameters.
3196 pub fn adjust_for_rust_scalar<'tcx>(
3197 cx: LayoutCx<'tcx, TyCtxt<'tcx>>,
3198 attrs: &mut ArgAttributes,
3200 layout: TyAndLayout<'tcx>,
3204 // Booleans are always a noundef i1 that needs to be zero-extended.
3205 if scalar.is_bool() {
3206 attrs.ext(ArgExtension::Zext);
3207 attrs.set(ArgAttribute::NoUndef);
3211 // Scalars which have invalid values cannot be undef.
3212 if !scalar.is_always_valid(&cx) {
3213 attrs.set(ArgAttribute::NoUndef);
3216 // Only pointer types handled below.
3217 let Scalar::Initialized { value: Pointer, valid_range} = scalar else { return };
3219 if !valid_range.contains(0) {
3220 attrs.set(ArgAttribute::NonNull);
3223 if let Some(pointee) = layout.pointee_info_at(&cx, offset) {
3224 if let Some(kind) = pointee.safe {
3225 attrs.pointee_align = Some(pointee.align);
3227 // `Box` (`UniqueBorrowed`) are not necessarily dereferenceable
3228 // for the entire duration of the function as they can be deallocated
3229 // at any time. Same for shared mutable references. If LLVM had a
3230 // way to say "dereferenceable on entry" we could use it here.
3231 attrs.pointee_size = match kind {
3232 PointerKind::UniqueBorrowed
3233 | PointerKind::UniqueBorrowedPinned
3234 | PointerKind::Frozen => pointee.size,
3235 PointerKind::SharedMutable | PointerKind::UniqueOwned => Size::ZERO,
3238 // `Box`, `&T`, and `&mut T` cannot be undef.
3239 // Note that this only applies to the value of the pointer itself;
3240 // this attribute doesn't make it UB for the pointed-to data to be undef.
3241 attrs.set(ArgAttribute::NoUndef);
3243 // The aliasing rules for `Box<T>` are still not decided, but currently we emit
3244 // `noalias` for it. This can be turned off using an unstable flag.
3245 // See https://github.com/rust-lang/unsafe-code-guidelines/issues/326
3246 let noalias_for_box = cx.tcx.sess.opts.unstable_opts.box_noalias.unwrap_or(true);
3248 // `&mut` pointer parameters never alias other parameters,
3249 // or mutable global data
3251 // `&T` where `T` contains no `UnsafeCell<U>` is immutable,
3252 // and can be marked as both `readonly` and `noalias`, as
3253 // LLVM's definition of `noalias` is based solely on memory
3254 // dependencies rather than pointer equality
3256 // Due to past miscompiles in LLVM, we apply a separate NoAliasMutRef attribute
3257 // for UniqueBorrowed arguments, so that the codegen backend can decide whether
3258 // or not to actually emit the attribute. It can also be controlled with the
3259 // `-Zmutable-noalias` debugging option.
3260 let no_alias = match kind {
3261 PointerKind::SharedMutable
3262 | PointerKind::UniqueBorrowed
3263 | PointerKind::UniqueBorrowedPinned => false,
3264 PointerKind::UniqueOwned => noalias_for_box,
3265 PointerKind::Frozen => !is_return,
3268 attrs.set(ArgAttribute::NoAlias);
3271 if kind == PointerKind::Frozen && !is_return {
3272 attrs.set(ArgAttribute::ReadOnly);
3275 if kind == PointerKind::UniqueBorrowed && !is_return {
3276 attrs.set(ArgAttribute::NoAliasMutRef);
3282 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
3283 // FIXME(eddyb) perhaps group the signature/type-containing (or all of them?)
3284 // arguments of this method, into a separate `struct`.
// Computes the `FnAbi` for a function signature from scratch (no cache lookup):
// normalizes the signature, untuples "rust-call" arguments, builds an `ArgAbi`
// per input (plus the return), then applies the per-ABI adjustments.
// NOTE(review): this extraction has interior lines elided (the baked-in line
// numbers are non-contiguous), so comments below only describe what the
// visible lines establish.
3285 #[tracing::instrument(
3287 skip(self, caller_location, fn_def_id, force_thin_self_ptr)
3289 fn fn_abi_new_uncached(
3291 sig: ty::PolyFnSig<'tcx>,
3292 extra_args: &[Ty<'tcx>],
3293 caller_location: Option<Ty<'tcx>>,
3294 fn_def_id: Option<DefId>,
3295 // FIXME(eddyb) replace this with something typed, like an `enum`.
// When true, the first argument (`self`) has its fat-pointer layout replaced
// with a thin pointer via `make_thin_self_ptr` (virtual-call receivers).
3296 force_thin_self_ptr: bool,
3297 ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
// Replace late-bound regions in the signature so we work with a plain `FnSig`.
3298 let sig = self.tcx.normalize_erasing_late_bound_regions(self.param_env, sig);
// Map the declared ABI (e.g. "C", "Rust") to a target calling convention.
3300 let conv = conv_from_spec_abi(self.tcx(), sig.abi);
3302 let mut inputs = sig.inputs();
3303 let extra_args = if sig.abi == RustCall {
// "rust-call" functions take their trailing arguments tupled; they are
// never C-variadic and never receive separate `extra_args`.
3304 assert!(!sig.c_variadic && extra_args.is_empty());
3306 if let Some(input) = sig.inputs().last() {
3307 if let ty::Tuple(tupled_arguments) = input.kind() {
// Drop the tuple from `inputs`; its elements become the extra args.
3308 inputs = &sig.inputs()[0..sig.inputs().len() - 1];
// (elided) error paths for a malformed "rust-call" signature:
3312 "argument to function with \"rust-call\" ABI \
3318 "argument to function with \"rust-call\" ABI \
// Non-"rust-call": `extra_args` is only meaningful for C-variadic calls.
3323 assert!(sig.c_variadic || extra_args.is_empty());
// Targets that, unusually, do NOT ignore zero-sized struct arguments:
// x86_64-pc-windows-gnu and {s390x,sparc64,powerpc}-linux with a gnu-like env.
3327 let target = &self.tcx.sess.target;
3328 let target_env_gnu_like = matches!(&target.env[..], "gnu" | "musl" | "uclibc");
3329 let win_x64_gnu = target.os == "windows" && target.arch == "x86_64" && target.env == "gnu";
3330 let linux_s390x_gnu_like =
3331 target.os == "linux" && target.arch == "s390x" && target_env_gnu_like;
3332 let linux_sparc64_gnu_like =
3333 target.os == "linux" && target.arch == "sparc64" && target_env_gnu_like;
3334 let linux_powerpc_gnu_like =
3335 target.os == "linux" && target.arch == "powerpc" && target_env_gnu_like;
// Rust-defined ABIs get Rust's own scalar-attribute treatment below.
3337 let rust_abi = matches!(sig.abi, RustIntrinsic | PlatformIntrinsic | Rust | RustCall);
// Build the `ArgAbi` for one argument; `arg_idx == None` means the return value.
3339 let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| -> Result<_, FnAbiError<'tcx>> {
3340 let span = tracing::debug_span!("arg_of");
3341 let _entered = span.enter();
3342 let is_return = arg_idx.is_none();
3344 let layout = self.layout_of(ty)?;
3345 let layout = if force_thin_self_ptr && arg_idx == Some(0) {
3346 // Don't pass the vtable, it's not an argument of the virtual fn.
3347 // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
3348 // or `&/&mut dyn Trait` because this is special-cased elsewhere in codegen
3349 make_thin_self_ptr(self, layout)
// Attach Rust-specific attributes (noalias, dereferenceable, ...) per scalar.
3354 let mut arg = ArgAbi::new(self, layout, |layout, scalar, offset| {
3355 let mut attrs = ArgAttributes::new();
3356 adjust_for_rust_scalar(*self, &mut attrs, scalar, *layout, offset, is_return);
3360 if arg.layout.is_zst() {
3361 // For some forsaken reason, x86_64-pc-windows-gnu
3362 // doesn't ignore zero-sized struct arguments.
3363 // The same is true for {s390x,sparc64,powerpc}-unknown-linux-{gnu,musl,uclibc}.
3367 && !linux_s390x_gnu_like
3368 && !linux_sparc64_gnu_like
3369 && !linux_powerpc_gnu_like)
3371 arg.mode = PassMode::Ignore;
// Assemble the full ABI: return, declared inputs, extra (untupled/variadic)
// args, and the implicit `caller_location` argument if present.
3378 let mut fn_abi = FnAbi {
3379 ret: arg_of(sig.output(), None)?,
3383 .chain(extra_args.iter().copied())
3384 .chain(caller_location)
3386 .map(|(i, ty)| arg_of(ty, Some(i)))
3387 .collect::<Result<_, _>>()?,
3388 c_variadic: sig.c_variadic,
// Number of non-variadic arguments, before any variadic tail.
3389 fixed_count: inputs.len() as u32,
3391 can_unwind: fn_can_unwind(self.tcx(), fn_def_id, sig.abi),
// Apply per-ABI fixups (indirect passing, integer casts, foreign-ABI rules).
3393 self.fn_abi_adjust_for_abi(&mut fn_abi, sig.abi)?;
3394 debug!("fn_abi_new_uncached = {:?}", fn_abi);
// Arena-allocate so the result lives for `'tcx` and can be cached by callers.
3395 Ok(self.tcx.arena.alloc(fn_abi))
// Post-processes a freshly built `FnAbi` for the given ABI: decides which
// arguments must be passed indirectly (by pointer) and which small aggregates
// should be cast to an integer register, then delegates non-Rust ABIs to the
// target-specific foreign-ABI adjustment.
// NOTE(review): interior lines are elided in this extraction (non-contiguous
// baked-in line numbers); comments describe only the visible logic.
3398 #[tracing::instrument(level = "trace", skip(self))]
3399 fn fn_abi_adjust_for_abi(
3401 fn_abi: &mut FnAbi<'tcx, Ty<'tcx>>,
3403 ) -> Result<(), FnAbiError<'tcx>> {
// "unadjusted" ABI means: leave the ABI exactly as computed (elided early return).
3404 if abi == SpecAbi::Unadjusted {
// Rust-defined ABIs get the Rust argument-passing fixups below.
3408 if abi == SpecAbi::Rust
3409 || abi == SpecAbi::RustCall
3410 || abi == SpecAbi::RustIntrinsic
3411 || abi == SpecAbi::PlatformIntrinsic
3413 let fixup = |arg: &mut ArgAbi<'tcx, Ty<'tcx>>| {
// Ignored (e.g. ZST) arguments need no adjustment (elided early return).
3414 if arg.is_ignore() {
3418 match arg.layout.abi {
// Non-vector aggregates fall through to the size-based handling below.
3419 Abi::Aggregate { .. } => {}
3421 // This is a fun case! The gist of what this is doing is
3422 // that we want callers and callees to always agree on the
3423 // ABI of how they pass SIMD arguments. If we were to *not*
3424 // make these arguments indirect then they'd be immediates
3425 // in LLVM, which means that they'd used whatever the
3426 // appropriate ABI is for the callee and the caller. That
3427 // means, for example, if the caller doesn't have AVX
3428 // enabled but the callee does, then passing an AVX argument
3429 // across this boundary would cause corrupt data to show up.
3431 // This problem is fixed by unconditionally passing SIMD
3432 // arguments through memory between callers and callees
3433 // which should get them all to agree on ABI regardless of
3434 // target feature sets. Some more information about this
3435 // issue can be found in #44367.
3437 // Note that the platform intrinsic ABI is exempt here as
3438 // that's how we connect up to LLVM and it's unstable
3439 // anyway, we control all calls to it in libstd.
3441 if abi != SpecAbi::PlatformIntrinsic
3442 && self.tcx.sess.target.simd_types_indirect =>
3444 arg.make_indirect();
// Aggregates that are unsized or larger than a pointer go by reference.
3451 let size = arg.layout.size;
3452 if arg.layout.is_unsized() || size > Pointer.size(self) {
3453 arg.make_indirect();
3455 // We want to pass small aggregates as immediates, but using
3456 // a LLVM aggregate type for this leads to bad optimizations,
3457 // so we pick an appropriately sized integer type instead.
3458 arg.cast_to(Reg { kind: RegKind::Integer, size });
// Apply the fixup to the return slot and every argument (loop body elided).
3461 fixup(&mut fn_abi.ret);
3462 for arg in fn_abi.args.iter_mut() {
// Non-Rust ABIs: defer to the target-specific foreign-ABI rules instead.
3466 fn_abi.adjust_for_foreign_abi(self, abi)?;
3473 #[tracing::instrument(level = "debug", skip(cx))]
3474 fn make_thin_self_ptr<'tcx>(
3475 cx: &(impl HasTyCtxt<'tcx> + HasParamEnv<'tcx>),
3476 layout: TyAndLayout<'tcx>,
3477 ) -> TyAndLayout<'tcx> {
3479 let fat_pointer_ty = if layout.is_unsized() {
3480 // unsized `self` is passed as a pointer to `self`
3481 // FIXME (mikeyhew) change this to use &own if it is ever added to the language
3482 tcx.mk_mut_ptr(layout.ty)
3485 Abi::ScalarPair(..) | Abi::Scalar(..) => (),
3486 _ => bug!("receiver type has unsupported layout: {:?}", layout),
3489 // In the case of Rc<Self>, we need to explicitly pass a *mut RcBox<Self>
3490 // with a Scalar (not ScalarPair) ABI. This is a hack that is understood
3491 // elsewhere in the compiler as a method on a `dyn Trait`.
3492 // To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes until we
3493 // get a built-in pointer type
3494 let mut fat_pointer_layout = layout;
3495 'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr()
3496 && !fat_pointer_layout.ty.is_region_ptr()
3498 for i in 0..fat_pointer_layout.fields.count() {
3499 let field_layout = fat_pointer_layout.field(cx, i);
3501 if !field_layout.is_zst() {
3502 fat_pointer_layout = field_layout;
3503 continue 'descend_newtypes;
3507 bug!("receiver has no non-zero-sized fields {:?}", fat_pointer_layout);
3510 fat_pointer_layout.ty
3513 // we now have a type like `*mut RcBox<dyn Trait>`
3514 // change its layout to that of `*mut ()`, a thin pointer, but keep the same type
3515 // this is understood as a special case elsewhere in the compiler
3516 let unit_ptr_ty = tcx.mk_mut_ptr(tcx.mk_unit());
3521 // NOTE(eddyb) using an empty `ParamEnv`, and `unwrap`-ing the `Result`
3522 // should always work because the type is always `*mut ()`.
3523 ..tcx.layout_of(ty::ParamEnv::reveal_all().and(unit_ptr_ty)).unwrap()