1 use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
2 use crate::mir::{GeneratorLayout, GeneratorSavedLocal};
3 use crate::ty::normalize_erasing_regions::NormalizationError;
4 use crate::ty::subst::Subst;
6 self, layout_sanity_check::sanity_check_layout, subst::SubstsRef, EarlyBinder, ReprOptions, Ty,
10 use rustc_attr as attr;
12 use rustc_hir::def_id::DefId;
13 use rustc_hir::lang_items::LangItem;
14 use rustc_index::bit_set::BitSet;
15 use rustc_index::vec::{Idx, IndexVec};
16 use rustc_session::{config::OptLevel, DataTypeKind, FieldInfo, SizeKind, VariantInfo};
17 use rustc_span::symbol::Symbol;
18 use rustc_span::{Span, DUMMY_SP};
19 use rustc_target::abi::call::{
20 ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, Conv, FnAbi, PassMode, Reg, RegKind,
22 use rustc_target::abi::*;
23 use rustc_target::spec::{abi::Abi as SpecAbi, HasTargetSpec, PanicStrategy, Target};
25 use std::cmp::{self, Ordering};
28 use std::num::NonZeroUsize;
31 use rand::{seq::SliceRandom, SeedableRng};
32 use rand_xoshiro::Xoshiro128StarStar;
// Installs this module's queries (`layout_of`, `fn_abi_of_fn_ptr`,
// `fn_abi_of_instance`) into the global query-provider table, keeping every
// other provider via struct-update syntax.
34 pub fn provide(providers: &mut ty::query::Providers) {
// NOTE(review): the line between 34 and 36 is elided in this view; the struct
// expression below is presumably assigned back into `*providers` — confirm
// against the full source.
36 ty::query::Providers { layout_of, fn_abi_of_fn_ptr, fn_abi_of_instance, ..*providers };
// Extension helpers on `rustc_target::abi::Integer`: conversions between the
// ABI-level integer kind (I8..I128) and rustc's `Ty`/`attr`/`ty` integer types.
39 pub trait IntegerExt {
40 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx>;
41 fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
42 fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer;
43 fn from_uint_ty<C: HasDataLayout>(cx: &C, uty: ty::UintTy) -> Integer;
// NOTE(review): further trait items are elided from this view (the body of a
// `repr_discr`-style method appears in the impl below without its signature).
53 impl IntegerExt for Integer {
// Map an ABI integer width plus signedness to the corresponding interned
// primitive `Ty` (u8..u128 / i8..i128).
55 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx> {
56 match (*self, signed) {
57 (I8, false) => tcx.types.u8,
58 (I16, false) => tcx.types.u16,
59 (I32, false) => tcx.types.u32,
60 (I64, false) => tcx.types.u64,
61 (I128, false) => tcx.types.u128,
62 (I8, true) => tcx.types.i8,
63 (I16, true) => tcx.types.i16,
64 (I32, true) => tcx.types.i32,
65 (I64, true) => tcx.types.i64,
66 (I128, true) => tcx.types.i128,
70 /// Gets the Integer type from an attr::IntType.
71 fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
// Signedness is irrelevant here: only the bit-width is extracted.
72 let dl = cx.data_layout();
// NOTE(review): the `match ity {` header (lines 73-74) is elided in this view.
75 attr::SignedInt(ast::IntTy::I8) | attr::UnsignedInt(ast::UintTy::U8) => I8,
76 attr::SignedInt(ast::IntTy::I16) | attr::UnsignedInt(ast::UintTy::U16) => I16,
77 attr::SignedInt(ast::IntTy::I32) | attr::UnsignedInt(ast::UintTy::U32) => I32,
78 attr::SignedInt(ast::IntTy::I64) | attr::UnsignedInt(ast::UintTy::U64) => I64,
79 attr::SignedInt(ast::IntTy::I128) | attr::UnsignedInt(ast::UintTy::U128) => I128,
80 attr::SignedInt(ast::IntTy::Isize) | attr::UnsignedInt(ast::UintTy::Usize) => {
// isize/usize width is target-dependent, taken from the data layout.
81 dl.ptr_sized_integer()
86 fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer {
// NOTE(review): the match header and the `I8` arm (lines 87-88) are elided.
89 ty::IntTy::I16 => I16,
90 ty::IntTy::I32 => I32,
91 ty::IntTy::I64 => I64,
92 ty::IntTy::I128 => I128,
93 ty::IntTy::Isize => cx.data_layout().ptr_sized_integer(),
96 fn from_uint_ty<C: HasDataLayout>(cx: &C, ity: ty::UintTy) -> Integer {
// NOTE(review): the match header and the `U8` arm (lines 97-98) are elided.
99 ty::UintTy::U16 => I16,
100 ty::UintTy::U32 => I32,
101 ty::UintTy::U64 => I64,
102 ty::UintTy::U128 => I128,
103 ty::UintTy::Usize => cx.data_layout().ptr_sized_integer(),
107 /// Finds the appropriate Integer type and signedness for the given
108 /// signed discriminant range and `#[repr]` attribute.
109 /// N.B.: `u128` values above `i128::MAX` will be treated as signed, but
110 /// that shouldn't affect anything, other than maybe debuginfo.
// NOTE(review): the signature (fn name and the `tcx`/`ty`/`repr`/`min`/`max`
// parameters, lines 111-116) is elided from this view; the body below reads
// `repr.int`, `min`, `max`, so this is presumably `repr_discr` — confirm.
117 ) -> (Integer, bool) {
118 // Theoretically, negative values could be larger in unsigned representation
119 // than the unsigned representation of the signed minimum. However, if there
120 // are any negative values, the only valid unsigned representation is u128
121 // which can fit all i128 values, so the result remains unaffected.
122 let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
123 let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));
// An explicit `#[repr(<int>)]` forces that width; too-small hints are a bug.
125 if let Some(ity) = repr.int {
126 let discr = Integer::from_attr(&tcx, ity);
127 let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
// NOTE(review): the `if discr < fit { bug!(` scaffolding around this message
// (lines 128-129, 132-134) is elided in this view.
130 "Integer::repr_discr: `#[repr]` hint too small for \
131 discriminant range of enum `{}",
135 return (discr, ity.is_signed());
138 let at_least = if repr.c() {
139 // This is usually I32, however it can be different on some platforms,
140 // notably hexagon and arm-none/thumb-none
141 tcx.data_layout().c_enum_min_size
143 // repr(Rust) enums try to be as small as possible
147 // If there are no negative values, we can use the unsigned fit.
// NOTE(review): the `if min >= 0 {`-style condition (line 148) selecting
// between these two arms is elided.
149 (cmp::max(unsigned_fit, at_least), false)
151 (cmp::max(signed_fit, at_least), true)
// Extension helpers on `rustc_target::abi::Primitive` for recovering a
// corresponding rustc `Ty` (exact type, or an integer stand-in).
156 pub trait PrimitiveExt {
157 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
158 fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
161 impl PrimitiveExt for Primitive {
// Map each ABI primitive to a representative `Ty`; pointers become `*mut ()`.
163 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
// NOTE(review): the `match *self {` header (line 164) is elided in this view.
165 Int(i, signed) => i.to_ty(tcx, signed),
166 F32 => tcx.types.f32,
167 F64 => tcx.types.f64,
168 Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
172 /// Return an *integer* type matching this primitive.
173 /// Useful in particular when dealing with enum discriminants.
175 fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
// NOTE(review): the `match *self {` header (line 176) is elided in this view.
177 Int(i, signed) => i.to_ty(tcx, signed),
// A pointer-valued discriminant is represented as `usize`.
178 Pointer => tcx.types.usize,
179 F32 | F64 => bug!("floats do not have an int type"),
184 /// The first half of a fat pointer.
186 /// - For a trait object, this is the address of the box.
187 /// - For a slice, this is the base address.
188 pub const FAT_PTR_ADDR: usize = 0;
190 /// The second half of a fat pointer.
192 /// - For a trait object, this is the address of the vtable.
193 /// - For a slice, this is the length.
194 pub const FAT_PTR_EXTRA: usize = 1;
196 /// The maximum supported number of lanes in a SIMD vector.
198 /// This value is selected based on backend support:
199 /// * LLVM does not appear to have a vector width limit.
200 /// * Cranelift stores the base-2 log of the lane count in a 4 bit integer.
// 1 << 0xF == 2^15 == 32768: the largest lane count whose base-2 log (15)
// still fits in Cranelift's 4-bit field.
201 pub const MAX_SIMD_LANES: u64 = 1 << 0xF;
// Errors that layout computation can produce; carried through the `layout_of`
// query result.
203 #[derive(Copy, Clone, Debug, HashStable, TyEncodable, TyDecodable)]
204 pub enum LayoutError<'tcx> {
// NOTE(review): a variant at elided line 205 is missing from this view;
// `LayoutError::Unknown(ty)` is constructed elsewhere in this file, so it is
// presumably `Unknown(Ty<'tcx>)` — confirm against the full source.
206 SizeOverflow(Ty<'tcx>),
207 NormalizationFailure(Ty<'tcx>, NormalizationError<'tcx>),
// Human-readable rendering of layout errors (used in diagnostics).
210 impl<'tcx> fmt::Display for LayoutError<'tcx> {
211 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// NOTE(review): the `match *self {`-style header (line 212) is elided here.
213 LayoutError::Unknown(ty) => write!(f, "the type `{}` has an unknown layout", ty),
214 LayoutError::SizeOverflow(ty) => {
215 write!(f, "values of the type `{}` are too big for the current architecture", ty)
217 LayoutError::NormalizationFailure(t, e) => write!(
// NOTE(review): the `f,` argument line (218) and the `t,` line (220) of this
// `write!` are elided in this view.
219 "unable to determine layout for `{}` because `{}` cannot be normalized",
221 e.get_type_for_failure()
// Provider for the `layout_of` query: normalizes the type, computes the
// uncached layout, records stats, and sanity-checks the result.
// NOTE(review): the `fn layout_of(tcx: TyCtxt<'tcx>,`-style header lines
// (228-229) are elided from this view.
227 #[instrument(skip(tcx, query), level = "debug")]
230 query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
231 ) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
232 let (param_env, ty) = query.into_parts();
// Layout is computed post-monomorphization, so reveal all opaque types.
235 let param_env = param_env.with_reveal_all_normalized(tcx);
236 let unnormalized_ty = ty;
238 // FIXME: We might want to have two different versions of `layout_of`:
239 // One that can be called after typecheck has completed and can use
240 // `normalize_erasing_regions` here and another one that can be called
241 // before typecheck has completed and uses `try_normalize_erasing_regions`.
242 let ty = match tcx.try_normalize_erasing_regions(param_env, ty) {
// NOTE(review): the `Ok(ty) => ty,` arm (line 243) is elided in this view.
244 Err(normalization_error) => {
245 return Err(LayoutError::NormalizationFailure(ty, normalization_error));
249 if ty != unnormalized_ty {
250 // Ensure this layout is also cached for the normalized type.
// Re-entering the query keys the cache on the normalized type as well.
251 return tcx.layout_of(param_env.and(ty));
254 let cx = LayoutCx { tcx, param_env };
256 let layout = cx.layout_of_uncached(ty)?;
257 let layout = TyAndLayout { ty, layout };
// Feeds `-Z print-type-sizes` style statistics collection.
259 cx.record_layout_for_printing(layout);
261 sanity_check_layout(&cx, &layout);
// Context bundle for layout computation: a compiler context `C` plus the
// parameter environment used for normalization and sizedness queries.
266 #[derive(Clone, Copy)]
267 pub struct LayoutCx<'tcx, C> {
// NOTE(review): the field at elided line 268 is missing from this view; it is
// presumably `pub tcx: C` given the `LayoutCx { tcx, param_env }` construction
// and `self.tcx` uses elsewhere in this file — confirm.
269 pub param_env: ty::ParamEnv<'tcx>,
// Classifies how a univariant layout may be used, which constrains field
// reordering (see `univariant_uninterned`).
// NOTE(review): the `enum StructKind {` header and the `AlwaysSized` /
// `MaybeUnsized` variant lines are elided from this view; both variants are
// matched elsewhere in this file.
272 #[derive(Copy, Clone, Debug)]
274 /// A tuple, closure, or univariant which cannot be coerced to unsized.
276 /// A univariant, the last field of which may be coerced to unsized.
278 /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
279 Prefixed(Size, Align),
282 // Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
283 // This is used to go between `memory_index` (source field order to memory order)
284 // and `inverse_memory_index` (memory order to source field order).
285 // See also `FieldsShape::Arbitrary::memory_index` for more details.
286 // FIXME(eddyb) build a better abstraction for permutations, if possible.
287 fn invert_mapping(map: &[u32]) -> Vec<u32> {
288 let mut inverse = vec![0; map.len()];
289 for i in 0..map.len() {
// Bijectivity guarantees each slot of `inverse` is written exactly once.
290 inverse[map[i] as usize] = i as u32;
// NOTE(review): the closing brace and the trailing `inverse` return expression
// (lines 291-293) are elided from this view.
295 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
// Builds the `LayoutS` for a `ScalarPair(a, b)`: `b` is placed after `a` at
// the next `b`-aligned offset, and the whole pair is aggregate-aligned.
296 fn scalar_pair(&self, a: Scalar, b: Scalar) -> LayoutS<'tcx> {
297 let dl = self.data_layout();
298 let b_align = b.align(dl);
299 let align = a.align(dl).max(b_align).max(dl.aggregate_align);
300 let b_offset = a.size(dl).align_to(b_align.abi);
301 let size = (b_offset + b.size(dl)).align_to(align.abi);
303 // HACK(nox): We iter on `b` and then `a` because `max_by_key`
304 // returns the last maximum.
305 let largest_niche = Niche::from_scalar(dl, b_offset, b)
// NOTE(review): an `.into_iter()` line (306) is elided between 305 and 307.
307 .chain(Niche::from_scalar(dl, Size::ZERO, a))
308 .max_by_key(|niche| niche.available(dl));
// NOTE(review): the `LayoutS {` constructor header (lines 309-310) is elided.
311 variants: Variants::Single { index: VariantIdx::new(0) },
312 fields: FieldsShape::Arbitrary {
313 offsets: vec![Size::ZERO, b_offset],
// `memory_index` is the identity: source order equals memory order here.
314 memory_index: vec![0, 1],
316 abi: Abi::ScalarPair(a, b),
// NOTE(review): trailing fields of the constructor (largest_niche, align,
// size) and closing braces are elided from this view.
// Computes an (un-interned) single-variant layout for `fields` under the given
// `repr` options and `kind`: optionally reorders fields (optimization or
// `-Z randomize-layout`), assigns offsets respecting packing/alignment, tracks
// the largest niche, and tries to upgrade the ABI to Scalar/ScalarPair.
// NOTE(review): many interior lines are elided in this view (signature
// parameters, several closing braces, some expressions); the comments below
// annotate only what is visible.
323 fn univariant_uninterned(
// NOTE(review): the `&self` / `ty` parameter lines (324-325) and the `repr` /
// `kind` parameter lines (327-328) are elided.
326 fields: &[TyAndLayout<'_>],
329 ) -> Result<LayoutS<'tcx>, LayoutError<'tcx>> {
330 let dl = self.data_layout();
331 let pack = repr.pack;
// `packed` and `align` on the same type is rejected earlier; reaching this is
// a compiler bug, hence delay_span_bug rather than a user error.
332 if pack.is_some() && repr.align.is_some() {
333 self.tcx.sess.delay_span_bug(DUMMY_SP, "struct cannot be packed and aligned");
334 return Err(LayoutError::Unknown(ty));
337 let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };
// Start with the identity permutation: memory order == source order.
339 let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();
341 let optimize = !repr.inhibit_struct_field_reordering_opt();
// For MaybeUnsized, the last field must stay last (it may be unsized), so it
// is excluded from the reorderable prefix.
344 if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
345 let optimizing = &mut inverse_memory_index[..end];
346 let field_align = |f: &TyAndLayout<'_>| {
347 if let Some(pack) = pack { f.align.abi.min(pack) } else { f.align.abi }
350 // If `-Z randomize-layout` was enabled for the type definition we can shuffle
351 // the field ordering to try and catch some code making assumptions about layouts
352 // we don't guarantee
353 if repr.can_randomize_type_layout() {
354 // `ReprOptions.layout_seed` is a deterministic seed that we can use to
355 // randomize field ordering with
356 let mut rng = Xoshiro128StarStar::seed_from_u64(repr.field_shuffle_seed);
358 // Shuffle the ordering of the fields
359 optimizing.shuffle(&mut rng);
361 // Otherwise we just leave things alone and actually optimize the type's fields
// NOTE(review): the `match kind {`-style header (lines 362-363) is elided.
364 StructKind::AlwaysSized | StructKind::MaybeUnsized => {
365 optimizing.sort_by_key(|&x| {
366 // Place ZSTs first to avoid "interesting offsets",
367 // especially with only one or two non-ZST fields.
368 let f = &fields[x as usize];
// Sort key: non-ZSTs after ZSTs, then descending alignment.
369 (!f.is_zst(), cmp::Reverse(field_align(f)))
373 StructKind::Prefixed(..) => {
374 // Sort in ascending alignment so that the layout stays optimal
375 // regardless of the prefix
376 optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
380 // FIXME(Kixiron): We can always shuffle fields within a given alignment class
381 // regardless of the status of `-Z randomize-layout`
385 // inverse_memory_index holds field indices by increasing memory offset.
386 // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
387 // We now write field offsets to the corresponding offset slot;
388 // field 5 with offset 0 puts 0 in offsets[5].
389 // At the bottom of this function, we invert `inverse_memory_index` to
390 // produce `memory_index` (see `invert_mapping`).
392 let mut sized = true;
393 let mut offsets = vec![Size::ZERO; fields.len()];
394 let mut offset = Size::ZERO;
395 let mut largest_niche = None;
396 let mut largest_niche_available = 0;
// A prefix (e.g. an enum tag) reserves space before the first field.
398 if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
400 if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
401 align = align.max(AbiAndPrefAlign::new(prefix_align));
402 offset = prefix_size.align_to(prefix_align);
// Walk fields in memory order, assigning each its aligned offset.
405 for &i in &inverse_memory_index {
406 let field = fields[i as usize];
// Only the last field (in memory order) may be unsized; anything after an
// unsized field is a compiler bug.
408 self.tcx.sess.delay_span_bug(
411 "univariant: field #{} of `{}` comes after unsized field",
418 if field.is_unsized() {
422 // Invariant: offset < dl.obj_size_bound() <= 1<<61
423 let field_align = if let Some(pack) = pack {
424 field.align.min(AbiAndPrefAlign::new(pack))
428 offset = offset.align_to(field_align.abi);
429 align = align.max(field_align);
431 debug!("univariant offset: {:?} field: {:#?}", offset, field);
432 offsets[i as usize] = offset;
// Track the single best niche across all fields, translated to its absolute
// offset within this struct.
434 if let Some(mut niche) = field.largest_niche {
435 let available = niche.available(dl);
436 if available > largest_niche_available {
437 largest_niche_available = available;
438 niche.offset += offset;
439 largest_niche = Some(niche);
443 offset = offset.checked_add(field.size, dl).ok_or(LayoutError::SizeOverflow(ty))?;
// `#[repr(align(N))]` can only raise the alignment, never lower it.
446 if let Some(repr_align) = repr.align {
447 align = align.max(AbiAndPrefAlign::new(repr_align));
450 debug!("univariant min_size: {:?}", offset);
451 let min_size = offset;
453 // As stated above, inverse_memory_index holds field indices by increasing offset.
454 // This makes it an already-sorted view of the offsets vec.
455 // To invert it, consider:
456 // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
457 // Field 5 would be the first element, so memory_index is i:
458 // Note: if we didn't optimize, it's already right.
461 if optimize { invert_mapping(&inverse_memory_index) } else { inverse_memory_index };
463 let size = min_size.align_to(align.abi);
464 let mut abi = Abi::Aggregate { sized };
466 // Unpack newtype ABIs and find scalar pairs.
467 if sized && size.bytes() > 0 {
468 // All other fields must be ZSTs.
469 let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());
471 match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
472 // We have exactly one non-ZST field.
473 (Some((i, field)), None, None) => {
474 // Field fills the struct and it has a scalar or scalar pair ABI.
475 if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size
478 // For plain scalars, or vectors of them, we can't unpack
479 // newtypes for `#[repr(C)]`, as that affects C ABIs.
480 Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
483 // But scalar pairs are Rust-specific and get
484 // treated as aggregates by C ABIs anyway.
485 Abi::ScalarPair(..) => {
493 // Two non-ZST fields, and they're both scalars.
494 (Some((i, a)), Some((j, b)), None) => {
495 match (a.abi, b.abi) {
496 (Abi::Scalar(a), Abi::Scalar(b)) => {
497 // Order by the memory placement, not source order.
498 let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
// Construct the canonical pair layout and compare it against what the field
// placement above actually produced.
503 let pair = self.scalar_pair(a, b);
504 let pair_offsets = match pair.fields {
505 FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
506 assert_eq!(memory_index, &[0, 1]);
511 if offsets[i] == pair_offsets[0]
512 && offsets[j] == pair_offsets[1]
513 && align == pair.align
516 // We can use `ScalarPair` only when it matches our
517 // already computed layout (including `#[repr(C)]`).
// Any uninhabited field makes the whole aggregate uninhabited.
529 if fields.iter().any(|f| f.abi.is_uninhabited()) {
530 abi = Abi::Uninhabited;
// NOTE(review): the `Ok(LayoutS {`-style constructor header (lines 531-533)
// and the trailing fields/closing braces (536 onward) are elided.
534 variants: Variants::Single { index: VariantIdx::new(0) },
535 fields: FieldsShape::Arbitrary { offsets, memory_index },
543 fn layout_of_uncached(&self, ty: Ty<'tcx>) -> Result<Layout<'tcx>, LayoutError<'tcx>> {
545 let param_env = self.param_env;
546 let dl = self.data_layout();
547 let scalar_unit = |value: Primitive| {
548 let size = value.size(dl);
549 assert!(size.bits() <= 128);
550 Scalar::Initialized { value, valid_range: WrappingRange::full(size) }
553 |value: Primitive| tcx.intern_layout(LayoutS::scalar(self, scalar_unit(value)));
555 let univariant = |fields: &[TyAndLayout<'_>], repr: &ReprOptions, kind| {
556 Ok(tcx.intern_layout(self.univariant_uninterned(ty, fields, repr, kind)?))
558 debug_assert!(!ty.has_infer_types_or_consts());
560 Ok(match *ty.kind() {
562 ty::Bool => tcx.intern_layout(LayoutS::scalar(
564 Scalar::Initialized {
565 value: Int(I8, false),
566 valid_range: WrappingRange { start: 0, end: 1 },
569 ty::Char => tcx.intern_layout(LayoutS::scalar(
571 Scalar::Initialized {
572 value: Int(I32, false),
573 valid_range: WrappingRange { start: 0, end: 0x10FFFF },
576 ty::Int(ity) => scalar(Int(Integer::from_int_ty(dl, ity), true)),
577 ty::Uint(ity) => scalar(Int(Integer::from_uint_ty(dl, ity), false)),
578 ty::Float(fty) => scalar(match fty {
579 ty::FloatTy::F32 => F32,
580 ty::FloatTy::F64 => F64,
583 let mut ptr = scalar_unit(Pointer);
584 ptr.valid_range_mut().start = 1;
585 tcx.intern_layout(LayoutS::scalar(self, ptr))
589 ty::Never => tcx.intern_layout(LayoutS {
590 variants: Variants::Single { index: VariantIdx::new(0) },
591 fields: FieldsShape::Primitive,
592 abi: Abi::Uninhabited,
598 // Potentially-wide pointers.
599 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
600 let mut data_ptr = scalar_unit(Pointer);
601 if !ty.is_unsafe_ptr() {
602 data_ptr.valid_range_mut().start = 1;
605 let pointee = tcx.normalize_erasing_regions(param_env, pointee);
606 if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
607 return Ok(tcx.intern_layout(LayoutS::scalar(self, data_ptr)));
610 let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
611 let metadata = match unsized_part.kind() {
613 return Ok(tcx.intern_layout(LayoutS::scalar(self, data_ptr)));
615 ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)),
617 let mut vtable = scalar_unit(Pointer);
618 vtable.valid_range_mut().start = 1;
621 _ => return Err(LayoutError::Unknown(unsized_part)),
624 // Effectively a (ptr, meta) tuple.
625 tcx.intern_layout(self.scalar_pair(data_ptr, metadata))
628 // Arrays and slices.
629 ty::Array(element, mut count) => {
630 if count.has_projections() {
631 count = tcx.normalize_erasing_regions(param_env, count);
632 if count.has_projections() {
633 return Err(LayoutError::Unknown(ty));
637 let count = count.try_eval_usize(tcx, param_env).ok_or(LayoutError::Unknown(ty))?;
638 let element = self.layout_of(element)?;
640 element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;
643 if count != 0 && tcx.conservative_is_privately_uninhabited(param_env.and(ty)) {
646 Abi::Aggregate { sized: true }
649 let largest_niche = if count != 0 { element.largest_niche } else { None };
651 tcx.intern_layout(LayoutS {
652 variants: Variants::Single { index: VariantIdx::new(0) },
653 fields: FieldsShape::Array { stride: element.size, count },
656 align: element.align,
660 ty::Slice(element) => {
661 let element = self.layout_of(element)?;
662 tcx.intern_layout(LayoutS {
663 variants: Variants::Single { index: VariantIdx::new(0) },
664 fields: FieldsShape::Array { stride: element.size, count: 0 },
665 abi: Abi::Aggregate { sized: false },
667 align: element.align,
671 ty::Str => tcx.intern_layout(LayoutS {
672 variants: Variants::Single { index: VariantIdx::new(0) },
673 fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
674 abi: Abi::Aggregate { sized: false },
681 ty::FnDef(..) => univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?,
682 ty::Dynamic(..) | ty::Foreign(..) => {
683 let mut unit = self.univariant_uninterned(
686 &ReprOptions::default(),
687 StructKind::AlwaysSized,
690 Abi::Aggregate { ref mut sized } => *sized = false,
693 tcx.intern_layout(unit)
696 ty::Generator(def_id, substs, _) => self.generator_layout(ty, def_id, substs)?,
698 ty::Closure(_, ref substs) => {
699 let tys = substs.as_closure().upvar_tys();
701 &tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
702 &ReprOptions::default(),
703 StructKind::AlwaysSized,
709 if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };
712 &tys.iter().map(|k| self.layout_of(k)).collect::<Result<Vec<_>, _>>()?,
713 &ReprOptions::default(),
718 // SIMD vector types.
719 ty::Adt(def, substs) if def.repr().simd() => {
720 if !def.is_struct() {
721 // Should have yielded E0517 by now.
722 tcx.sess.delay_span_bug(
724 "#[repr(simd)] was applied to an ADT that is not a struct",
726 return Err(LayoutError::Unknown(ty));
729 // Supported SIMD vectors are homogeneous ADTs with at least one field:
731 // * #[repr(simd)] struct S(T, T, T, T);
732 // * #[repr(simd)] struct S { x: T, y: T, z: T, w: T }
733 // * #[repr(simd)] struct S([T; 4])
735 // where T is a primitive scalar (integer/float/pointer).
737 // SIMD vectors with zero fields are not supported.
738 // (should be caught by typeck)
739 if def.non_enum_variant().fields.is_empty() {
740 tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
743 // Type of the first ADT field:
744 let f0_ty = def.non_enum_variant().fields[0].ty(tcx, substs);
746 // Heterogeneous SIMD vectors are not supported:
747 // (should be caught by typeck)
748 for fi in &def.non_enum_variant().fields {
749 if fi.ty(tcx, substs) != f0_ty {
750 tcx.sess.fatal(&format!("monomorphising heterogeneous SIMD type `{}`", ty));
754 // The element type and number of elements of the SIMD vector
755 // are obtained from:
757 // * the element type and length of the single array field, if
758 // the first field is of array type, or
760 // * the homogeneous field type and the number of fields.
761 let (e_ty, e_len, is_array) = if let ty::Array(e_ty, _) = f0_ty.kind() {
762 // First ADT field is an array:
764 // SIMD vectors with multiple array fields are not supported:
765 // (should be caught by typeck)
766 if def.non_enum_variant().fields.len() != 1 {
767 tcx.sess.fatal(&format!(
768 "monomorphising SIMD type `{}` with more than one array field",
773 // Extract the number of elements from the layout of the array field:
774 let FieldsShape::Array { count, .. } = self.layout_of(f0_ty)?.layout.fields() else {
775 return Err(LayoutError::Unknown(ty));
778 (*e_ty, *count, true)
780 // First ADT field is not an array:
781 (f0_ty, def.non_enum_variant().fields.len() as _, false)
784 // SIMD vectors of zero length are not supported.
785 // Additionally, lengths are capped at 2^16 as a fixed maximum backends must
788 // Can't be caught in typeck if the array length is generic.
790 tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
791 } else if e_len > MAX_SIMD_LANES {
792 tcx.sess.fatal(&format!(
793 "monomorphising SIMD type `{}` of length greater than {}",
798 // Compute the ABI of the element type:
799 let e_ly = self.layout_of(e_ty)?;
800 let Abi::Scalar(e_abi) = e_ly.abi else {
801 // This error isn't caught in typeck, e.g., if
802 // the element type of the vector is generic.
803 tcx.sess.fatal(&format!(
804 "monomorphising SIMD type `{}` with a non-primitive-scalar \
805 (integer/float/pointer) element type `{}`",
810 // Compute the size and alignment of the vector:
811 let size = e_ly.size.checked_mul(e_len, dl).ok_or(LayoutError::SizeOverflow(ty))?;
812 let align = dl.vector_align(size);
813 let size = size.align_to(align.abi);
815 // Compute the placement of the vector fields:
816 let fields = if is_array {
817 FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] }
819 FieldsShape::Array { stride: e_ly.size, count: e_len }
822 tcx.intern_layout(LayoutS {
823 variants: Variants::Single { index: VariantIdx::new(0) },
825 abi: Abi::Vector { element: e_abi, count: e_len },
826 largest_niche: e_ly.largest_niche,
833 ty::Adt(def, substs) => {
834 // Cache the field layouts.
841 .map(|field| self.layout_of(field.ty(tcx, substs)))
842 .collect::<Result<Vec<_>, _>>()
844 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
847 if def.repr().pack.is_some() && def.repr().align.is_some() {
848 self.tcx.sess.delay_span_bug(
849 tcx.def_span(def.did()),
850 "union cannot be packed and aligned",
852 return Err(LayoutError::Unknown(ty));
856 if def.repr().pack.is_some() { dl.i8_align } else { dl.aggregate_align };
858 if let Some(repr_align) = def.repr().align {
859 align = align.max(AbiAndPrefAlign::new(repr_align));
862 let optimize = !def.repr().inhibit_union_abi_opt();
863 let mut size = Size::ZERO;
864 let mut abi = Abi::Aggregate { sized: true };
865 let index = VariantIdx::new(0);
866 for field in &variants[index] {
867 assert!(!field.is_unsized());
868 align = align.max(field.align);
870 // If all non-ZST fields have the same ABI, forward this ABI
871 if optimize && !field.is_zst() {
872 // Discard valid range information and allow undef
873 let field_abi = match field.abi {
874 Abi::Scalar(x) => Abi::Scalar(x.to_union()),
875 Abi::ScalarPair(x, y) => {
876 Abi::ScalarPair(x.to_union(), y.to_union())
878 Abi::Vector { element: x, count } => {
879 Abi::Vector { element: x.to_union(), count }
881 Abi::Uninhabited | Abi::Aggregate { .. } => {
882 Abi::Aggregate { sized: true }
886 if size == Size::ZERO {
887 // first non ZST: initialize 'abi'
889 } else if abi != field_abi {
890 // different fields have different ABI: reset to Aggregate
891 abi = Abi::Aggregate { sized: true };
895 size = cmp::max(size, field.size);
898 if let Some(pack) = def.repr().pack {
899 align = align.min(AbiAndPrefAlign::new(pack));
902 return Ok(tcx.intern_layout(LayoutS {
903 variants: Variants::Single { index },
904 fields: FieldsShape::Union(
905 NonZeroUsize::new(variants[index].len())
906 .ok_or(LayoutError::Unknown(ty))?,
911 size: size.align_to(align.abi),
915 // A variant is absent if it's uninhabited and only has ZST fields.
916 // Present uninhabited variants only require space for their fields,
917 // but *not* an encoding of the discriminant (e.g., a tag value).
918 // See issue #49298 for more details on the need to leave space
919 // for non-ZST uninhabited data (mostly partial initialization).
920 let absent = |fields: &[TyAndLayout<'_>]| {
921 let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
922 let is_zst = fields.iter().all(|f| f.is_zst());
923 uninhabited && is_zst
925 let (present_first, present_second) = {
926 let mut present_variants = variants
928 .filter_map(|(i, v)| if absent(v) { None } else { Some(i) });
929 (present_variants.next(), present_variants.next())
931 let present_first = match present_first {
932 Some(present_first) => present_first,
933 // Uninhabited because it has no variants, or only absent ones.
934 None if def.is_enum() => {
935 return Ok(tcx.layout_of(param_env.and(tcx.types.never))?.layout);
937 // If it's a struct, still compute a layout so that we can still compute the
939 None => VariantIdx::new(0),
942 let is_struct = !def.is_enum() ||
943 // Only one variant is present.
944 (present_second.is_none() &&
945 // Representation optimizations are allowed.
946 !def.repr().inhibit_enum_layout_opt());
948 // Struct, or univariant enum equivalent to a struct.
949 // (Typechecking will reject discriminant-sizing attrs.)
951 let v = present_first;
952 let kind = if def.is_enum() || variants[v].is_empty() {
953 StructKind::AlwaysSized
955 let param_env = tcx.param_env(def.did());
956 let last_field = def.variant(v).fields.last().unwrap();
958 tcx.type_of(last_field.did).is_sized(tcx.at(DUMMY_SP), param_env);
960 StructKind::MaybeUnsized
962 StructKind::AlwaysSized
966 let mut st = self.univariant_uninterned(ty, &variants[v], &def.repr(), kind)?;
967 st.variants = Variants::Single { index: v };
969 if def.is_unsafe_cell() {
970 let hide_niches = |scalar: &mut _| match scalar {
971 Scalar::Initialized { value, valid_range } => {
972 *valid_range = WrappingRange::full(value.size(dl))
974 // Already doesn't have any niches
975 Scalar::Union { .. } => {}
978 Abi::Uninhabited => {}
979 Abi::Scalar(scalar) => hide_niches(scalar),
980 Abi::ScalarPair(a, b) => {
984 Abi::Vector { element, count: _ } => hide_niches(element),
985 Abi::Aggregate { sized: _ } => {}
987 st.largest_niche = None;
988 return Ok(tcx.intern_layout(st));
991 let (start, end) = self.tcx.layout_scalar_valid_range(def.did());
993 Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
994 // the asserts ensure that we are not using the
995 // `#[rustc_layout_scalar_valid_range(n)]`
996 // attribute to widen the range of anything as that would probably
997 // result in UB somewhere
998 // FIXME(eddyb) the asserts are probably not needed,
999 // as larger validity ranges would result in missed
1000 // optimizations, *not* wrongly assuming the inner
1001 // value is valid. e.g. unions enlarge validity ranges,
1002 // because the values may be uninitialized.
1003 if let Bound::Included(start) = start {
1004 // FIXME(eddyb) this might be incorrect - it doesn't
1005 // account for wrap-around (end < start) ranges.
1006 let valid_range = scalar.valid_range_mut();
1007 assert!(valid_range.start <= start);
1008 valid_range.start = start;
1010 if let Bound::Included(end) = end {
1011 // FIXME(eddyb) this might be incorrect - it doesn't
1012 // account for wrap-around (end < start) ranges.
1013 let valid_range = scalar.valid_range_mut();
1014 assert!(valid_range.end >= end);
1015 valid_range.end = end;
1018 // Update `largest_niche` if we have introduced a larger niche.
1019 let niche = Niche::from_scalar(dl, Size::ZERO, *scalar);
1020 if let Some(niche) = niche {
1021 match st.largest_niche {
1022 Some(largest_niche) => {
1023 // Replace the existing niche even if they're equal,
1024 // because this one is at a lower offset.
1025 if largest_niche.available(dl) <= niche.available(dl) {
1026 st.largest_niche = Some(niche);
1029 None => st.largest_niche = Some(niche),
1034 start == Bound::Unbounded && end == Bound::Unbounded,
1035 "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
1041 return Ok(tcx.intern_layout(st));
1044 // At this point, we have handled all unions and
1045 // structs. (We have also handled univariant enums
1046 // that allow representation optimization.)
1047 assert!(def.is_enum());
1049 // Until we've decided whether to use the tagged or
1050 // niche filling LayoutS, we don't want to intern the
1051 // variant layouts, so we can't store them in the
1052 // overall LayoutS. Store the overall LayoutS
1053 // and the variant LayoutSs here until then.
1054 struct TmpLayout<'tcx> {
1055 layout: LayoutS<'tcx>,
1056 variants: IndexVec<VariantIdx, LayoutS<'tcx>>,
1059 let calculate_niche_filling_layout =
1060 || -> Result<Option<TmpLayout<'tcx>>, LayoutError<'tcx>> {
1061 // The current code for niche-filling relies on variant indices
1062 // instead of actual discriminants, so enums with
1063 // explicit discriminants (RFC #2363) would misbehave.
1064 if def.repr().inhibit_enum_layout_opt()
1068 .any(|(i, v)| v.discr != ty::VariantDiscr::Relative(i.as_u32()))
1073 if variants.len() < 2 {
1077 let mut align = dl.aggregate_align;
1078 let mut variant_layouts = variants
1081 let mut st = self.univariant_uninterned(
1085 StructKind::AlwaysSized,
1087 st.variants = Variants::Single { index: j };
1089 align = align.max(st.align);
1093 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1095 let largest_variant_index = match variant_layouts
1097 .max_by_key(|(_i, layout)| layout.size.bytes())
1098 .map(|(i, _layout)| i)
1100 None => return Ok(None),
1104 let all_indices = VariantIdx::new(0)..=VariantIdx::new(variants.len() - 1);
1105 let needs_disc = |index: VariantIdx| {
1106 index != largest_variant_index && !absent(&variants[index])
1108 let niche_variants = all_indices.clone().find(|v| needs_disc(*v)).unwrap()
1109 ..=all_indices.rev().find(|v| needs_disc(*v)).unwrap();
1111 let count = niche_variants.size_hint().1.unwrap() as u128;
1113 // Find the field with the largest niche
1114 let (field_index, niche, (niche_start, niche_scalar)) = match variants
1115 [largest_variant_index]
1118 .filter_map(|(j, field)| Some((j, field.largest_niche?)))
1119 .max_by_key(|(_, niche)| niche.available(dl))
1120 .and_then(|(j, niche)| Some((j, niche, niche.reserve(self, count)?)))
1122 None => return Ok(None),
1126 let niche_offset = niche.offset
1127 + variant_layouts[largest_variant_index].fields.offset(field_index);
1128 let niche_size = niche.value.size(dl);
1129 let size = variant_layouts[largest_variant_index].size.align_to(align.abi);
1131 let all_variants_fit =
1132 variant_layouts.iter_enumerated_mut().all(|(i, layout)| {
1133 if i == largest_variant_index {
1137 layout.largest_niche = None;
1139 if layout.size <= niche_offset {
1140 // This variant will fit before the niche.
1144 // Determine if it'll fit after the niche.
1145 let this_align = layout.align.abi;
1146 let this_offset = (niche_offset + niche_size).align_to(this_align);
1148 if this_offset + layout.size > size {
1152 // It'll fit, but we need to make some adjustments.
1153 match layout.fields {
1154 FieldsShape::Arbitrary { ref mut offsets, .. } => {
1155 for (j, offset) in offsets.iter_mut().enumerate() {
1156 if !variants[i][j].is_zst() {
1157 *offset += this_offset;
1162 panic!("Layout of fields should be Arbitrary for variants")
1166 // It can't be a Scalar or ScalarPair because the offset isn't 0.
1167 if !layout.abi.is_uninhabited() {
1168 layout.abi = Abi::Aggregate { sized: true };
1170 layout.size += this_offset;
1175 if !all_variants_fit {
1179 let largest_niche = Niche::from_scalar(dl, niche_offset, niche_scalar);
1181 let others_zst = variant_layouts.iter_enumerated().all(|(i, layout)| {
1182 i == largest_variant_index || layout.size == Size::ZERO
1184 let same_size = size == variant_layouts[largest_variant_index].size;
1185 let same_align = align == variant_layouts[largest_variant_index].align;
1187 let abi = if variant_layouts.iter().all(|v| v.abi.is_uninhabited()) {
1189 } else if same_size && same_align && others_zst {
1190 match variant_layouts[largest_variant_index].abi {
1191 // When the total alignment and size match, we can use the
1192 // same ABI as the scalar variant with the reserved niche.
1193 Abi::Scalar(_) => Abi::Scalar(niche_scalar),
1194 Abi::ScalarPair(first, second) => {
1195 // Only the niche is guaranteed to be initialised,
1196 // so use union layouts for the other primitive.
1197 if niche_offset == Size::ZERO {
1198 Abi::ScalarPair(niche_scalar, second.to_union())
1200 Abi::ScalarPair(first.to_union(), niche_scalar)
1203 _ => Abi::Aggregate { sized: true },
1206 Abi::Aggregate { sized: true }
1209 let layout = LayoutS {
1210 variants: Variants::Multiple {
1212 tag_encoding: TagEncoding::Niche {
1213 untagged_variant: largest_variant_index,
1218 variants: IndexVec::new(),
1220 fields: FieldsShape::Arbitrary {
1221 offsets: vec![niche_offset],
1222 memory_index: vec![0],
1230 Ok(Some(TmpLayout { layout, variants: variant_layouts }))
1233 let niche_filling_layout = calculate_niche_filling_layout()?;
1235 let (mut min, mut max) = (i128::MAX, i128::MIN);
1236 let discr_type = def.repr().discr_type();
1237 let bits = Integer::from_attr(self, discr_type).size().bits();
1238 for (i, discr) in def.discriminants(tcx) {
1239 if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
1242 let mut x = discr.val as i128;
1243 if discr_type.is_signed() {
1244 // sign extend the raw representation to be an i128
1245 x = (x << (128 - bits)) >> (128 - bits);
1254 // We might have no inhabited variants, so pretend there's at least one.
1255 if (min, max) == (i128::MAX, i128::MIN) {
1259 assert!(min <= max, "discriminant range is {}...{}", min, max);
1260 let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr(), min, max);
1262 let mut align = dl.aggregate_align;
1263 let mut size = Size::ZERO;
1265 // We're interested in the smallest alignment, so start large.
1266 let mut start_align = Align::from_bytes(256).unwrap();
1267 assert_eq!(Integer::for_align(dl, start_align), None);
1269 // repr(C) on an enum tells us to make a (tag, union) layout,
1270 // so we need to grow the prefix alignment to be at least
1271 // the alignment of the union. (This value is used both for
1272 // determining the alignment of the overall enum, and the
1273 // determining the alignment of the payload after the tag.)
1274 let mut prefix_align = min_ity.align(dl).abi;
1276 for fields in &variants {
1277 for field in fields {
1278 prefix_align = prefix_align.max(field.align.abi);
1283 // Create the set of structs that represent each variant.
1284 let mut layout_variants = variants
1286 .map(|(i, field_layouts)| {
1287 let mut st = self.univariant_uninterned(
1291 StructKind::Prefixed(min_ity.size(), prefix_align),
1293 st.variants = Variants::Single { index: i };
1294 // Find the first field we can't move later
1295 // to make room for a larger discriminant.
1297 st.fields.index_by_increasing_offset().map(|j| field_layouts[j])
1299 if !field.is_zst() || field.align.abi.bytes() != 1 {
1300 start_align = start_align.min(field.align.abi);
1304 size = cmp::max(size, st.size);
1305 align = align.max(st.align);
1308 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1310 // Align the maximum variant size to the largest alignment.
1311 size = size.align_to(align.abi);
1313 if size.bytes() >= dl.obj_size_bound() {
1314 return Err(LayoutError::SizeOverflow(ty));
1317 let typeck_ity = Integer::from_attr(dl, def.repr().discr_type());
1318 if typeck_ity < min_ity {
1319 // It is a bug if Layout decided on a greater discriminant size than typeck for
1320 // some reason at this point (based on values discriminant can take on). Mostly
1321 // because this discriminant will be loaded, and then stored into variable of
1322 // type calculated by typeck. Consider such case (a bug): typeck decided on
1323 // byte-sized discriminant, but layout thinks we need a 16-bit to store all
1324 // discriminant values. That would be a bug, because then, in codegen, in order
1325 // to store this 16-bit discriminant into 8-bit sized temporary some of the
1326 // space necessary to represent would have to be discarded (or layout is wrong
1327 // on thinking it needs 16 bits)
1329 "layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
1333 // However, it is fine to make discr type however large (as an optimisation)
1334 // after this point – we’ll just truncate the value we load in codegen.
1337 // Check to see if we should use a different type for the
1338 // discriminant. We can safely use a type with the same size
1339 // as the alignment of the first field of each variant.
1340 // We increase the size of the discriminant to avoid LLVM copying
1341 // padding when it doesn't need to. This normally causes unaligned
1342 // load/stores and excessive memcpy/memset operations. By using a
1343 // bigger integer size, LLVM can be sure about its contents and
1344 // won't be so conservative.
1346 // Use the initial field alignment
1347 let mut ity = if def.repr().c() || def.repr().int.is_some() {
1350 Integer::for_align(dl, start_align).unwrap_or(min_ity)
1353 // If the alignment is not larger than the chosen discriminant size,
1354 // don't use the alignment as the final size.
1358 // Patch up the variants' first few fields.
1359 let old_ity_size = min_ity.size();
1360 let new_ity_size = ity.size();
1361 for variant in &mut layout_variants {
1362 match variant.fields {
1363 FieldsShape::Arbitrary { ref mut offsets, .. } => {
1365 if *i <= old_ity_size {
1366 assert_eq!(*i, old_ity_size);
1370 // We might be making the struct larger.
1371 if variant.size <= old_ity_size {
1372 variant.size = new_ity_size;
1380 let tag_mask = ity.size().unsigned_int_max();
1381 let tag = Scalar::Initialized {
1382 value: Int(ity, signed),
1383 valid_range: WrappingRange {
1384 start: (min as u128 & tag_mask),
1385 end: (max as u128 & tag_mask),
1388 let mut abi = Abi::Aggregate { sized: true };
1390 if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
1391 abi = Abi::Uninhabited;
1392 } else if tag.size(dl) == size {
1393 // Make sure we only use scalar layout when the enum is entirely its
1394 // own tag (i.e. it has no padding nor any non-ZST variant fields).
1395 abi = Abi::Scalar(tag);
1397 // Try to use a ScalarPair for all tagged enums.
1398 let mut common_prim = None;
1399 let mut common_prim_initialized_in_all_variants = true;
1400 for (field_layouts, layout_variant) in iter::zip(&variants, &layout_variants) {
1401 let FieldsShape::Arbitrary { ref offsets, .. } = layout_variant.fields else {
1405 iter::zip(field_layouts, offsets).filter(|p| !p.0.is_zst());
1406 let (field, offset) = match (fields.next(), fields.next()) {
1408 common_prim_initialized_in_all_variants = false;
1411 (Some(pair), None) => pair,
1417 let prim = match field.abi {
1418 Abi::Scalar(scalar) => {
1419 common_prim_initialized_in_all_variants &=
1420 matches!(scalar, Scalar::Initialized { .. });
1428 if let Some(pair) = common_prim {
1429 // This is pretty conservative. We could go fancier
1430 // by conflating things like i32 and u32, or even
1431 // realising that (u8, u8) could just cohabit with
1433 if pair != (prim, offset) {
1438 common_prim = Some((prim, offset));
1441 if let Some((prim, offset)) = common_prim {
1442 let prim_scalar = if common_prim_initialized_in_all_variants {
1445 // Common prim might be uninit.
1446 Scalar::Union { value: prim }
1448 let pair = self.scalar_pair(tag, prim_scalar);
1449 let pair_offsets = match pair.fields {
1450 FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
1451 assert_eq!(memory_index, &[0, 1]);
1456 if pair_offsets[0] == Size::ZERO
1457 && pair_offsets[1] == *offset
1458 && align == pair.align
1459 && size == pair.size
1461 // We can use `ScalarPair` only when it matches our
1462 // already computed layout (including `#[repr(C)]`).
1468 // If we pick a "clever" (by-value) ABI, we might have to adjust the ABI of the
1469 // variants to ensure they are consistent. This is because a downcast is
1470 // semantically a NOP, and thus should not affect layout.
1471 if matches!(abi, Abi::Scalar(..) | Abi::ScalarPair(..)) {
1472 for variant in &mut layout_variants {
1473 // We only do this for variants with fields; the others are not accessed anyway.
1474 // Also do not overwrite any already existing "clever" ABIs.
1475 if variant.fields.count() > 0
1476 && matches!(variant.abi, Abi::Aggregate { .. })
1479 // Also need to bump up the size and alignment, so that the entire value fits in here.
1480 variant.size = cmp::max(variant.size, size);
1481 variant.align.abi = cmp::max(variant.align.abi, align.abi);
1486 let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag);
1488 let tagged_layout = LayoutS {
1489 variants: Variants::Multiple {
1491 tag_encoding: TagEncoding::Direct,
1493 variants: IndexVec::new(),
1495 fields: FieldsShape::Arbitrary {
1496 offsets: vec![Size::ZERO],
1497 memory_index: vec![0],
1505 let tagged_layout = TmpLayout { layout: tagged_layout, variants: layout_variants };
1507 let mut best_layout = match (tagged_layout, niche_filling_layout) {
1509 // Pick the smaller layout; otherwise,
1510 // pick the layout with the larger niche; otherwise,
1511 // pick tagged as it has simpler codegen.
1513 let niche_size = |tmp_l: &TmpLayout<'_>| {
1514 tmp_l.layout.largest_niche.map_or(0, |n| n.available(dl))
1517 tl.layout.size.cmp(&nl.layout.size),
1518 niche_size(&tl).cmp(&niche_size(&nl)),
1521 (Equal, Less) => nl,
1528 // Now we can intern the variant layouts and store them in the enum layout.
1529 best_layout.layout.variants = match best_layout.layout.variants {
1530 Variants::Multiple { tag, tag_encoding, tag_field, .. } => Variants::Multiple {
1534 variants: best_layout
1537 .map(|layout| tcx.intern_layout(layout))
1543 tcx.intern_layout(best_layout.layout)
1546 // Types with no meaningful known layout.
1547 ty::Projection(_) | ty::Opaque(..) => {
1548 // NOTE(eddyb) `layout_of` query should've normalized these away,
1549 // if that was possible, so there's no reason to try again here.
1550 return Err(LayoutError::Unknown(ty));
1553 ty::Placeholder(..) | ty::GeneratorWitness(..) | ty::Infer(_) => {
1554 bug!("Layout::compute: unexpected type `{}`", ty)
1557 ty::Bound(..) | ty::Param(_) | ty::Error(_) => {
1558 return Err(LayoutError::Unknown(ty));
1564 /// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
1565 #[derive(Clone, Debug, PartialEq)]
1566 enum SavedLocalEligibility {
1568 Assigned(VariantIdx),
1569 // FIXME: Use newtype_index so we aren't wasting bytes
1570 Ineligible(Option<u32>),
1573 // When laying out generators, we divide our saved local fields into two
1574 // categories: overlap-eligible and overlap-ineligible.
1576 // Those fields which are ineligible for overlap go in a "prefix" at the
1577 // beginning of the layout, and always have space reserved for them.
1579 // Overlap-eligible fields are only assigned to one variant, so we lay
1580 // those fields out for each variant and put them right after the
1583 // Finally, in the layout details, we point to the fields from the
1584 // variants they are assigned to. It is possible for some fields to be
1585 // included in multiple variants. No field ever "moves around" in the
1586 // layout; its offset is always the same.
1588 // Also included in the layout are the upvars and the discriminant.
1589 // These are included as fields on the "outer" layout; they are not part
1591 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
1592 /// Compute the eligibility and assignment of each local.
1593 fn generator_saved_local_eligibility(
1595 info: &GeneratorLayout<'tcx>,
1596 ) -> (BitSet<GeneratorSavedLocal>, IndexVec<GeneratorSavedLocal, SavedLocalEligibility>) {
1597 use SavedLocalEligibility::*;
1599 let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> =
1600 IndexVec::from_elem_n(Unassigned, info.field_tys.len());
1602 // The saved locals not eligible for overlap. These will get
1603 // "promoted" to the prefix of our generator.
1604 let mut ineligible_locals = BitSet::new_empty(info.field_tys.len());
1606 // Figure out which of our saved locals are fields in only
1607 // one variant. The rest are deemed ineligible for overlap.
1608 for (variant_index, fields) in info.variant_fields.iter_enumerated() {
1609 for local in fields {
1610 match assignments[*local] {
1612 assignments[*local] = Assigned(variant_index);
1615 // We've already seen this local at another suspension
1616 // point, so it is no longer a candidate.
1618 "removing local {:?} in >1 variant ({:?}, {:?})",
1623 ineligible_locals.insert(*local);
1624 assignments[*local] = Ineligible(None);
1631 // Next, check every pair of eligible locals to see if they
1633 for local_a in info.storage_conflicts.rows() {
1634 let conflicts_a = info.storage_conflicts.count(local_a);
1635 if ineligible_locals.contains(local_a) {
1639 for local_b in info.storage_conflicts.iter(local_a) {
1640 // local_a and local_b are storage live at the same time, therefore they
1641 // cannot overlap in the generator layout. The only way to guarantee
1642 // this is if they are in the same variant, or one is ineligible
1643 // (which means it is stored in every variant).
1644 if ineligible_locals.contains(local_b)
1645 || assignments[local_a] == assignments[local_b]
1650 // If they conflict, we will choose one to make ineligible.
1651 // This is not always optimal; it's just a greedy heuristic that
1652 // seems to produce good results most of the time.
1653 let conflicts_b = info.storage_conflicts.count(local_b);
1654 let (remove, other) =
1655 if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
1656 ineligible_locals.insert(remove);
1657 assignments[remove] = Ineligible(None);
1658 trace!("removing local {:?} due to conflict with {:?}", remove, other);
1662 // Count the number of variants in use. If only one of them, then it is
1663 // impossible to overlap any locals in our layout. In this case it's
1664 // always better to make the remaining locals ineligible, so we can
1665 // lay them out with the other locals in the prefix and eliminate
1666 // unnecessary padding bytes.
1668 let mut used_variants = BitSet::new_empty(info.variant_fields.len());
1669 for assignment in &assignments {
1670 if let Assigned(idx) = assignment {
1671 used_variants.insert(*idx);
1674 if used_variants.count() < 2 {
1675 for assignment in assignments.iter_mut() {
1676 *assignment = Ineligible(None);
1678 ineligible_locals.insert_all();
1682 // Write down the order of our locals that will be promoted to the prefix.
1684 for (idx, local) in ineligible_locals.iter().enumerate() {
1685 assignments[local] = Ineligible(Some(idx as u32));
1688 debug!("generator saved local assignments: {:?}", assignments);
1690 (ineligible_locals, assignments)
1693 /// Compute the full generator layout.
1694 fn generator_layout(
1697 def_id: hir::def_id::DefId,
1698 substs: SubstsRef<'tcx>,
1699 ) -> Result<Layout<'tcx>, LayoutError<'tcx>> {
1700 use SavedLocalEligibility::*;
1702 let subst_field = |ty: Ty<'tcx>| EarlyBinder(ty).subst(tcx, substs);
1704 let Some(info) = tcx.generator_layout(def_id) else {
1705 return Err(LayoutError::Unknown(ty));
1707 let (ineligible_locals, assignments) = self.generator_saved_local_eligibility(&info);
1709 // Build a prefix layout, including "promoting" all ineligible
1710 // locals as part of the prefix. We compute the layout of all of
1711 // these fields at once to get optimal packing.
1712 let tag_index = substs.as_generator().prefix_tys().count();
1714 // `info.variant_fields` already accounts for the reserved variants, so no need to add them.
1715 let max_discr = (info.variant_fields.len() - 1) as u128;
1716 let discr_int = Integer::fit_unsigned(max_discr);
1717 let discr_int_ty = discr_int.to_ty(tcx, false);
1718 let tag = Scalar::Initialized {
1719 value: Primitive::Int(discr_int, false),
1720 valid_range: WrappingRange { start: 0, end: max_discr },
1722 let tag_layout = self.tcx.intern_layout(LayoutS::scalar(self, tag));
1723 let tag_layout = TyAndLayout { ty: discr_int_ty, layout: tag_layout };
1725 let promoted_layouts = ineligible_locals
1727 .map(|local| subst_field(info.field_tys[local]))
1728 .map(|ty| tcx.mk_maybe_uninit(ty))
1729 .map(|ty| self.layout_of(ty));
1730 let prefix_layouts = substs
1733 .map(|ty| self.layout_of(ty))
1734 .chain(iter::once(Ok(tag_layout)))
1735 .chain(promoted_layouts)
1736 .collect::<Result<Vec<_>, _>>()?;
1737 let prefix = self.univariant_uninterned(
1740 &ReprOptions::default(),
1741 StructKind::AlwaysSized,
1744 let (prefix_size, prefix_align) = (prefix.size, prefix.align);
1746 // Split the prefix layout into the "outer" fields (upvars and
1747 // discriminant) and the "promoted" fields. Promoted fields will
1748 // get included in each variant that requested them in
1750 debug!("prefix = {:#?}", prefix);
1751 let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
1752 FieldsShape::Arbitrary { mut offsets, memory_index } => {
1753 let mut inverse_memory_index = invert_mapping(&memory_index);
1755 // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
1756 // "outer" and "promoted" fields respectively.
1757 let b_start = (tag_index + 1) as u32;
1758 let offsets_b = offsets.split_off(b_start as usize);
1759 let offsets_a = offsets;
1761 // Disentangle the "a" and "b" components of `inverse_memory_index`
1762 // by preserving the order but keeping only one disjoint "half" each.
1763 // FIXME(eddyb) build a better abstraction for permutations, if possible.
1764 let inverse_memory_index_b: Vec<_> =
1765 inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
1766 inverse_memory_index.retain(|&i| i < b_start);
1767 let inverse_memory_index_a = inverse_memory_index;
1769 // Since `inverse_memory_index_{a,b}` each only refer to their
1770 // respective fields, they can be safely inverted
1771 let memory_index_a = invert_mapping(&inverse_memory_index_a);
1772 let memory_index_b = invert_mapping(&inverse_memory_index_b);
1775 FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
1776 (outer_fields, offsets_b, memory_index_b)
1781 let mut size = prefix.size;
1782 let mut align = prefix.align;
1786 .map(|(index, variant_fields)| {
1787 // Only include overlap-eligible fields when we compute our variant layout.
1788 let variant_only_tys = variant_fields
1790 .filter(|local| match assignments[**local] {
1791 Unassigned => bug!(),
1792 Assigned(v) if v == index => true,
1793 Assigned(_) => bug!("assignment does not match variant"),
1794 Ineligible(_) => false,
1796 .map(|local| subst_field(info.field_tys[*local]));
1798 let mut variant = self.univariant_uninterned(
1801 .map(|ty| self.layout_of(ty))
1802 .collect::<Result<Vec<_>, _>>()?,
1803 &ReprOptions::default(),
1804 StructKind::Prefixed(prefix_size, prefix_align.abi),
1806 variant.variants = Variants::Single { index };
1808 let FieldsShape::Arbitrary { offsets, memory_index } = variant.fields else {
1812 // Now, stitch the promoted and variant-only fields back together in
1813 // the order they are mentioned by our GeneratorLayout.
1814 // Because we only use some subset (that can differ between variants)
1815 // of the promoted fields, we can't just pick those elements of the
1816 // `promoted_memory_index` (as we'd end up with gaps).
1817 // So instead, we build an "inverse memory_index", as if all of the
1818 // promoted fields were being used, but leave the elements not in the
1819 // subset as `INVALID_FIELD_IDX`, which we can filter out later to
1820 // obtain a valid (bijective) mapping.
1821 const INVALID_FIELD_IDX: u32 = !0;
1822 let mut combined_inverse_memory_index =
1823 vec![INVALID_FIELD_IDX; promoted_memory_index.len() + memory_index.len()];
1824 let mut offsets_and_memory_index = iter::zip(offsets, memory_index);
1825 let combined_offsets = variant_fields
1829 let (offset, memory_index) = match assignments[*local] {
1830 Unassigned => bug!(),
1832 let (offset, memory_index) =
1833 offsets_and_memory_index.next().unwrap();
1834 (offset, promoted_memory_index.len() as u32 + memory_index)
1836 Ineligible(field_idx) => {
1837 let field_idx = field_idx.unwrap() as usize;
1838 (promoted_offsets[field_idx], promoted_memory_index[field_idx])
1841 combined_inverse_memory_index[memory_index as usize] = i as u32;
1846 // Remove the unused slots and invert the mapping to obtain the
1847 // combined `memory_index` (also see previous comment).
1848 combined_inverse_memory_index.retain(|&i| i != INVALID_FIELD_IDX);
1849 let combined_memory_index = invert_mapping(&combined_inverse_memory_index);
1851 variant.fields = FieldsShape::Arbitrary {
1852 offsets: combined_offsets,
1853 memory_index: combined_memory_index,
1856 size = size.max(variant.size);
1857 align = align.max(variant.align);
1858 Ok(tcx.intern_layout(variant))
1860 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1862 size = size.align_to(align.abi);
1865 if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi().is_uninhabited()) {
1868 Abi::Aggregate { sized: true }
1871 let layout = tcx.intern_layout(LayoutS {
1872 variants: Variants::Multiple {
1874 tag_encoding: TagEncoding::Direct,
1875 tag_field: tag_index,
1878 fields: outer_fields,
1880 largest_niche: prefix.largest_niche,
1884 debug!("generator layout ({:?}): {:#?}", ty, layout);
1888 /// This is invoked by the `layout_of` query to record the final
1889 /// layout of each type.
1891 fn record_layout_for_printing(&self, layout: TyAndLayout<'tcx>) {
1892 // If we are running with `-Zprint-type-sizes`, maybe record layouts
1893 // for dumping later.
1894 if self.tcx.sess.opts.unstable_opts.print_type_sizes {
1895 self.record_layout_for_printing_outlined(layout)
1899 fn record_layout_for_printing_outlined(&self, layout: TyAndLayout<'tcx>) {
1900 // Ignore layouts that are done with non-empty environments or
1901 // non-monomorphic layouts, as the user only wants to see the stuff
1902 // resulting from the final codegen session.
1903 if layout.ty.has_param_types_or_consts() || !self.param_env.caller_bounds().is_empty() {
1907 // (delay format until we actually need it)
1908 let record = |kind, packed, opt_discr_size, variants| {
1909 let type_desc = format!("{:?}", layout.ty);
1910 self.tcx.sess.code_stats.record_type_size(
1921 let adt_def = match *layout.ty.kind() {
1922 ty::Adt(ref adt_def, _) => {
1923 debug!("print-type-size t: `{:?}` process adt", layout.ty);
1927 ty::Closure(..) => {
1928 debug!("print-type-size t: `{:?}` record closure", layout.ty);
1929 record(DataTypeKind::Closure, false, None, vec![]);
1934 debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
1939 let adt_kind = adt_def.adt_kind();
1940 let adt_packed = adt_def.repr().pack.is_some();
1942 let build_variant_info = |n: Option<Symbol>, flds: &[Symbol], layout: TyAndLayout<'tcx>| {
1943 let mut min_size = Size::ZERO;
1944 let field_info: Vec<_> = flds
1948 let field_layout = layout.field(self, i);
1949 let offset = layout.fields.offset(i);
1950 let field_end = offset + field_layout.size;
1951 if min_size < field_end {
1952 min_size = field_end;
1956 offset: offset.bytes(),
1957 size: field_layout.size.bytes(),
1958 align: field_layout.align.abi.bytes(),
1965 kind: if layout.is_unsized() { SizeKind::Min } else { SizeKind::Exact },
1966 align: layout.align.abi.bytes(),
1967 size: if min_size.bytes() == 0 { layout.size.bytes() } else { min_size.bytes() },
1972 match layout.variants {
1973 Variants::Single { index } => {
1974 if !adt_def.variants().is_empty() && layout.fields != FieldsShape::Primitive {
1976 "print-type-size `{:#?}` variant {}",
1978 adt_def.variant(index).name
1980 let variant_def = &adt_def.variant(index);
1981 let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
1986 vec![build_variant_info(Some(variant_def.name), &fields, layout)],
1989 // (This case arises for *empty* enums; so give it
1991 record(adt_kind.into(), adt_packed, None, vec![]);
1995 Variants::Multiple { tag, ref tag_encoding, .. } => {
1997 "print-type-size `{:#?}` adt general variants def {}",
1999 adt_def.variants().len()
2001 let variant_infos: Vec<_> = adt_def
2004 .map(|(i, variant_def)| {
2005 let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
2007 Some(variant_def.name),
2009 layout.for_variant(self, i),
2016 match tag_encoding {
2017 TagEncoding::Direct => Some(tag.size(self)),
2027 /// Type size "skeleton", i.e., the only information determining a type's size.
2028 /// While this is conservative, (aside from constant sizes, only pointers,
2029 /// newtypes thereof and null pointer optimized enums are allowed), it is
2030 /// enough to statically check common use cases of transmute.
2031 #[derive(Copy, Clone, Debug)]
2032 pub enum SizeSkeleton<'tcx> {
2033 /// Any statically computable Layout.
2036 /// A potentially-fat pointer.
2038 /// If true, this pointer is never null.
2040 /// The type which determines the unsized metadata, if any,
2041 /// of this pointer. Either a type parameter or a projection
2042 /// depending on one, with regions erased.
2047 impl<'tcx> SizeSkeleton<'tcx> {
2051 param_env: ty::ParamEnv<'tcx>,
2052 ) -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
2053 debug_assert!(!ty.has_infer_types_or_consts());
2055 // First try computing a static layout.
2056 let err = match tcx.layout_of(param_env.and(ty)) {
2058 return Ok(SizeSkeleton::Known(layout.size));
2064 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
2065 let non_zero = !ty.is_unsafe_ptr();
2066 let tail = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
2068 ty::Param(_) | ty::Projection(_) => {
2069 debug_assert!(tail.has_param_types_or_consts());
2070 Ok(SizeSkeleton::Pointer { non_zero, tail: tcx.erase_regions(tail) })
2073 "SizeSkeleton::compute({}): layout errored ({}), yet \
2074 tail `{}` is not a type parameter or a projection",
2082 ty::Adt(def, substs) => {
2083 // Only newtypes and enums w/ nullable pointer optimization.
2084 if def.is_union() || def.variants().is_empty() || def.variants().len() > 2 {
2088 // Get a zero-sized variant or a pointer newtype.
2089 let zero_or_ptr_variant = |i| {
2090 let i = VariantIdx::new(i);
2092 def.variant(i).fields.iter().map(|field| {
2093 SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env)
2096 for field in fields {
2099 SizeSkeleton::Known(size) => {
2100 if size.bytes() > 0 {
2104 SizeSkeleton::Pointer { .. } => {
2115 let v0 = zero_or_ptr_variant(0)?;
2117 if def.variants().len() == 1 {
2118 if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
2119 return Ok(SizeSkeleton::Pointer {
2121 || match tcx.layout_scalar_valid_range(def.did()) {
2122 (Bound::Included(start), Bound::Unbounded) => start > 0,
2123 (Bound::Included(start), Bound::Included(end)) => {
2124 0 < start && start < end
2135 let v1 = zero_or_ptr_variant(1)?;
2136 // Nullable pointer enum optimization.
2138 (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None)
2139 | (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
2140 Ok(SizeSkeleton::Pointer { non_zero: false, tail })
2146 ty::Projection(_) | ty::Opaque(..) => {
2147 let normalized = tcx.normalize_erasing_regions(param_env, ty);
2148 if ty == normalized {
2151 SizeSkeleton::compute(normalized, tcx, param_env)
2159 pub fn same_size(self, other: SizeSkeleton<'tcx>) -> bool {
2160 match (self, other) {
2161 (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
2162 (SizeSkeleton::Pointer { tail: a, .. }, SizeSkeleton::Pointer { tail: b, .. }) => {
2170 pub trait HasTyCtxt<'tcx>: HasDataLayout {
2171 fn tcx(&self) -> TyCtxt<'tcx>;
2174 pub trait HasParamEnv<'tcx> {
2175 fn param_env(&self) -> ty::ParamEnv<'tcx>;
2178 impl<'tcx> HasDataLayout for TyCtxt<'tcx> {
2180 fn data_layout(&self) -> &TargetDataLayout {
2185 impl<'tcx> HasTargetSpec for TyCtxt<'tcx> {
2186 fn target_spec(&self) -> &Target {
2191 impl<'tcx> HasTyCtxt<'tcx> for TyCtxt<'tcx> {
2193 fn tcx(&self) -> TyCtxt<'tcx> {
2198 impl<'tcx> HasDataLayout for ty::query::TyCtxtAt<'tcx> {
2200 fn data_layout(&self) -> &TargetDataLayout {
2205 impl<'tcx> HasTargetSpec for ty::query::TyCtxtAt<'tcx> {
2206 fn target_spec(&self) -> &Target {
2211 impl<'tcx> HasTyCtxt<'tcx> for ty::query::TyCtxtAt<'tcx> {
2213 fn tcx(&self) -> TyCtxt<'tcx> {
2218 impl<'tcx, C> HasParamEnv<'tcx> for LayoutCx<'tcx, C> {
2219 fn param_env(&self) -> ty::ParamEnv<'tcx> {
2224 impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
2225 fn data_layout(&self) -> &TargetDataLayout {
2226 self.tcx.data_layout()
2230 impl<'tcx, T: HasTargetSpec> HasTargetSpec for LayoutCx<'tcx, T> {
2231 fn target_spec(&self) -> &Target {
2232 self.tcx.target_spec()
2236 impl<'tcx, T: HasTyCtxt<'tcx>> HasTyCtxt<'tcx> for LayoutCx<'tcx, T> {
2237 fn tcx(&self) -> TyCtxt<'tcx> {
2242 pub trait MaybeResult<T> {
2245 fn from(x: Result<T, Self::Error>) -> Self;
2246 fn to_result(self) -> Result<T, Self::Error>;
2249 impl<T> MaybeResult<T> for T {
2252 fn from(Ok(x): Result<T, Self::Error>) -> Self {
2255 fn to_result(self) -> Result<T, Self::Error> {
2260 impl<T, E> MaybeResult<T> for Result<T, E> {
2263 fn from(x: Result<T, Self::Error>) -> Self {
2266 fn to_result(self) -> Result<T, Self::Error> {
2271 pub type TyAndLayout<'tcx> = rustc_target::abi::TyAndLayout<'tcx, Ty<'tcx>>;
2273 /// Trait for contexts that want to be able to compute layouts of types.
2274 /// This automatically gives access to `LayoutOf`, through a blanket `impl`.
2275 pub trait LayoutOfHelpers<'tcx>: HasDataLayout + HasTyCtxt<'tcx> + HasParamEnv<'tcx> {
2276 /// The `TyAndLayout`-wrapping type (or `TyAndLayout` itself), which will be
2277 /// returned from `layout_of` (see also `handle_layout_err`).
2278 type LayoutOfResult: MaybeResult<TyAndLayout<'tcx>>;
2280 /// `Span` to use for `tcx.at(span)`, from `layout_of`.
2281 // FIXME(eddyb) perhaps make this mandatory to get contexts to track it better?
2283 fn layout_tcx_at_span(&self) -> Span {
2287 /// Helper used for `layout_of`, to adapt `tcx.layout_of(...)` into a
2288 /// `Self::LayoutOfResult` (which does not need to be a `Result<...>`).
2290 /// Most `impl`s, which propagate `LayoutError`s, should simply return `err`,
2291 /// but this hook allows e.g. codegen to return only `TyAndLayout` from its
2292 /// `cx.layout_of(...)`, without any `Result<...>` around it to deal with
2293 /// (and any `LayoutError`s are turned into fatal errors or ICEs).
2294 fn handle_layout_err(
2296 err: LayoutError<'tcx>,
2299 ) -> <Self::LayoutOfResult as MaybeResult<TyAndLayout<'tcx>>>::Error;
2302 /// Blanket extension trait for contexts that can compute layouts of types.
2303 pub trait LayoutOf<'tcx>: LayoutOfHelpers<'tcx> {
2304 /// Computes the layout of a type. Note that this implicitly
2305 /// executes in "reveal all" mode, and will normalize the input type.
2307 fn layout_of(&self, ty: Ty<'tcx>) -> Self::LayoutOfResult {
// Delegate to the spanned variant with a dummy span; the helper below
// substitutes `layout_tcx_at_span()` for dummy spans.
2308 self.spanned_layout_of(ty, DUMMY_SP)
2311 /// Computes the layout of a type, at `span`. Note that this implicitly
2312 /// executes in "reveal all" mode, and will normalize the input type.
2313 // FIXME(eddyb) avoid passing information like this, and instead add more
2314 // `TyCtxt::at`-like APIs to be able to do e.g. `cx.at(span).layout_of(ty)`.
2316 fn spanned_layout_of(&self, ty: Ty<'tcx>, span: Span) -> Self::LayoutOfResult {
2317 let span = if !span.is_dummy() { span } else { self.layout_tcx_at_span() };
2318 let tcx = self.tcx().at(span);
// Run the `layout_of` query and route any `LayoutError` through the
// context's `handle_layout_err` hook.
2321 tcx.layout_of(self.param_env().and(ty))
2322 .map_err(|err| self.handle_layout_err(err, span, ty)),
2327 impl<'tcx, C: LayoutOfHelpers<'tcx>> LayoutOf<'tcx> for C {}
// `LayoutCx<TyCtxt>` propagates layout errors unchanged to the caller.
2329 impl<'tcx> LayoutOfHelpers<'tcx> for LayoutCx<'tcx, TyCtxt<'tcx>> {
2330 type LayoutOfResult = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
2333 fn handle_layout_err(&self, err: LayoutError<'tcx>, _: Span, _: Ty<'tcx>) -> LayoutError<'tcx> {
// Same as above, but the `TyCtxtAt` variant also supplies its stored span
// for better diagnostics via `layout_tcx_at_span`.
2338 impl<'tcx> LayoutOfHelpers<'tcx> for LayoutCx<'tcx, ty::query::TyCtxtAt<'tcx>> {
2339 type LayoutOfResult = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
2342 fn layout_tcx_at_span(&self) -> Span {
2347 fn handle_layout_err(&self, err: LayoutError<'tcx>, _: Span, _: Ty<'tcx>) -> LayoutError<'tcx> {
// Implements the target-agnostic `TyAbiInterface` for `Ty<'tcx>`, letting
// `rustc_target`'s ABI code navigate variants, fields, and pointee info of
// a `TyAndLayout` without knowing about `TyCtxt`.
2352 impl<'tcx, C> TyAbiInterface<'tcx, C> for Ty<'tcx>
2354 C: HasTyCtxt<'tcx> + HasParamEnv<'tcx>,
// Project the layout of a specific variant out of an enum/generator layout.
2356 fn ty_and_layout_for_variant(
2357 this: TyAndLayout<'tcx>,
2359 variant_index: VariantIdx,
2360 ) -> TyAndLayout<'tcx> {
2361 let layout = match this.variants {
2362 Variants::Single { index }
2363 // If all variants but one are uninhabited, the variant layout is the enum layout.
2364 if index == variant_index &&
2365 // Don't confuse variants of uninhabited enums with the enum itself.
2366 // For more details see https://github.com/rust-lang/rust/issues/69763.
2367 this.fields != FieldsShape::Primitive =>
// Asking for a *different* variant of a `Variants::Single` layout:
// that variant must be uninhabited, so synthesize an uninhabited layout.
2372 Variants::Single { index } => {
2374 let param_env = cx.param_env();
2376 // Deny calling for_variant more than once for non-Single enums.
2377 if let Ok(original_layout) = tcx.layout_of(param_env.and(this.ty)) {
2378 assert_eq!(original_layout.variants, Variants::Single { index });
2381 let fields = match this.ty.kind() {
2382 ty::Adt(def, _) if def.variants().is_empty() =>
2383 bug!("for_variant called on zero-variant enum"),
2384 ty::Adt(def, _) => def.variant(variant_index).fields.len(),
2387 tcx.intern_layout(LayoutS {
2388 variants: Variants::Single { index: variant_index },
// A nonzero field count is encoded as a `Union` shape here; zero
// fields get an empty `Arbitrary` shape.
2389 fields: match NonZeroUsize::new(fields) {
2390 Some(fields) => FieldsShape::Union(fields),
2391 None => FieldsShape::Arbitrary { offsets: vec![], memory_index: vec![] },
2393 abi: Abi::Uninhabited,
2394 largest_niche: None,
2395 align: tcx.data_layout.i8_align,
2400 Variants::Multiple { ref variants, .. } => variants[variant_index],
2403 assert_eq!(*layout.variants(), Variants::Single { index: variant_index });
2405 TyAndLayout { ty: this.ty, layout }
// Compute the type-and-layout of field `i` of `this`, dispatching on the
// type's kind. Helper `field_ty_or_layout` returns either just a type (whose
// layout is then computed) or a fully precomputed `TyAndLayout`.
2408 fn ty_and_layout_field(this: TyAndLayout<'tcx>, cx: &C, i: usize) -> TyAndLayout<'tcx> {
2409 enum TyMaybeWithLayout<'tcx> {
2411 TyAndLayout(TyAndLayout<'tcx>),
2414 fn field_ty_or_layout<'tcx>(
2415 this: TyAndLayout<'tcx>,
2416 cx: &(impl HasTyCtxt<'tcx> + HasParamEnv<'tcx>),
2418 ) -> TyMaybeWithLayout<'tcx> {
// Build a `TyAndLayout` for an enum's tag scalar on demand.
2420 let tag_layout = |tag: Scalar| -> TyAndLayout<'tcx> {
2422 layout: tcx.intern_layout(LayoutS::scalar(cx, tag)),
2423 ty: tag.primitive().to_ty(tcx),
2427 match *this.ty.kind() {
2436 | ty::GeneratorWitness(..)
2438 | ty::Dynamic(..) => bug!("TyAndLayout::field({:?}): not applicable", this),
2440 // Potentially-fat pointers.
2441 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
2442 assert!(i < this.fields.count());
2444 // Reuse the fat `*T` type as its own thin pointer data field.
2445 // This provides information about, e.g., DST struct pointees
2446 // (which may have no non-DST form), and will work as long
2447 // as the `Abi` or `FieldsShape` is checked by users.
2449 let nil = tcx.mk_unit();
2450 let unit_ptr_ty = if this.ty.is_unsafe_ptr() {
2453 tcx.mk_mut_ref(tcx.lifetimes.re_static, nil)
2456 // NOTE(eddyb) using an empty `ParamEnv`, and `unwrap`-ing
2457 // the `Result` should always work because the type is
2458 // always either `*mut ()` or `&'static mut ()`.
2459 return TyMaybeWithLayout::TyAndLayout(TyAndLayout {
2461 ..tcx.layout_of(ty::ParamEnv::reveal_all().and(unit_ptr_ty)).unwrap()
// The metadata field of a fat pointer depends on the unsized tail:
// `usize` length for slices/str, vtable pointer for trait objects.
2465 match tcx.struct_tail_erasing_lifetimes(pointee, cx.param_env()).kind() {
2466 ty::Slice(_) | ty::Str => TyMaybeWithLayout::Ty(tcx.types.usize),
2467 ty::Dynamic(_, _) => {
// Vtables are modeled as `&'static [usize; 3]` for layout purposes.
2468 TyMaybeWithLayout::Ty(tcx.mk_imm_ref(
2469 tcx.lifetimes.re_static,
2470 tcx.mk_array(tcx.types.usize, 3),
2472 /* FIXME: use actual fn pointers
2473 Warning: naively computing the number of entries in the
2474 vtable by counting the methods on the trait + methods on
2475 all parent traits does not work, because some methods can
2476 be not object safe and thus excluded from the vtable.
2477 Increase this counter if you tried to implement this but
2478 failed to do it without duplicating a lot of code from
2479 other places in the compiler: 2
2481 tcx.mk_array(tcx.types.usize, 3),
2482 tcx.mk_array(Option<fn()>),
2486 _ => bug!("TyAndLayout::field({:?}): not applicable", this),
2490 // Arrays and slices.
2491 ty::Array(element, _) | ty::Slice(element) => TyMaybeWithLayout::Ty(element),
2492 ty::Str => TyMaybeWithLayout::Ty(tcx.types.u8),
2494 // Tuples, generators and closures.
// Closures delegate to the tupled upvars type.
2495 ty::Closure(_, ref substs) => field_ty_or_layout(
2496 TyAndLayout { ty: substs.as_closure().tupled_upvars_ty(), ..this },
2501 ty::Generator(def_id, ref substs, _) => match this.variants {
// Single-variant view: field is one of the saved state types.
2502 Variants::Single { index } => TyMaybeWithLayout::Ty(
2505 .state_tys(def_id, tcx)
2506 .nth(index.as_usize())
// Multi-variant view: the tag field gets the tag's scalar layout,
// other fields come from the generator's prefix types.
2511 Variants::Multiple { tag, tag_field, .. } => {
2513 return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
2515 TyMaybeWithLayout::Ty(substs.as_generator().prefix_tys().nth(i).unwrap())
2519 ty::Tuple(tys) => TyMaybeWithLayout::Ty(tys[i]),
2522 ty::Adt(def, substs) => {
2523 match this.variants {
2524 Variants::Single { index } => {
2525 TyMaybeWithLayout::Ty(def.variant(index).fields[i].ty(tcx, substs))
2528 // Discriminant field for enums (where applicable).
2529 Variants::Multiple { tag, .. } => {
2531 return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
2538 | ty::Placeholder(..)
2542 | ty::Error(_) => bug!("TyAndLayout::field: unexpected type `{}`", this.ty),
// If only a type came back, compute its layout now; failure here is a
// compiler bug since the field belongs to an already-computed layout.
2546 match field_ty_or_layout(this, cx, i) {
2547 TyMaybeWithLayout::Ty(field_ty) => {
2548 cx.tcx().layout_of(cx.param_env().and(field_ty)).unwrap_or_else(|e| {
2550 "failed to get layout for `{}`: {},\n\
2551 despite it being a field (#{}) of an existing layout: {:#?}",
2559 TyMaybeWithLayout::TyAndLayout(field_layout) => field_layout,
// Determine pointer metadata (size/align/safety kind) for a pointer found at
// `offset` inside `this`, used by codegen for attributes like `noalias`.
2563 fn ty_and_layout_pointee_info_at(
2564 this: TyAndLayout<'tcx>,
2567 ) -> Option<PointeeInfo> {
2569 let param_env = cx.param_env();
2571 let addr_space_of_ty = |ty: Ty<'tcx>| {
2572 if ty.is_fn() { cx.data_layout().instruction_address_space } else { AddressSpace::DATA }
2575 let pointee_info = match *this.ty.kind() {
2576 ty::RawPtr(mt) if offset.bytes() == 0 => {
2577 tcx.layout_of(param_env.and(mt.ty)).ok().map(|layout| PointeeInfo {
2579 align: layout.align.abi,
2581 address_space: addr_space_of_ty(mt.ty),
2584 ty::FnPtr(fn_sig) if offset.bytes() == 0 => {
2585 tcx.layout_of(param_env.and(tcx.mk_fn_ptr(fn_sig))).ok().map(|layout| PointeeInfo {
2587 align: layout.align.abi,
2589 address_space: cx.data_layout().instruction_address_space,
2592 ty::Ref(_, ty, mt) if offset.bytes() == 0 => {
2593 let address_space = addr_space_of_ty(ty);
2594 let kind = if tcx.sess.opts.optimize == OptLevel::No {
2595 // Use conservative pointer kind if not optimizing. This saves us the
2596 // Freeze/Unpin queries, and can save time in the codegen backend (noalias
2597 // attributes in LLVM have compile-time cost even in unoptimized builds).
2598 PointerKind::SharedMutable
2601 hir::Mutability::Not => {
// `&T` is `Frozen` only if `T: Freeze`; otherwise it may contain
// `UnsafeCell` and must be treated as shared-mutable.
2602 if ty.is_freeze(tcx.at(DUMMY_SP), cx.param_env()) {
2605 PointerKind::SharedMutable
2608 hir::Mutability::Mut => {
2609 // References to self-referential structures should not be considered
2610 // noalias, as another pointer to the structure can be obtained, that
2611 // is not based-on the original reference. We consider all !Unpin
2612 // types to be potentially self-referential here.
2613 if ty.is_unpin(tcx.at(DUMMY_SP), cx.param_env()) {
2614 PointerKind::UniqueBorrowed
2616 PointerKind::UniqueBorrowedPinned
2622 tcx.layout_of(param_env.and(ty)).ok().map(|layout| PointeeInfo {
2624 align: layout.align.abi,
// Not a pointer at offset 0: recurse into the variant/field containing
// `offset` to look for a pointer there.
2631 let mut data_variant = match this.variants {
2632 // Within the discriminant field, only the niche itself is
2633 // always initialized, so we only check for a pointer at its
2636 // If the niche is a pointer, it's either valid (according
2637 // to its type), or null (which the niche field's scalar
2638 // validity range encodes). This allows using
2639 // `dereferenceable_or_null` for e.g., `Option<&T>`, and
2640 // this will continue to work as long as we don't start
2641 // using more niches than just null (e.g., the first page of
2642 // the address space, or unaligned pointers).
2643 Variants::Multiple {
2644 tag_encoding: TagEncoding::Niche { untagged_variant, .. },
2647 } if this.fields.offset(tag_field) == offset => {
2648 Some(this.for_variant(cx, untagged_variant))
2653 if let Some(variant) = data_variant {
2654 // We're not interested in any unions.
2655 if let FieldsShape::Union(_) = variant.fields {
2656 data_variant = None;
2660 let mut result = None;
2662 if let Some(variant) = data_variant {
// Scan fields for one that fully contains a pointer-sized slot at `offset`.
2663 let ptr_end = offset + Pointer.size(cx);
2664 for i in 0..variant.fields.count() {
2665 let field_start = variant.fields.offset(i);
2666 if field_start <= offset {
2667 let field = variant.field(cx, i);
2668 result = field.to_result().ok().and_then(|field| {
2669 if ptr_end <= field_start + field.size {
2670 // We found the right field, look inside it.
2672 field.pointee_info_at(cx, offset - field_start);
2678 if result.is_some() {
2685 // FIXME(eddyb) This should be for `ptr::Unique<T>`, not `Box<T>`.
2686 if let Some(ref mut pointee) = result {
2687 if let ty::Adt(def, _) = this.ty.kind() {
2688 if def.is_box() && offset.bytes() == 0 {
2689 pointee.safe = Some(PointerKind::UniqueOwned);
2699 "pointee_info_at (offset={:?}, type kind: {:?}) => {:?}",
// Simple type-kind predicates used by the target-agnostic ABI code.
2708 fn is_adt(this: TyAndLayout<'tcx>) -> bool {
2709 matches!(this.ty.kind(), ty::Adt(..))
2712 fn is_never(this: TyAndLayout<'tcx>) -> bool {
2713 this.ty.kind() == &ty::Never
2716 fn is_tuple(this: TyAndLayout<'tcx>) -> bool {
2717 matches!(this.ty.kind(), ty::Tuple(..))
2720 fn is_unit(this: TyAndLayout<'tcx>) -> bool {
2721 matches!(this.ty.kind(), ty::Tuple(list) if list.len() == 0)
2725 impl<'tcx> ty::Instance<'tcx> {
2726 // NOTE(eddyb) this is private to avoid using it from outside of
2727 // `fn_abi_of_instance` - any other uses are either too high-level
2728 // for `Instance` (e.g. typeck would use `Ty::fn_sig` instead),
2729 // or should go through `FnAbi` instead, to avoid losing any
2730 // adjustments `fn_abi_of_instance` might be performing.
// Produce the signature used for ABI computation: `FnDef` signatures are
// normalized, vtable shims get a `*mut Self` receiver, and closures and
// generators get their synthetic environment/resume signatures.
2731 fn fn_sig_for_fn_abi(
2734 param_env: ty::ParamEnv<'tcx>,
2735 ) -> ty::PolyFnSig<'tcx> {
2736 let ty = self.ty(tcx, param_env);
2739 // HACK(davidtwco,eddyb): This is a workaround for polymorphization considering
2740 // parameters unused if they show up in the signature, but not in the `mir::Body`
2741 // (i.e. due to being inside a projection that got normalized, see
2742 // `src/test/ui/polymorphization/normalized_sig_types.rs`), and codegen not keeping
2743 // track of a polymorphization `ParamEnv` to allow normalizing later.
2744 let mut sig = match *ty.kind() {
2745 ty::FnDef(def_id, substs) => tcx
2746 .normalize_erasing_regions(tcx.param_env(def_id), tcx.bound_fn_sig(def_id))
2747 .subst(tcx, substs),
2748 _ => unreachable!(),
2751 if let ty::InstanceDef::VTableShim(..) = self.def {
2752 // Modify `fn(self, ...)` to `fn(self: *mut Self, ...)`.
2753 sig = sig.map_bound(|mut sig| {
2754 let mut inputs_and_output = sig.inputs_and_output.to_vec();
2755 inputs_and_output[0] = tcx.mk_mut_ptr(inputs_and_output[0]);
2756 sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);
2762 ty::Closure(def_id, substs) => {
2763 let sig = substs.as_closure().sig();
// Extend the closure's bound vars with a fresh late-bound region
// for the environment (`&self`/`&mut self`/by-value) parameter.
2765 let bound_vars = tcx.mk_bound_variable_kinds(
2768 .chain(iter::once(ty::BoundVariableKind::Region(ty::BrEnv))),
2770 let br = ty::BoundRegion {
2771 var: ty::BoundVar::from_usize(bound_vars.len() - 1),
2772 kind: ty::BoundRegionKind::BrEnv,
2774 let env_region = ty::ReLateBound(ty::INNERMOST, br);
2775 let env_ty = tcx.closure_env_ty(def_id, substs, env_region).unwrap();
// Prepend the environment type to the closure's inputs.
2777 let sig = sig.skip_binder();
2778 ty::Binder::bind_with_vars(
2780 iter::once(env_ty).chain(sig.inputs().iter().cloned()),
2789 ty::Generator(_, substs, _) => {
2790 let sig = substs.as_generator().poly_sig();
2792 let bound_vars = tcx.mk_bound_variable_kinds(
2795 .chain(iter::once(ty::BoundVariableKind::Region(ty::BrEnv))),
2797 let br = ty::BoundRegion {
2798 var: ty::BoundVar::from_usize(bound_vars.len() - 1),
2799 kind: ty::BoundRegionKind::BrEnv,
2801 let env_region = ty::ReLateBound(ty::INNERMOST, br);
2802 let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty);
// The generator receiver is `Pin<&mut Self>`.
2804 let pin_did = tcx.require_lang_item(LangItem::Pin, None);
2805 let pin_adt_ref = tcx.adt_def(pin_did);
2806 let pin_substs = tcx.intern_substs(&[env_ty.into()]);
2807 let env_ty = tcx.mk_adt(pin_adt_ref, pin_substs);
// The return type is `GeneratorState<Yield, Return>`.
2809 let sig = sig.skip_binder();
2810 let state_did = tcx.require_lang_item(LangItem::GeneratorState, None);
2811 let state_adt_ref = tcx.adt_def(state_did);
2812 let state_substs = tcx.intern_substs(&[sig.yield_ty.into(), sig.return_ty.into()]);
2813 let ret_ty = tcx.mk_adt(state_adt_ref, state_substs);
2814 ty::Binder::bind_with_vars(
2816 [env_ty, sig.resume_ty].iter(),
2819 hir::Unsafety::Normal,
2820 rustc_target::spec::abi::Abi::Rust,
2825 _ => bug!("unexpected type {:?} in Instance::fn_sig", ty),
2830 /// Calculates whether a function's ABI can unwind or not.
2832 /// This takes two primary parameters:
2834 /// * `codegen_fn_attr_flags` - these are flags calculated as part of the
2835 /// codegen attrs for a defined function. For function pointers this set of
2836 /// flags is the empty set. This is only applicable for Rust-defined
2837 /// functions, and generally isn't needed except for small optimizations where
2838 /// we try to say a function which otherwise might look like it could unwind
2839 /// doesn't actually unwind (such as for intrinsics and such).
2841 /// * `abi` - this is the ABI that the function is defined with. This is the
2842 /// primary factor for determining whether a function can unwind or not.
2844 /// Note that in this case unwinding is not necessarily panicking in Rust. Rust
2845 /// panics are implemented with unwinds on most platform (when
2846 /// `-Cpanic=unwind`), but this also accounts for `-Cpanic=abort` build modes.
2847 /// Notably unwinding is disallowed for more non-Rust ABIs unless it's
2848 /// specifically in the name (e.g. `"C-unwind"`). Unwinding within each ABI is
2849 /// defined for each ABI individually, but it always corresponds to some form of
2850 /// stack-based unwinding (the exact mechanism of which varies
2851 /// platform-by-platform).
2853 /// Rust functions are classified whether or not they can unwind based on the
2854 /// active "panic strategy". In other words Rust functions are considered to
2855 /// unwind in `-Cpanic=unwind` mode and cannot unwind in `-Cpanic=abort` mode.
2856 /// Note that Rust supports intermingling panic=abort and panic=unwind code, but
2857 /// only if the final panic mode is panic=abort. In this scenario any code
2858 /// previously compiled assuming that a function can unwind is still correct, it
2859 /// just never happens to actually unwind at runtime.
2861 /// This function's answer to whether or not a function can unwind is quite
2862 /// impactful throughout the compiler. This affects things like:
2864 /// * Calling a function which can't unwind means codegen simply ignores any
2865 /// associated unwinding cleanup.
2866 /// * Calling a function which can unwind from a function which can't unwind
2867 /// causes the `abort_unwinding_calls` MIR pass to insert a landing pad that
2868 /// aborts the process.
2869 /// * This affects whether functions have the LLVM `nounwind` attribute, which
2870 /// affects various optimizations and codegen.
2872 /// FIXME: this is actually buggy with respect to Rust functions. Rust functions
2873 /// compiled with `-Cpanic=unwind` and referenced from another crate compiled
2874 /// with `-Cpanic=abort` will look like they can't unwind when in fact they
2875 /// might (from a foreign exception or similar).
2877 pub fn fn_can_unwind<'tcx>(tcx: TyCtxt<'tcx>, fn_def_id: Option<DefId>, abi: SpecAbi) -> bool {
// Defined (Rust) functions may carry attributes that rule out unwinding
// before we ever consult the ABI.
2878 if let Some(did) = fn_def_id {
2879 // Special attribute for functions which can't unwind.
2880 if tcx.codegen_fn_attrs(did).flags.contains(CodegenFnAttrFlags::NEVER_UNWIND) {
2884 // With `-C panic=abort`, all non-FFI functions are required to not unwind.
2886 // Note that this is true regardless ABI specified on the function -- a `extern "C-unwind"`
2887 // function defined in Rust is also required to abort.
2888 if tcx.sess.panic_strategy() == PanicStrategy::Abort && !tcx.is_foreign_item(did) {
2892 // With -Z panic-in-drop=abort, drop_in_place never unwinds.
2894 // This is not part of `codegen_fn_attrs` as it can differ between crates
2895 // and therefore cannot be computed in core.
2896 if tcx.sess.opts.unstable_opts.panic_in_drop == PanicStrategy::Abort {
2897 if Some(did) == tcx.lang_items().drop_in_place_fn() {
2903 // Otherwise if this isn't special then unwinding is generally determined by
2904 // the ABI of the itself. ABIs like `C` have variants which also
2905 // specifically allow unwinding (`C-unwind`), but not all platform-specific
2906 // ABIs have such an option. Otherwise the only other thing here is Rust
2907 // itself, and those ABIs are determined by the panic strategy configured
2908 // for this compilation.
2910 // Unfortunately at this time there's also another caveat. Rust [RFC
2911 // 2945][rfc] has been accepted and is in the process of being implemented
2912 // and stabilized. In this interim state we need to deal with historical
2913 // rustc behavior as well as plan for future rustc behavior.
2915 // Historically functions declared with `extern "C"` were marked at the
2916 // codegen layer as `nounwind`. This happened regardless of `panic=unwind`
2917 // or not. This is UB for functions in `panic=unwind` mode that then
2918 // actually panic and unwind. Note that this behavior is true for both
2919 // externally declared functions as well as Rust-defined function.
2921 // To fix this UB rustc would like to change in the future to catch unwinds
2922 // from function calls that may unwind within a Rust-defined `extern "C"`
2923 // function and forcibly abort the process, thereby respecting the
2924 // `nounwind` attribute emitted for `extern "C"`. This behavior change isn't
2925 // ready to roll out, so determining whether or not the `C` family of ABIs
2926 // unwinds is conditional not only on their definition but also whether the
2927 // `#![feature(c_unwind)]` feature gate is active.
2929 // Note that this means that unlike historical compilers rustc now, by
2930 // default, unconditionally thinks that the `C` ABI may unwind. This will
2931 // prevent some optimization opportunities, however, so we try to scope this
2932 // change and only assume that `C` unwinds with `panic=unwind` (as opposed
2933 // to `panic=abort`).
2935 // Eventually the check against `c_unwind` here will ideally get removed and
2936 // this'll be a little cleaner as it'll be a straightforward check of the
2939 // [rfc]: https://github.com/rust-lang/rfcs/blob/master/text/2945-c-unwind-abi.md
// The "C family" of ABIs: unwinding depends on the explicit `unwind`
// flag plus the interim `c_unwind` feature-gate logic described above.
2945 | Stdcall { unwind }
2946 | Fastcall { unwind }
2947 | Vectorcall { unwind }
2948 | Thiscall { unwind }
2951 | SysV64 { unwind } => {
2953 || (!tcx.features().c_unwind && tcx.sess.panic_strategy() == PanicStrategy::Unwind)
// ABIs with no unwind variant at all can never unwind.
2961 | AvrNonBlockingInterrupt
2962 | CCmseNonSecureCall
2966 | Unadjusted => false,
// Rust-internal ABIs unwind exactly when the panic strategy is `unwind`.
2967 Rust | RustCall | RustCold => tcx.sess.panic_strategy() == PanicStrategy::Unwind,
// Map a source-level ABI (`extern "..."`) to the calling convention used by
// the backend, after letting the target adjust the ABI first.
2972 pub fn conv_from_spec_abi(tcx: TyCtxt<'_>, abi: SpecAbi) -> Conv {
2973 use rustc_target::spec::abi::Abi::*;
2974 match tcx.sess.target.adjust_abi(abi) {
2975 RustIntrinsic | PlatformIntrinsic | Rust | RustCall => Conv::Rust,
2976 RustCold => Conv::RustCold,
2978 // It's the ABI's job to select this, not ours.
2979 System { .. } => bug!("system abi should be selected elsewhere"),
2980 EfiApi => bug!("eficall abi should be selected elsewhere"),
2982 Stdcall { .. } => Conv::X86Stdcall,
2983 Fastcall { .. } => Conv::X86Fastcall,
2984 Vectorcall { .. } => Conv::X86VectorCall,
2985 Thiscall { .. } => Conv::X86ThisCall,
2986 C { .. } => Conv::C,
2987 Unadjusted => Conv::C,
2988 Win64 { .. } => Conv::X86_64Win64,
2989 SysV64 { .. } => Conv::X86_64SysV,
2990 Aapcs { .. } => Conv::ArmAapcs,
2991 CCmseNonSecureCall => Conv::CCmseNonSecureCall,
2992 PtxKernel => Conv::PtxKernel,
2993 Msp430Interrupt => Conv::Msp430Intr,
2994 X86Interrupt => Conv::X86Intr,
2995 AmdGpuKernel => Conv::AmdGpuKernel,
2996 AvrInterrupt => Conv::AvrInterrupt,
2997 AvrNonBlockingInterrupt => Conv::AvrNonBlockingInterrupt,
3000 // These API constants ought to be more specific...
3001 Cdecl { .. } => Conv::C,
3005 /// Error produced by attempting to compute or adjust a `FnAbi`.
3006 #[derive(Copy, Clone, Debug, HashStable)]
3007 pub enum FnAbiError<'tcx> {
3008 /// Error produced by a `layout_of` call, while computing `FnAbi` initially.
3009 Layout(LayoutError<'tcx>),
3011 /// Error produced by attempting to adjust a `FnAbi`, for a "foreign" ABI.
3012 AdjustForForeignAbi(call::AdjustForForeignAbiError),
// Allow `?`-propagation of `LayoutError` inside `FnAbi` computation.
3015 impl<'tcx> From<LayoutError<'tcx>> for FnAbiError<'tcx> {
3016 fn from(err: LayoutError<'tcx>) -> Self {
// Allow `?`-propagation of foreign-ABI adjustment errors.
3021 impl From<call::AdjustForForeignAbiError> for FnAbiError<'_> {
3022 fn from(err: call::AdjustForForeignAbiError) -> Self {
3023 Self::AdjustForForeignAbi(err)
// Human-readable rendering simply defers to the wrapped error's `Display`.
3027 impl<'tcx> fmt::Display for FnAbiError<'tcx> {
3028 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
3030 Self::Layout(err) => err.fmt(f),
3031 Self::AdjustForForeignAbi(err) => err.fmt(f),
3036 // FIXME(eddyb) maybe use something like this for an unified `fn_abi_of`, not
3037 // just for error handling.
// Describes which `fn_abi_of_*` entry point failed, for diagnostics.
3039 pub enum FnAbiRequest<'tcx> {
3040 OfFnPtr { sig: ty::PolyFnSig<'tcx>, extra_args: &'tcx ty::List<Ty<'tcx>> },
3041 OfInstance { instance: ty::Instance<'tcx>, extra_args: &'tcx ty::List<Ty<'tcx>> },
3044 /// Trait for contexts that want to be able to compute `FnAbi`s.
3045 /// This automatically gives access to `FnAbiOf`, through a blanket `impl`.
3046 pub trait FnAbiOfHelpers<'tcx>: LayoutOfHelpers<'tcx> {
3047 /// The `&FnAbi`-wrapping type (or `&FnAbi` itself), which will be
3048 /// returned from `fn_abi_of_*` (see also `handle_fn_abi_err`).
3049 type FnAbiOfResult: MaybeResult<&'tcx FnAbi<'tcx, Ty<'tcx>>>;
3051 /// Helper used for `fn_abi_of_*`, to adapt `tcx.fn_abi_of_*(...)` into a
3052 /// `Self::FnAbiOfResult` (which does not need to be a `Result<...>`).
3054 /// Most `impl`s, which propagate `FnAbiError`s, should simply return `err`,
3055 /// but this hook allows e.g. codegen to return only `&FnAbi` from its
3056 /// `cx.fn_abi_of_*(...)`, without any `Result<...>` around it to deal with
3057 /// (and any `FnAbiError`s are turned into fatal errors or ICEs).
3058 fn handle_fn_abi_err(
3060 err: FnAbiError<'tcx>,
3062 fn_abi_request: FnAbiRequest<'tcx>,
3063 ) -> <Self::FnAbiOfResult as MaybeResult<&'tcx FnAbi<'tcx, Ty<'tcx>>>>::Error;
3066 /// Blanket extension trait for contexts that can compute `FnAbi`s.
3067 pub trait FnAbiOf<'tcx>: FnAbiOfHelpers<'tcx> {
3068 /// Compute a `FnAbi` suitable for indirect calls, i.e. to `fn` pointers.
3070 /// NB: this doesn't handle virtual calls - those should use `fn_abi_of_instance`
3071 /// instead, where the instance is an `InstanceDef::Virtual`.
3073 fn fn_abi_of_fn_ptr(
3075 sig: ty::PolyFnSig<'tcx>,
3076 extra_args: &'tcx ty::List<Ty<'tcx>>,
3077 ) -> Self::FnAbiOfResult {
3078 // FIXME(eddyb) get a better `span` here.
3079 let span = self.layout_tcx_at_span();
3080 let tcx = self.tcx().at(span);
// Run the query and route errors through the context's error hook.
3082 MaybeResult::from(tcx.fn_abi_of_fn_ptr(self.param_env().and((sig, extra_args))).map_err(
3083 |err| self.handle_fn_abi_err(err, span, FnAbiRequest::OfFnPtr { sig, extra_args }),
3087 /// Compute a `FnAbi` suitable for declaring/defining an `fn` instance, and for
3088 /// direct calls to an `fn`.
3090 /// NB: that includes virtual calls, which are represented by "direct calls"
3091 /// to an `InstanceDef::Virtual` instance (of `<dyn Trait as Trait>::fn`).
3093 fn fn_abi_of_instance(
3095 instance: ty::Instance<'tcx>,
3096 extra_args: &'tcx ty::List<Ty<'tcx>>,
3097 ) -> Self::FnAbiOfResult {
3098 // FIXME(eddyb) get a better `span` here.
3099 let span = self.layout_tcx_at_span();
3100 let tcx = self.tcx().at(span);
3103 tcx.fn_abi_of_instance(self.param_env().and((instance, extra_args))).map_err(|err| {
3104 // HACK(eddyb) at least for definitions of/calls to `Instance`s,
3105 // we can get some kind of span even if one wasn't provided.
3106 // However, we don't do this early in order to avoid calling
3107 // `def_span` unconditionally (which may have a perf penalty).
3108 let span = if !span.is_dummy() { span } else { tcx.def_span(instance.def_id()) };
3109 self.handle_fn_abi_err(err, span, FnAbiRequest::OfInstance { instance, extra_args })
3115 impl<'tcx, C: FnAbiOfHelpers<'tcx>> FnAbiOf<'tcx> for C {}
// Query provider (registered in `provide`): compute the `FnAbi` of a bare
// `fn` pointer signature, with no instance-specific adjustments.
3117 fn fn_abi_of_fn_ptr<'tcx>(
3119 query: ty::ParamEnvAnd<'tcx, (ty::PolyFnSig<'tcx>, &'tcx ty::List<Ty<'tcx>>)>,
3120 ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
3121 let (param_env, (sig, extra_args)) = query.into_parts();
// No caller-location, no def-id, not a virtual call.
3123 LayoutCx { tcx, param_env }.fn_abi_new_uncached(sig, extra_args, None, None, false)
// Query provider (registered in `provide`): compute the `FnAbi` of a concrete
// `Instance`, including `#[track_caller]` and virtual-call adjustments.
3126 fn fn_abi_of_instance<'tcx>(
3128 query: ty::ParamEnvAnd<'tcx, (ty::Instance<'tcx>, &'tcx ty::List<Ty<'tcx>>)>,
3129 ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
3130 let (param_env, (instance, extra_args)) = query.into_parts();
3132 let sig = instance.fn_sig_for_fn_abi(tcx, param_env);
// `#[track_caller]` functions get an implicit trailing `&Location` argument.
3134 let caller_location = if instance.def.requires_caller_location(tcx) {
3135 Some(tcx.caller_location_ty())
3140 LayoutCx { tcx, param_env }.fn_abi_new_uncached(
3144 Some(instance.def_id()),
// Virtual instances need the `self` pointer thinned (no vtable part).
3145 matches!(instance.def, ty::InstanceDef::Virtual(..)),
3149 // Handle safe Rust thin and fat pointers.
// Populates LLVM-style argument attributes (`noundef`, `nonnull`, `noalias`,
// `readonly`, dereferenceable size/align) for a scalar component of a
// Rust-ABI argument, based on its type-derived pointee information.
3150 pub fn adjust_for_rust_scalar<'tcx>(
3151 cx: LayoutCx<'tcx, TyCtxt<'tcx>>,
3152 attrs: &mut ArgAttributes,
3154 layout: TyAndLayout<'tcx>,
3158 // Booleans are always a noundef i1 that needs to be zero-extended.
3159 if scalar.is_bool() {
3160 attrs.ext(ArgExtension::Zext);
3161 attrs.set(ArgAttribute::NoUndef);
3165 // Scalars which have invalid values cannot be undef.
3166 if !scalar.is_always_valid(&cx) {
3167 attrs.set(ArgAttribute::NoUndef);
3170 // Only pointer types handled below.
3171 let Scalar::Initialized { value: Pointer, valid_range} = scalar else { return };
// A validity range excluding 0 means the pointer can never be null.
3173 if !valid_range.contains(0) {
3174 attrs.set(ArgAttribute::NonNull);
3177 if let Some(pointee) = layout.pointee_info_at(&cx, offset) {
3178 if let Some(kind) = pointee.safe {
3179 attrs.pointee_align = Some(pointee.align);
3181 // `Box` (`UniqueBorrowed`) are not necessarily dereferenceable
3182 // for the entire duration of the function as they can be deallocated
3183 // at any time. Same for shared mutable references. If LLVM had a
3184 // way to say "dereferenceable on entry" we could use it here.
3185 attrs.pointee_size = match kind {
3186 PointerKind::UniqueBorrowed
3187 | PointerKind::UniqueBorrowedPinned
3188 | PointerKind::Frozen => pointee.size,
3189 PointerKind::SharedMutable | PointerKind::UniqueOwned => Size::ZERO,
3192 // `Box`, `&T`, and `&mut T` cannot be undef.
3193 // Note that this only applies to the value of the pointer itself;
3194 // this attribute doesn't make it UB for the pointed-to data to be undef.
3195 attrs.set(ArgAttribute::NoUndef);
3197 // The aliasing rules for `Box<T>` are still not decided, but currently we emit
3198 // `noalias` for it. This can be turned off using an unstable flag.
3199 // See https://github.com/rust-lang/unsafe-code-guidelines/issues/326
3200 let noalias_for_box = cx.tcx.sess.opts.unstable_opts.box_noalias.unwrap_or(true);
3202 // `&mut` pointer parameters never alias other parameters,
3203 // or mutable global data
3205 // `&T` where `T` contains no `UnsafeCell<U>` is immutable,
3206 // and can be marked as both `readonly` and `noalias`, as
3207 // LLVM's definition of `noalias` is based solely on memory
3208 // dependencies rather than pointer equality
3210 // Due to past miscompiles in LLVM, we apply a separate NoAliasMutRef attribute
3211 // for UniqueBorrowed arguments, so that the codegen backend can decide whether
3212 // or not to actually emit the attribute. It can also be controlled with the
3213 // `-Zmutable-noalias` debugging option.
3214 let no_alias = match kind {
3215 PointerKind::SharedMutable
3216 | PointerKind::UniqueBorrowed
3217 | PointerKind::UniqueBorrowedPinned => false,
3218 PointerKind::UniqueOwned => noalias_for_box,
// `noalias` on a frozen `&T` is unsound for return values, hence `!is_return`.
3219 PointerKind::Frozen => !is_return,
3222 attrs.set(ArgAttribute::NoAlias);
3225 if kind == PointerKind::Frozen && !is_return {
3226 attrs.set(ArgAttribute::ReadOnly);
3229 if kind == PointerKind::UniqueBorrowed && !is_return {
3230 attrs.set(ArgAttribute::NoAliasMutRef);
3236 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
3237 // FIXME(eddyb) perhaps group the signature/type-containing (or all of them?)
3238 // arguments of this method, into a separate `struct`.
// Builds a fresh (uncached) `FnAbi` for `sig` plus any `extra_args`:
// normalizes the signature, picks the calling convention from the spec ABI,
// un-tuples the trailing argument for the "rust-call" ABI, computes a
// per-argument `ArgAbi` (applying Rust-specific scalar attributes), and
// finally runs ABI-specific adjustments before arena-allocating the result.
3239 fn fn_abi_new_uncached(
3241 sig: ty::PolyFnSig<'tcx>,
3242 extra_args: &[Ty<'tcx>],
// When present, appended as a trailing implicit argument after `extra_args`
// (presumably the `#[track_caller]` location — confirm against callers).
3243 caller_location: Option<Ty<'tcx>>,
3244 fn_def_id: Option<DefId>,
3245 // FIXME(eddyb) replace this with something typed, like an `enum`.
// When true, argument 0 (`self`) is rewritten to a thin pointer via
// `make_thin_self_ptr` (virtual-call receivers; see `arg_of` below).
3246 force_thin_self_ptr: bool,
3247 ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
3248 debug!("fn_abi_new_uncached({:?}, {:?})", sig, extra_args);
// Erase late-bound regions so layout computation sees a concrete signature.
3250 let sig = self.tcx.normalize_erasing_late_bound_regions(self.param_env, sig);
3252 let conv = conv_from_spec_abi(self.tcx(), sig.abi);
3254 let mut inputs = sig.inputs();
3255 let extra_args = if sig.abi == RustCall {
// "rust-call" functions carry their arguments pre-tupled as the last input;
// split that trailing tuple back out into individual extra arguments.
3256 assert!(!sig.c_variadic && extra_args.is_empty());
3258 if let Some(input) = sig.inputs().last() {
3259 if let ty::Tuple(tupled_arguments) = input.kind() {
3260 inputs = &sig.inputs()[0..sig.inputs().len() - 1];
3264 "argument to function with \"rust-call\" ABI \
3270 "argument to function with \"rust-call\" ABI \
// Non-"rust-call": extra args are only legal for C-variadic functions.
3275 assert!(sig.c_variadic || extra_args.is_empty());
// Target quirks: the platforms below do NOT ignore zero-sized struct
// arguments, so the ZST-skipping logic further down must be disabled there.
3279 let target = &self.tcx.sess.target;
3280 let target_env_gnu_like = matches!(&target.env[..], "gnu" | "musl" | "uclibc");
3281 let win_x64_gnu = target.os == "windows" && target.arch == "x86_64" && target.env == "gnu";
3282 let linux_s390x_gnu_like =
3283 target.os == "linux" && target.arch == "s390x" && target_env_gnu_like;
3284 let linux_sparc64_gnu_like =
3285 target.os == "linux" && target.arch == "sparc64" && target_env_gnu_like;
3286 let linux_powerpc_gnu_like =
3287 target.os == "linux" && target.arch == "powerpc" && target_env_gnu_like;
3289 let rust_abi = matches!(sig.abi, RustIntrinsic | PlatformIntrinsic | Rust | RustCall);
// Computes the `ArgAbi` for one input (`arg_idx == Some(i)`) or for the
// return value (`arg_idx == None`).
3291 let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| -> Result<_, FnAbiError<'tcx>> {
3292 let is_return = arg_idx.is_none();
3294 let layout = self.layout_of(ty)?;
3295 let layout = if force_thin_self_ptr && arg_idx == Some(0) {
3296 // Don't pass the vtable, it's not an argument of the virtual fn.
3297 // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
3298 // or `&/&mut dyn Trait` because this is special-cased elsewhere in codegen
3299 make_thin_self_ptr(self, layout)
// Attach Rust-specific attributes (noalias/readonly/etc.) per scalar.
3304 let mut arg = ArgAbi::new(self, layout, |layout, scalar, offset| {
3305 let mut attrs = ArgAttributes::new();
3306 adjust_for_rust_scalar(*self, &mut attrs, scalar, *layout, offset, is_return);
// Zero-sized arguments are normally ignored — except on the targets
// flagged above, which require them to be passed explicitly.
3310 if arg.layout.is_zst() {
3311 // For some forsaken reason, x86_64-pc-windows-gnu
3312 // doesn't ignore zero-sized struct arguments.
3313 // The same is true for {s390x,sparc64,powerpc}-unknown-linux-{gnu,musl,uclibc}.
3317 && !linux_s390x_gnu_like
3318 && !linux_sparc64_gnu_like
3319 && !linux_powerpc_gnu_like)
3321 arg.mode = PassMode::Ignore;
// Assemble the full ABI: return value first, then declared inputs,
// the untupled extra args, and the implicit caller-location argument.
3328 let mut fn_abi = FnAbi {
3329 ret: arg_of(sig.output(), None)?,
3333 .chain(extra_args.iter().copied())
3334 .chain(caller_location)
3336 .map(|(i, ty)| arg_of(ty, Some(i)))
3337 .collect::<Result<_, _>>()?,
3338 c_variadic: sig.c_variadic,
// `fixed_count` excludes the extra/variadic arguments appended above.
3339 fixed_count: inputs.len() as u32,
3341 can_unwind: fn_can_unwind(self.tcx(), fn_def_id, sig.abi),
// Apply Rust- and target-specific pass-mode adjustments in place.
3343 self.fn_abi_adjust_for_abi(&mut fn_abi, sig.abi)?;
3344 debug!("fn_abi_new_uncached = {:?}", fn_abi);
3345 Ok(self.tcx.arena.alloc(fn_abi))
// Adjusts the pass modes of `fn_abi`'s return value and arguments for `abi`:
// the `Unadjusted` ABI is left untouched, the Rust-flavored ABIs get the
// `fixup` below, and all other (foreign) ABIs are delegated to
// `adjust_for_foreign_abi`.
3348 fn fn_abi_adjust_for_abi(
3350 fn_abi: &mut FnAbi<'tcx, Ty<'tcx>>,
3352 ) -> Result<(), FnAbiError<'tcx>> {
3353 if abi == SpecAbi::Unadjusted {
3357 if abi == SpecAbi::Rust
3358 || abi == SpecAbi::RustCall
3359 || abi == SpecAbi::RustIntrinsic
3360 || abi == SpecAbi::PlatformIntrinsic
// Fixup applied to the return value and to every argument of a
// Rust-ABI function; arguments already marked `Ignore` are skipped.
3362 let fixup = |arg: &mut ArgAbi<'tcx, Ty<'tcx>>| {
3363 if arg.is_ignore() {
3367 match arg.layout.abi {
3368 Abi::Aggregate { .. } => {}
3370 // This is a fun case! The gist of what this is doing is
3371 // that we want callers and callees to always agree on the
3372 // ABI of how they pass SIMD arguments. If we were to *not*
3373 // make these arguments indirect then they'd be immediates
3374 // in LLVM, which means that they'd used whatever the
3375 // appropriate ABI is for the callee and the caller. That
3376 // means, for example, if the caller doesn't have AVX
3377 // enabled but the callee does, then passing an AVX argument
3378 // across this boundary would cause corrupt data to show up.
3380 // This problem is fixed by unconditionally passing SIMD
3381 // arguments through memory between callers and callees
3382 // which should get them all to agree on ABI regardless of
3383 // target feature sets. Some more information about this
3384 // issue can be found in #44367.
3386 // Note that the platform intrinsic ABI is exempt here as
3387 // that's how we connect up to LLVM and it's unstable
3388 // anyway, we control all calls to it in libstd.
3390 if abi != SpecAbi::PlatformIntrinsic
3391 && self.tcx.sess.target.simd_types_indirect =>
3393 arg.make_indirect();
// Unsized values and aggregates larger than a pointer are passed
// indirectly (by reference); anything smaller is cast to an
// integer register of the same size.
3400 let size = arg.layout.size;
3401 if arg.layout.is_unsized() || size > Pointer.size(self) {
3402 arg.make_indirect();
3404 // We want to pass small aggregates as immediates, but using
3405 // a LLVM aggregate type for this leads to bad optimizations,
3406 // so we pick an appropriately sized integer type instead.
3407 arg.cast_to(Reg { kind: RegKind::Integer, size });
// Apply the fixup to the return slot and every argument.
3410 fixup(&mut fn_abi.ret);
3411 for arg in fn_abi.args.iter_mut() {
// Non-Rust ABIs: hand off to target-specific foreign-ABI adjustment.
3415 fn_abi.adjust_for_foreign_abi(self, abi)?;
3422 fn make_thin_self_ptr<'tcx>(
3423 cx: &(impl HasTyCtxt<'tcx> + HasParamEnv<'tcx>),
3424 layout: TyAndLayout<'tcx>,
3425 ) -> TyAndLayout<'tcx> {
3427 let fat_pointer_ty = if layout.is_unsized() {
3428 // unsized `self` is passed as a pointer to `self`
3429 // FIXME (mikeyhew) change this to use &own if it is ever added to the language
3430 tcx.mk_mut_ptr(layout.ty)
3433 Abi::ScalarPair(..) => (),
3434 _ => bug!("receiver type has unsupported layout: {:?}", layout),
3437 // In the case of Rc<Self>, we need to explicitly pass a *mut RcBox<Self>
3438 // with a Scalar (not ScalarPair) ABI. This is a hack that is understood
3439 // elsewhere in the compiler as a method on a `dyn Trait`.
3440 // To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes until we
3441 // get a built-in pointer type
3442 let mut fat_pointer_layout = layout;
3443 'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr()
3444 && !fat_pointer_layout.ty.is_region_ptr()
3446 for i in 0..fat_pointer_layout.fields.count() {
3447 let field_layout = fat_pointer_layout.field(cx, i);
3449 if !field_layout.is_zst() {
3450 fat_pointer_layout = field_layout;
3451 continue 'descend_newtypes;
3455 bug!("receiver has no non-zero-sized fields {:?}", fat_pointer_layout);
3458 fat_pointer_layout.ty
3461 // we now have a type like `*mut RcBox<dyn Trait>`
3462 // change its layout to that of `*mut ()`, a thin pointer, but keep the same type
3463 // this is understood as a special case elsewhere in the compiler
3464 let unit_ptr_ty = tcx.mk_mut_ptr(tcx.mk_unit());
3469 // NOTE(eddyb) using an empty `ParamEnv`, and `unwrap`-ing the `Result`
3470 // should always work because the type is always `*mut ()`.
3471 ..tcx.layout_of(ty::ParamEnv::reveal_all().and(unit_ptr_ty)).unwrap()