use crate::session::{self, DataTypeKind};
use crate::ty::{self, subst::SubstsRef, ReprOptions, Ty, TyCtxt, TypeFoldable};

use rustc_ast::ast::{self, Ident, IntTy, UintTy};
use rustc_attr as attr;
use rustc_span::DUMMY_SP;

use std::cmp;
use std::fmt;
use std::iter;
use std::ops::Bound;

use crate::ich::StableHashingContext;
use crate::mir::{GeneratorLayout, GeneratorSavedLocal};
use crate::ty::subst::Subst;
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_hir as hir;

use rustc_index::bit_set::BitSet;
use rustc_index::vec::{Idx, IndexVec};

use rustc_target::abi::call::{
    ArgAbi, ArgAttribute, ArgAttributes, Conv, FnAbi, PassMode, Reg, RegKind,
};
pub use rustc_target::abi::*;
use rustc_target::spec::{abi::Abi as SpecAbi, HasTargetSpec};
pub trait IntegerExt {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx>;
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
    fn repr_discr<'tcx>(
        tcx: TyCtxt<'tcx>,
        ty: Ty<'tcx>,
        repr: &ReprOptions,
        min: i128,
        max: i128,
    ) -> (Integer, bool);
}

impl IntegerExt for Integer {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx> {
        match (*self, signed) {
            (I8, false) => tcx.types.u8,
            (I16, false) => tcx.types.u16,
            (I32, false) => tcx.types.u32,
            (I64, false) => tcx.types.u64,
            (I128, false) => tcx.types.u128,
            (I8, true) => tcx.types.i8,
            (I16, true) => tcx.types.i16,
            (I32, true) => tcx.types.i32,
            (I64, true) => tcx.types.i64,
            (I128, true) => tcx.types.i128,
        }
    }
    /// Gets the `Integer` type from an `attr::IntType`.
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
        let dl = cx.data_layout();

        match ity {
            attr::SignedInt(IntTy::I8) | attr::UnsignedInt(UintTy::U8) => I8,
            attr::SignedInt(IntTy::I16) | attr::UnsignedInt(UintTy::U16) => I16,
            attr::SignedInt(IntTy::I32) | attr::UnsignedInt(UintTy::U32) => I32,
            attr::SignedInt(IntTy::I64) | attr::UnsignedInt(UintTy::U64) => I64,
            attr::SignedInt(IntTy::I128) | attr::UnsignedInt(UintTy::U128) => I128,
            attr::SignedInt(IntTy::Isize) | attr::UnsignedInt(UintTy::Usize) => {
                dl.ptr_sized_integer()
            }
        }
    }
    /// Finds the appropriate `Integer` type and signedness for the given
    /// signed discriminant range and `#[repr]` attribute.
    /// N.B.: `u128` values above `i128::MAX` will be treated as signed, but
    /// that shouldn't affect anything other than maybe debuginfo.
    fn repr_discr<'tcx>(
        tcx: TyCtxt<'tcx>,
        ty: Ty<'tcx>,
        repr: &ReprOptions,
        min: i128,
        max: i128,
    ) -> (Integer, bool) {
        // Theoretically, negative values could be larger in unsigned representation
        // than the unsigned representation of the signed minimum. However, if there
        // are any negative values, the only valid unsigned representation is u128,
        // which can fit all i128 values, so the result remains unaffected.
        let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
        let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));

        let mut min_from_extern = None;
        let min_default = I8;

        if let Some(ity) = repr.int {
            let discr = Integer::from_attr(&tcx, ity);
            let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
            if discr < fit {
                bug!(
                    "Integer::repr_discr: `#[repr]` hint too small for \
                     discriminant range of enum `{}`",
                    ty
                )
            }
            return (discr, ity.is_signed());
        }

        if repr.c() {
            match &tcx.sess.target.target.arch[..] {
                // WARNING: the ARM EABI has two variants; the one corresponding
                // to `at_least == I32` appears to be used on Linux and NetBSD,
                // but some systems may use the variant corresponding to no
                // lower bound. However, we don't run on those yet...?
                "arm" => min_from_extern = Some(I32),
                _ => min_from_extern = Some(I32),
            }
        }

        let at_least = min_from_extern.unwrap_or(min_default);

        // If there are no negative values, we can use the unsigned fit.
        if min >= 0 {
            (cmp::max(unsigned_fit, at_least), false)
        } else {
            (cmp::max(signed_fit, at_least), true)
        }
    }
}
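// Worked example (illustrative, not from the original source): for the
// discriminant range -1..=100, `fit_signed` picks I8, while `-1 as u128` is
// `u128::MAX`, so `fit_unsigned` picks I128 -- exactly the situation the
// comment in `repr_discr` describes. Since `min < 0`, the signed fit (I8)
// is what actually gets returned.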
pub trait PrimitiveExt {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
    fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
}

impl PrimitiveExt for Primitive {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
        match *self {
            Int(i, signed) => i.to_ty(tcx, signed),
            F32 => tcx.types.f32,
            F64 => tcx.types.f64,
            Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
        }
    }

    /// Returns an *integer* type matching this primitive.
    /// Useful in particular when dealing with enum discriminants.
    fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
        match *self {
            Int(i, signed) => i.to_ty(tcx, signed),
            Pointer => tcx.types.usize,
            F32 | F64 => bug!("floats do not have an int type"),
        }
    }
}
/// The first half of a fat pointer.
///
/// - For a trait object, this is the address of the box.
/// - For a slice, this is the base address.
pub const FAT_PTR_ADDR: usize = 0;

/// The second half of a fat pointer.
///
/// - For a trait object, this is the address of the vtable.
/// - For a slice, this is the length.
pub const FAT_PTR_EXTRA: usize = 1;
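// Illustrative sketch (assumption, not from the original source): on a
// 64-bit target, `&[u8]` occupies two words, with the base address at field
// `FAT_PTR_ADDR` (offset 0) and the length at field `FAT_PTR_EXTRA`
// (offset 8); for `&dyn Trait`, the second word is the vtable pointer
// instead.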
#[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable)]
pub enum LayoutError<'tcx> {
    Unknown(Ty<'tcx>),
    SizeOverflow(Ty<'tcx>),
}

impl<'tcx> fmt::Display for LayoutError<'tcx> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match *self {
            LayoutError::Unknown(ty) => write!(f, "the type `{:?}` has an unknown layout", ty),
            LayoutError::SizeOverflow(ty) => {
                write!(f, "the type `{:?}` is too big for the current architecture", ty)
            }
        }
    }
}
fn layout_raw<'tcx>(
    tcx: TyCtxt<'tcx>,
    query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
) -> Result<&'tcx LayoutDetails, LayoutError<'tcx>> {
    ty::tls::with_related_context(tcx, move |icx| {
        let rec_limit = *tcx.sess.recursion_limit.get();
        let (param_env, ty) = query.into_parts();

        if icx.layout_depth > rec_limit {
            tcx.sess.fatal(&format!("overflow representing the type `{}`", ty));
        }

        // Update the ImplicitCtxt to increase the layout_depth.
        let icx = ty::tls::ImplicitCtxt { layout_depth: icx.layout_depth + 1, ..icx.clone() };

        ty::tls::enter_context(&icx, |_| {
            let cx = LayoutCx { tcx, param_env };
            let layout = cx.layout_raw_uncached(ty);
            // Type-level uninhabitedness should always imply ABI uninhabitedness.
            if let Ok(layout) = layout {
                if ty.conservative_is_privately_uninhabited(tcx) {
                    assert!(layout.abi.is_uninhabited());
                }
            }
            layout
        })
    })
}
pub fn provide(providers: &mut ty::query::Providers<'_>) {
    *providers = ty::query::Providers { layout_raw, ..*providers };
}
pub struct LayoutCx<'tcx, C> {
    pub tcx: C,
    pub param_env: ty::ParamEnv<'tcx>,
}

#[derive(Copy, Clone, Debug)]
pub enum StructKind {
    /// A tuple, closure, or univariant which cannot be coerced to unsized.
    AlwaysSized,
    /// A univariant, the last field of which may be coerced to unsized.
    MaybeUnsized,
    /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
    Prefixed(Size, Align),
}
// Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
// This is used to go between `memory_index` (source field order to memory order)
// and `inverse_memory_index` (memory order to source field order).
// See also `FieldPlacement::Arbitrary::memory_index` for more details.
// FIXME(eddyb) build a better abstraction for permutations, if possible.
fn invert_mapping(map: &[u32]) -> Vec<u32> {
    let mut inverse = vec![0; map.len()];
    for i in 0..map.len() {
        inverse[map[i] as usize] = i as u32;
    }
    inverse
}
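// Illustrative example (not from the original source): for the permutation
// `map = [2, 0, 1]` (source field 0 sits at memory position 2, and so on),
// `invert_mapping(&map)` returns `[1, 2, 0]`: memory position 0 is occupied
// by field 1, position 1 by field 2, and position 2 by field 0.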
impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
    fn scalar_pair(&self, a: Scalar, b: Scalar) -> LayoutDetails {
        let dl = self.data_layout();
        let b_align = b.value.align(dl);
        let align = a.value.align(dl).max(b_align).max(dl.aggregate_align);
        let b_offset = a.value.size(dl).align_to(b_align.abi);
        let size = (b_offset + b.value.size(dl)).align_to(align.abi);

        // HACK(nox): We iter on `b` and then `a` because `max_by_key`
        // returns the last maximum.
        let largest_niche = Niche::from_scalar(dl, b_offset, b.clone())
            .into_iter()
            .chain(Niche::from_scalar(dl, Size::ZERO, a.clone()))
            .max_by_key(|niche| niche.available(dl));

        LayoutDetails {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldPlacement::Arbitrary {
                offsets: vec![Size::ZERO, b_offset],
                memory_index: vec![0, 1],
            },
            abi: Abi::ScalarPair(a, b),
            largest_niche,
            align,
            size,
        }
    }
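    // Worked example (illustrative, not from the original source): pairing a
    // `u8` scalar `a` with a `u32` scalar `b` gives `b_align.abi == 4`, so
    // `b_offset = Size::from_bytes(1).align_to(4) == 4`, and the pair is
    // 8 bytes with ABI alignment 4 -- offsets `[0, 4]`, exactly like the
    // tuple `(u8, u32)` laid out in that order.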
    fn univariant_uninterned(
        &self,
        ty: Ty<'tcx>,
        fields: &[TyLayout<'_>],
        repr: &ReprOptions,
        kind: StructKind,
    ) -> Result<LayoutDetails, LayoutError<'tcx>> {
        let dl = self.data_layout();
        let pack = repr.pack;
        if pack.is_some() && repr.align.is_some() {
            bug!("struct cannot be packed and aligned");
        }

        let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };

        let mut sized = true;
        let mut offsets = vec![Size::ZERO; fields.len()];
        let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();

        let mut optimize = !repr.inhibit_struct_field_reordering_opt();
        if let StructKind::Prefixed(_, align) = kind {
            optimize &= align.bytes() == 1;
        }

        if optimize {
            let end =
                if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
            let optimizing = &mut inverse_memory_index[..end];
            let field_align = |f: &TyLayout<'_>| {
                if let Some(pack) = pack { f.align.abi.min(pack) } else { f.align.abi }
            };
            match kind {
                StructKind::AlwaysSized | StructKind::MaybeUnsized => {
                    optimizing.sort_by_key(|&x| {
                        // Place ZSTs first to avoid "interesting offsets",
                        // especially with only one or two non-ZST fields.
                        let f = &fields[x as usize];
                        (!f.is_zst(), cmp::Reverse(field_align(f)))
                    });
                }
                StructKind::Prefixed(..) => {
                    optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
                }
            }
        }
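        // Illustrative example (assumption, not from the original source): for
        // a plain `struct S { a: u8, b: u32, c: u16 }`, the sort above orders
        // the fields in memory as `b, c, a` (descending alignment, ZSTs first),
        // packing the struct into 8 bytes instead of the 12 that source order
        // would need.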
        // inverse_memory_index holds field indices by increasing memory offset.
        // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
        // We now write field offsets to the corresponding offset slot;
        // field 5 with offset 0 puts 0 in offsets[5].
        // At the bottom of this function, we invert `inverse_memory_index` to
        // produce `memory_index` (see `invert_mapping`).

        let mut offset = Size::ZERO;
        let mut largest_niche = None;
        let mut largest_niche_available = 0;

        if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
            let prefix_align =
                if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
            align = align.max(AbiAndPrefAlign::new(prefix_align));
            offset = prefix_size.align_to(prefix_align);
        }

        for &i in &inverse_memory_index {
            let field = fields[i as usize];
            if !sized {
                bug!("univariant: field #{} of `{}` comes after unsized field", offsets.len(), ty);
            }

            if field.is_unsized() {
                sized = false;
            }

            // Invariant: offset < dl.obj_size_bound() <= 1<<61
            let field_align = if let Some(pack) = pack {
                field.align.min(AbiAndPrefAlign::new(pack))
            } else {
                field.align
            };
            offset = offset.align_to(field_align.abi);
            align = align.max(field_align);

            debug!("univariant offset: {:?} field: {:#?}", offset, field);
            offsets[i as usize] = offset;

            if !repr.hide_niche() {
                if let Some(mut niche) = field.largest_niche.clone() {
                    let available = niche.available(dl);
                    if available > largest_niche_available {
                        largest_niche_available = available;
                        niche.offset += offset;
                        largest_niche = Some(niche);
                    }
                }
            }

            offset = offset.checked_add(field.size, dl).ok_or(LayoutError::SizeOverflow(ty))?;
        }

        if let Some(repr_align) = repr.align {
            align = align.max(AbiAndPrefAlign::new(repr_align));
        }

        debug!("univariant min_size: {:?}", offset);
        let min_size = offset;

        // As stated above, inverse_memory_index holds field indices by increasing offset.
        // This makes it an already-sorted view of the offsets vec.
        // To invert it, consider:
        // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
        // Field 5 would be the first element, so memory_index is i:
        // Note: if we didn't optimize, it's already right.

        let memory_index;
        if optimize {
            memory_index = invert_mapping(&inverse_memory_index);
        } else {
            memory_index = inverse_memory_index;
        }

        let size = min_size.align_to(align.abi);
        let mut abi = Abi::Aggregate { sized };
        // Unpack newtype ABIs and find scalar pairs.
        if sized && size.bytes() > 0 {
            // All other fields must be ZSTs, and we need them to all start at 0.
            let mut zst_offsets = offsets.iter().enumerate().filter(|&(i, _)| fields[i].is_zst());
            if zst_offsets.all(|(_, o)| o.bytes() == 0) {
                let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());

                match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
                    // We have exactly one non-ZST field.
                    (Some((i, field)), None, None) => {
                        // Field fills the struct and it has a scalar or scalar pair ABI.
                        if offsets[i].bytes() == 0
                            && align.abi == field.align.abi
                            && size == field.size
                        {
                            match field.abi {
                                // For plain scalars, or vectors of them, we can't unpack
                                // newtypes for `#[repr(C)]`, as that affects C ABIs.
                                Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
                                    abi = field.abi.clone();
                                }
                                // But scalar pairs are Rust-specific and get
                                // treated as aggregates by C ABIs anyway.
                                Abi::ScalarPair(..) => {
                                    abi = field.abi.clone();
                                }
                                _ => {}
                            }
                        }
                    }

                    // Two non-ZST fields, and they're both scalars.
                    (
                        Some((
                            i,
                            &TyLayout {
                                details: &LayoutDetails { abi: Abi::Scalar(ref a), .. },
                                ..
                            },
                        )),
                        Some((
                            j,
                            &TyLayout {
                                details: &LayoutDetails { abi: Abi::Scalar(ref b), .. },
                                ..
                            },
                        )),
                        None,
                    ) => {
                        // Order by the memory placement, not source order.
                        let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
                            ((i, a), (j, b))
                        } else {
                            ((j, b), (i, a))
                        };
                        let pair = self.scalar_pair(a.clone(), b.clone());
                        let pair_offsets = match pair.fields {
                            FieldPlacement::Arbitrary { ref offsets, ref memory_index } => {
                                assert_eq!(memory_index, &[0, 1]);
                                offsets
                            }
                            _ => bug!(),
                        };
                        if offsets[i] == pair_offsets[0]
                            && offsets[j] == pair_offsets[1]
                            && align == pair.align
                            && size == pair.size
                        {
                            // We can use `ScalarPair` only when it matches our
                            // already computed layout (including `#[repr(C)]`).
                            abi = pair.abi;
                        }
                    }

                    _ => {}
                }
            }
        }

        if sized && fields.iter().any(|f| f.abi.is_uninhabited()) {
            abi = Abi::Uninhabited;
        }

        Ok(LayoutDetails {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldPlacement::Arbitrary { offsets, memory_index },
            abi,
            largest_niche,
            align,
            size,
        })
    }
    fn layout_raw_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx LayoutDetails, LayoutError<'tcx>> {
        let tcx = self.tcx;
        let param_env = self.param_env;
        let dl = self.data_layout();
        let scalar_unit = |value: Primitive| {
            let bits = value.size(dl).bits();
            assert!(bits <= 128);
            Scalar { value, valid_range: 0..=(!0 >> (128 - bits)) }
        };
        let scalar =
            |value: Primitive| tcx.intern_layout(LayoutDetails::scalar(self, scalar_unit(value)));

        let univariant = |fields: &[TyLayout<'_>], repr: &ReprOptions, kind| {
            Ok(tcx.intern_layout(self.univariant_uninterned(ty, fields, repr, kind)?))
        };
        debug_assert!(!ty.has_infer_types_or_consts());

        Ok(match ty.kind {
            // Basic scalars.
            ty::Bool => tcx.intern_layout(LayoutDetails::scalar(
                self,
                Scalar { value: Int(I8, false), valid_range: 0..=1 },
            )),
            ty::Char => tcx.intern_layout(LayoutDetails::scalar(
                self,
                Scalar { value: Int(I32, false), valid_range: 0..=0x10FFFF },
            )),
            ty::Int(ity) => scalar(Int(Integer::from_attr(dl, attr::SignedInt(ity)), true)),
            ty::Uint(ity) => scalar(Int(Integer::from_attr(dl, attr::UnsignedInt(ity)), false)),
            ty::Float(fty) => scalar(match fty {
                ast::FloatTy::F32 => F32,
                ast::FloatTy::F64 => F64,
            }),
            ty::FnPtr(_) => {
                let mut ptr = scalar_unit(Pointer);
                ptr.valid_range = 1..=*ptr.valid_range.end();
                tcx.intern_layout(LayoutDetails::scalar(self, ptr))
            }

            // The never type.
            ty::Never => tcx.intern_layout(LayoutDetails {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldPlacement::Union(0),
                abi: Abi::Uninhabited,
                largest_niche: None,
                align: dl.i8_align,
                size: Size::ZERO,
            }),
            // Potentially-fat pointers.
            ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                let mut data_ptr = scalar_unit(Pointer);
                if !ty.is_unsafe_ptr() {
                    data_ptr.valid_range = 1..=*data_ptr.valid_range.end();
                }

                let pointee = tcx.normalize_erasing_regions(param_env, pointee);
                if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
                    return Ok(tcx.intern_layout(LayoutDetails::scalar(self, data_ptr)));
                }

                let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
                let metadata = match unsized_part.kind {
                    ty::Foreign(..) => {
                        return Ok(tcx.intern_layout(LayoutDetails::scalar(self, data_ptr)));
                    }
                    ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)),
                    ty::Dynamic(..) => {
                        let mut vtable = scalar_unit(Pointer);
                        vtable.valid_range = 1..=*vtable.valid_range.end();
                        vtable
                    }
                    _ => return Err(LayoutError::Unknown(unsized_part)),
                };

                // Effectively a (ptr, meta) tuple.
                tcx.intern_layout(self.scalar_pair(data_ptr, metadata))
            }
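            // Illustrative example (not from the original source): for `&str`,
            // `data_ptr` is a `Pointer` scalar whose valid range starts at 1
            // (never null) and `metadata` is a pointer-sized integer holding
            // the length, so the result is a two-word `ScalarPair` -- the
            // (ptr, meta) tuple mentioned above.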
            // Arrays and slices.
            ty::Array(element, mut count) => {
                if count.has_projections() {
                    count = tcx.normalize_erasing_regions(param_env, count);
                    if count.has_projections() {
                        return Err(LayoutError::Unknown(ty));
                    }
                }

                let count = count.try_eval_usize(tcx, param_env).ok_or(LayoutError::Unknown(ty))?;
                let element = self.layout_of(element)?;
                let size =
                    element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;

                let abi = if count != 0 && ty.conservative_is_privately_uninhabited(tcx) {
                    Abi::Uninhabited
                } else {
                    Abi::Aggregate { sized: true }
                };

                let largest_niche = if count != 0 { element.largest_niche.clone() } else { None };

                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldPlacement::Array { stride: element.size, count },
                    abi,
                    largest_niche,
                    align: element.align,
                    size,
                })
            }
            ty::Slice(element) => {
                let element = self.layout_of(element)?;
                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldPlacement::Array { stride: element.size, count: 0 },
                    abi: Abi::Aggregate { sized: false },
                    largest_niche: element.largest_niche.clone(),
                    align: element.align,
                    size: Size::ZERO,
                })
            }
            ty::Str => tcx.intern_layout(LayoutDetails {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldPlacement::Array { stride: Size::from_bytes(1), count: 0 },
                abi: Abi::Aggregate { sized: false },
                largest_niche: None,
                align: dl.i8_align,
                size: Size::ZERO,
            }),
            // Odd unit types.
            ty::FnDef(..) => univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?,
            ty::Dynamic(..) | ty::Foreign(..) => {
                let mut unit = self.univariant_uninterned(
                    ty,
                    &[],
                    &ReprOptions::default(),
                    StructKind::AlwaysSized,
                )?;
                match unit.abi {
                    Abi::Aggregate { ref mut sized } => *sized = false,
                    _ => bug!(),
                }
                tcx.intern_layout(unit)
            }

            ty::Generator(def_id, substs, _) => self.generator_layout(ty, def_id, substs)?,

            ty::Closure(def_id, ref substs) => {
                let tys = substs.as_closure().upvar_tys(def_id, tcx);
                univariant(
                    &tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    StructKind::AlwaysSized,
                )?
            }

            ty::Tuple(tys) => {
                let kind =
                    if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };

                univariant(
                    &tys.iter()
                        .map(|k| self.layout_of(k.expect_ty()))
                        .collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    kind,
                )?
            }
            // SIMD vector types.
            ty::Adt(def, ..) if def.repr.simd() => {
                let element = self.layout_of(ty.simd_type(tcx))?;
                let count = ty.simd_size(tcx);
                assert!(count > 0);
                let scalar = match element.abi {
                    Abi::Scalar(ref scalar) => scalar.clone(),
                    _ => {
                        tcx.sess.fatal(&format!(
                            "monomorphising SIMD type `{}` with \
                             a non-machine element type `{}`",
                            ty, element.ty
                        ));
                    }
                };
                let size =
                    element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;
                let align = dl.vector_align(size);
                let size = size.align_to(align.abi);

                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldPlacement::Array { stride: element.size, count },
                    abi: Abi::Vector { element: scalar, count },
                    largest_niche: element.largest_niche.clone(),
                    align,
                    size,
                })
            }
            // ADTs.
            ty::Adt(def, substs) => {
                // Cache the field layouts.
                let variants = def
                    .variants
                    .iter()
                    .map(|v| {
                        v.fields
                            .iter()
                            .map(|field| self.layout_of(field.ty(tcx, substs)))
                            .collect::<Result<Vec<_>, _>>()
                    })
                    .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                if def.is_union() {
                    if def.repr.pack.is_some() && def.repr.align.is_some() {
                        bug!("union cannot be packed and aligned");
                    }

                    let mut align =
                        if def.repr.pack.is_some() { dl.i8_align } else { dl.aggregate_align };

                    if let Some(repr_align) = def.repr.align {
                        align = align.max(AbiAndPrefAlign::new(repr_align));
                    }

                    let optimize = !def.repr.inhibit_union_abi_opt();
                    let mut size = Size::ZERO;
                    let mut abi = Abi::Aggregate { sized: true };
                    let index = VariantIdx::new(0);
                    for field in &variants[index] {
                        assert!(!field.is_unsized());
                        align = align.max(field.align);

                        // If all non-ZST fields have the same ABI, forward this ABI.
                        if optimize && !field.is_zst() {
                            // Normalize with scalar_unit to the maximal valid range.
                            let field_abi = match &field.abi {
                                Abi::Scalar(x) => Abi::Scalar(scalar_unit(x.value)),
                                Abi::ScalarPair(x, y) => {
                                    Abi::ScalarPair(scalar_unit(x.value), scalar_unit(y.value))
                                }
                                Abi::Vector { element: x, count } => {
                                    Abi::Vector { element: scalar_unit(x.value), count: *count }
                                }
                                Abi::Uninhabited | Abi::Aggregate { .. } => {
                                    Abi::Aggregate { sized: true }
                                }
                            };

                            if size == Size::ZERO {
                                // First non-ZST field: initialize `abi`.
                                abi = field_abi;
                            } else if abi != field_abi {
                                // Different fields have different ABIs: reset to Aggregate.
                                abi = Abi::Aggregate { sized: true };
                            }
                        }

                        size = cmp::max(size, field.size);
                    }

                    if let Some(pack) = def.repr.pack {
                        align = align.min(AbiAndPrefAlign::new(pack));
                    }

                    return Ok(tcx.intern_layout(LayoutDetails {
                        variants: Variants::Single { index },
                        fields: FieldPlacement::Union(variants[index].len()),
                        abi,
                        largest_niche: None,
                        align,
                        size: size.align_to(align.abi),
                    }));
                }
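                // Illustrative example (assumption, not from the original
                // source): for `union U { a: u32, b: char }`, both fields
                // normalize to a full-range `Int(I32, false)` scalar via
                // `scalar_unit`, so the union as a whole keeps `Abi::Scalar` --
                // with the `char` validity restriction dropped, since a union
                // field may hold any bit pattern.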
                // A variant is absent if it's uninhabited and only has ZST fields.
                // Present uninhabited variants only require space for their fields,
                // but *not* an encoding of the discriminant (e.g., a tag value).
                // See issue #49298 for more details on the need to leave space
                // for non-ZST uninhabited data (mostly partial initialization).
                let absent = |fields: &[TyLayout<'_>]| {
                    let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
                    let is_zst = fields.iter().all(|f| f.is_zst());
                    uninhabited && is_zst
                };
                let (present_first, present_second) = {
                    let mut present_variants = variants
                        .iter_enumerated()
                        .filter_map(|(i, v)| if absent(v) { None } else { Some(i) });
                    (present_variants.next(), present_variants.next())
                };
                let present_first = match present_first {
                    present_first @ Some(_) => present_first,
                    // Uninhabited because it has no variants, or only absent ones.
                    None if def.is_enum() => return tcx.layout_raw(param_env.and(tcx.types.never)),
                    // If it's a struct, still compute a layout so that we can still compute the
                    // field offsets.
                    None => Some(VariantIdx::new(0)),
                };

                let is_struct = !def.is_enum() ||
                    // Only one variant is present.
                    (present_second.is_none() &&
                    // Representation optimizations are allowed.
                    !def.repr.inhibit_enum_layout_opt());
                if is_struct {
                    // Struct, or univariant enum equivalent to a struct.
                    // (Typechecking will reject discriminant-sizing attrs.)

                    let v = present_first.unwrap();
                    let kind = if def.is_enum() || variants[v].is_empty() {
                        StructKind::AlwaysSized
                    } else {
                        let param_env = tcx.param_env(def.did);
                        let last_field = def.variants[v].fields.last().unwrap();
                        let always_sized =
                            tcx.type_of(last_field.did).is_sized(tcx.at(DUMMY_SP), param_env);
                        if !always_sized {
                            StructKind::MaybeUnsized
                        } else {
                            StructKind::AlwaysSized
                        }
                    };

                    let mut st = self.univariant_uninterned(ty, &variants[v], &def.repr, kind)?;
                    st.variants = Variants::Single { index: v };
                    let (start, end) = self.tcx.layout_scalar_valid_range(def.did);
                    match st.abi {
                        Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
                            // The asserts ensure that we are not using the
                            // `#[rustc_layout_scalar_valid_range(n)]` attribute
                            // to widen the range of anything, as that would
                            // probably result in UB somewhere.
                            // FIXME(eddyb) the asserts are probably not needed,
                            // as larger validity ranges would result in missed
                            // optimizations, *not* wrongly assuming the inner
                            // value is valid. e.g. unions enlarge validity ranges,
                            // because the values may be uninitialized.
                            if let Bound::Included(start) = start {
                                // FIXME(eddyb) this might be incorrect - it doesn't
                                // account for wrap-around (end < start) ranges.
                                assert!(*scalar.valid_range.start() <= start);
                                scalar.valid_range = start..=*scalar.valid_range.end();
                            }
                            if let Bound::Included(end) = end {
                                // FIXME(eddyb) this might be incorrect - it doesn't
                                // account for wrap-around (end < start) ranges.
                                assert!(*scalar.valid_range.end() >= end);
                                scalar.valid_range = *scalar.valid_range.start()..=end;
                            }

                            // Update `largest_niche` if we have introduced a larger niche.
                            let niche = if def.repr.hide_niche() {
                                None
                            } else {
                                Niche::from_scalar(dl, Size::ZERO, scalar.clone())
                            };
                            if let Some(niche) = niche {
                                match &st.largest_niche {
                                    Some(largest_niche) => {
                                        // Replace the existing niche even if they're equal,
                                        // because this one is at a lower offset.
                                        if largest_niche.available(dl) <= niche.available(dl) {
                                            st.largest_niche = Some(niche);
                                        }
                                    }
                                    None => st.largest_niche = Some(niche),
                                }
                            }
                        }
                        _ => assert!(
                            start == Bound::Unbounded && end == Bound::Unbounded,
                            "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
                            def,
                            st,
                        ),
                    }

                    return Ok(tcx.intern_layout(st));
                }
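                // Illustrative example (assumption, not from the original
                // source): `core::num::NonZeroU32` is a newtype marked with
                // `#[rustc_layout_scalar_valid_range_start(1)]`, so the code
                // above narrows its scalar's valid range from `0..=u32::MAX`
                // to `1..=u32::MAX`, creating the niche that keeps
                // `Option<NonZeroU32>` at 4 bytes.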
                // At this point, we have handled all unions and
                // structs. (We have also handled univariant enums
                // that allow representation optimization.)
                assert!(def.is_enum());

                // The current code for niche-filling relies on variant indices
                // instead of actual discriminants, so dataful enums with
                // explicit discriminants (RFC #2363) would misbehave.
                let no_explicit_discriminants = def
                    .variants
                    .iter_enumerated()
                    .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i.as_u32()));
                // Niche-filling enum optimization.
                if !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants {
                    let mut dataful_variant = None;
                    let mut niche_variants = VariantIdx::MAX..=VariantIdx::new(0);

                    // Find one non-ZST variant.
                    'variants: for (v, fields) in variants.iter_enumerated() {
                        if absent(fields) {
                            continue 'variants;
                        }
                        for f in fields {
                            if !f.is_zst() {
                                if dataful_variant.is_none() {
                                    dataful_variant = Some(v);
                                    continue 'variants;
                                } else {
                                    dataful_variant = None;
                                    break 'variants;
                                }
                            }
                        }
                        niche_variants = *niche_variants.start().min(&v)..=v;
                    }

                    if niche_variants.start() > niche_variants.end() {
                        dataful_variant = None;
                    }

                    if let Some(i) = dataful_variant {
                        let count = (niche_variants.end().as_u32()
                            - niche_variants.start().as_u32()
                            + 1) as u128;
                        // FIXME(#62691) use the largest niche across all fields,
                        // not just the first one.
                        for (field_index, &field) in variants[i].iter().enumerate() {
                            let niche = match &field.largest_niche {
                                Some(niche) => niche,
                                _ => continue,
                            };
                            let (niche_start, niche_scalar) = match niche.reserve(self, count) {
                                Some(pair) => pair,
                                None => continue,
                            };

                            let mut align = dl.aggregate_align;
                            let st = variants
                                .iter_enumerated()
                                .map(|(j, v)| {
                                    let mut st = self.univariant_uninterned(
                                        ty,
                                        v,
                                        &def.repr,
                                        StructKind::AlwaysSized,
                                    )?;
                                    st.variants = Variants::Single { index: j };

                                    align = align.max(st.align);

                                    Ok(st)
                                })
                                .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                            let offset = st[i].fields.offset(field_index) + niche.offset;
                            let size = st[i].size;

                            let mut abi = match st[i].abi {
                                Abi::Scalar(_) => Abi::Scalar(niche_scalar.clone()),
                                Abi::ScalarPair(ref first, ref second) => {
                                    // We need to use scalar_unit to reset the
                                    // valid range to the maximal one for that
                                    // primitive, because only the niche is
                                    // guaranteed to be initialised, not the
                                    // other primitive.
                                    if offset.bytes() == 0 {
                                        Abi::ScalarPair(
                                            niche_scalar.clone(),
                                            scalar_unit(second.value),
                                        )
                                    } else {
                                        Abi::ScalarPair(
                                            scalar_unit(first.value),
                                            niche_scalar.clone(),
                                        )
                                    }
                                }
                                _ => Abi::Aggregate { sized: true },
                            };

                            if st.iter().all(|v| v.abi.is_uninhabited()) {
                                abi = Abi::Uninhabited;
                            }

                            let largest_niche =
                                Niche::from_scalar(dl, offset, niche_scalar.clone());

                            return Ok(tcx.intern_layout(LayoutDetails {
                                variants: Variants::Multiple {
                                    discr: niche_scalar,
                                    discr_kind: DiscriminantKind::Niche {
                                        dataful_variant: i,
                                        niche_variants,
                                        niche_start,
                                    },
                                    discr_index: 0,
                                    variants: st,
                                },
                                fields: FieldPlacement::Arbitrary {
                                    offsets: vec![offset],
                                    memory_index: vec![0],
                                },
                                abi,
                                largest_niche,
                                size,
                                align,
                            }));
                        }
                    }
                }
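                // Illustrative example (not from the original source): for
                // `Option<&u8>`, `Some` is the dataful variant and `None` is
                // the sole niche variant. The reference's null value is
                // reserved via `niche.reserve`, so `None` is encoded as the
                // all-zero pointer and the enum fits in a single word with no
                // separate tag field.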
                let (mut min, mut max) = (i128::MAX, i128::MIN);
                let discr_type = def.repr.discr_type();
                let bits = Integer::from_attr(self, discr_type).size().bits();
                for (i, discr) in def.discriminants(tcx) {
                    if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
                        continue;
                    }
                    let mut x = discr.val as i128;
                    if discr_type.is_signed() {
                        // Sign-extend the raw representation to be an i128.
                        x = (x << (128 - bits)) >> (128 - bits);
                    }
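                    // Worked example (illustrative): with `bits == 8`, a raw
                    // discriminant of 0xFF shifts to the top of the i128, and
                    // the arithmetic right shift brings it back down as -1.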
                    if x < min {
                        min = x;
                    }
                    if x > max {
                        max = x;
                    }
                }
                // We might have no inhabited variants, so pretend there's at least one.
                if (min, max) == (i128::MAX, i128::MIN) {
                    min = 0;
                    max = 0;
                }
                assert!(min <= max, "discriminant range is {}...{}", min, max);
                let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max);
                let mut align = dl.aggregate_align;
                let mut size = Size::ZERO;

                // We're interested in the smallest alignment, so start large.
                let mut start_align = Align::from_bytes(256).unwrap();
                assert_eq!(Integer::for_align(dl, start_align), None);

                // repr(C) on an enum tells us to make a (tag, union) layout,
                // so we need to grow the prefix alignment to be at least
                // the alignment of the union. (This value is used both for
                // determining the alignment of the overall enum, and for
                // determining the alignment of the payload after the tag.)
                let mut prefix_align = min_ity.align(dl).abi;
                if def.repr.c() {
                    for fields in &variants {
                        for field in fields {
                            prefix_align = prefix_align.max(field.align.abi);
                        }
                    }
                }
                // Create the set of structs that represent each variant.
                let mut layout_variants = variants
                    .iter_enumerated()
                    .map(|(i, field_layouts)| {
                        let mut st = self.univariant_uninterned(
                            ty,
                            &field_layouts,
                            &def.repr,
                            StructKind::Prefixed(min_ity.size(), prefix_align),
                        )?;
                        st.variants = Variants::Single { index: i };
                        // Find the first field we can't move later
                        // to make room for a larger discriminant.
                        for field in
                            st.fields.index_by_increasing_offset().map(|j| field_layouts[j])
                        {
                            if !field.is_zst() || field.align.abi.bytes() != 1 {
                                start_align = start_align.min(field.align.abi);
                                break;
                            }
                        }
                        size = cmp::max(size, st.size);
                        align = align.max(st.align);
                        Ok(st)
                    })
                    .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                // Align the maximum variant size to the largest alignment.
                size = size.align_to(align.abi);

                if size.bytes() >= dl.obj_size_bound() {
                    return Err(LayoutError::SizeOverflow(ty));
                }
                let typeck_ity = Integer::from_attr(dl, def.repr.discr_type());
                if typeck_ity < min_ity {
                    // It is a bug if layout decided on a greater discriminant size than typeck
                    // did at this point (based on the values the discriminant can take on),
                    // because this discriminant will be loaded, and then stored into a variable
                    // of the type typeck calculated. Consider such a case (a bug): typeck decided
                    // on a byte-sized discriminant, but layout thinks we need 16 bits to store
                    // all the discriminant values. Then, in codegen, storing this 16-bit
                    // discriminant into an 8-bit sized temporary would have to discard some of
                    // the space needed to represent it (or layout is wrong in thinking it needs
                    // 16 bits).
                    bug!(
                        "layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
                        min_ity,
                        typeck_ity
                    );
                    // However, it is fine to make the discriminant type however large (as an
                    // optimisation) after this point - we'll just truncate the value we load
                    // in codegen.
                }
                // Check to see if we should use a different type for the
                // discriminant. We can safely use a type with the same size
                // as the alignment of the first field of each variant.
                // We increase the size of the discriminant to avoid LLVM copying
                // padding when it doesn't need to. This normally causes unaligned
                // load/stores and excessive memcpy/memset operations. By using a
                // bigger integer size, LLVM can be sure about its contents and
                // won't be so conservative.

                // Use the initial field alignment.
                let mut ity = if def.repr.c() || def.repr.int.is_some() {
                    min_ity
                } else {
                    Integer::for_align(dl, start_align).unwrap_or(min_ity)
                };

                // If the alignment is not larger than the chosen discriminant size,
                // don't use the alignment as the final size.
                if ity <= min_ity {
                    ity = min_ity;
                } else {
                    // Patch up the variants' first few fields.
                    let old_ity_size = min_ity.size();
                    let new_ity_size = ity.size();
                    for variant in &mut layout_variants {
                        match variant.fields {
                            FieldPlacement::Arbitrary { ref mut offsets, .. } => {
                                for i in offsets {
                                    if *i <= old_ity_size {
                                        assert_eq!(*i, old_ity_size);
                                        *i = new_ity_size;
                                    }
                                }
                                // We might be making the struct larger.
                                if variant.size <= old_ity_size {
                                    variant.size = new_ity_size;
                                }
                            }
                            _ => bug!(),
                        }
                    }
                }
                let tag_mask = !0u128 >> (128 - ity.size().bits());
                let tag = Scalar {
                    value: Int(ity, signed),
                    valid_range: (min as u128 & tag_mask)..=(max as u128 & tag_mask),
                };
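                // Worked example (illustrative, not from the original source):
                // for `enum E { A = -1, B = 0 }` with an i8 tag, `tag_mask` is
                // 0xFF, `min as u128 & tag_mask` is 0xFF and `max` maps to
                // 0x00, so the valid range is the wrap-around range
                // 0xFF..=0x00.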
                let mut abi = Abi::Aggregate { sized: true };
                if tag.value.size(dl) == size {
                    abi = Abi::Scalar(tag.clone());
                } else {
                    // Try to use a ScalarPair for all tagged enums.
                    let mut common_prim = None;
                    for (field_layouts, layout_variant) in variants.iter().zip(&layout_variants) {
                        let offsets = match layout_variant.fields {
                            FieldPlacement::Arbitrary { ref offsets, .. } => offsets,
                            _ => bug!(),
                        };
                        let mut fields =
                            field_layouts.iter().zip(offsets).filter(|p| !p.0.is_zst());
                        let (field, offset) = match (fields.next(), fields.next()) {
                            (None, None) => continue,
                            (Some(pair), None) => pair,
                            _ => {
                                common_prim = None;
                                break;
                            }
                        };
                        let prim = match field.details.abi {
                            Abi::Scalar(ref scalar) => scalar.value,
                            _ => {
                                common_prim = None;
                                break;
                            }
                        };
                        if let Some(pair) = common_prim {
                            // This is pretty conservative. We could go fancier
                            // by conflating things like i32 and u32, or even
                            // realising that (u8, u8) could just cohabit with
                            // u16 or even u32.
                            if pair != (prim, offset) {
                                common_prim = None;
                                break;
                            }
                        } else {
                            common_prim = Some((prim, offset));
                        }
                    }
                    if let Some((prim, offset)) = common_prim {
                        let pair = self.scalar_pair(tag.clone(), scalar_unit(prim));
                        let pair_offsets = match pair.fields {
                            FieldPlacement::Arbitrary { ref offsets, ref memory_index } => {
                                assert_eq!(memory_index, &[0, 1]);
                                offsets
                            }
                            _ => bug!(),
                        };
                        if pair_offsets[0] == Size::ZERO
                            && pair_offsets[1] == *offset
                            && align == pair.align
                            && size == pair.size
                        {
                            // We can use `ScalarPair` only when it matches our
                            // already computed layout (including `#[repr(C)]`).
                            abi = pair.abi;
                        }
                    }
                }

                if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
                    abi = Abi::Uninhabited;
                }

                let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag.clone());

                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Multiple {
                        discr: tag,
                        discr_kind: DiscriminantKind::Tag,
                        discr_index: 0,
                        variants: layout_variants,
                    },
                    fields: FieldPlacement::Arbitrary {
                        offsets: vec![Size::ZERO],
                        memory_index: vec![0],
                    },
                    largest_niche,
                    abi,
                    align,
                    size,
                })
            }
            // Types with no meaningful known layout.
            ty::Projection(_) | ty::Opaque(..) => {
                let normalized = tcx.normalize_erasing_regions(param_env, ty);
                if ty == normalized {
                    return Err(LayoutError::Unknown(ty));
                }
                tcx.layout_raw(param_env.and(normalized))?
            }

            ty::Bound(..)
            | ty::Placeholder(..)
            | ty::UnnormalizedProjection(..)
            | ty::GeneratorWitness(..)
            | ty::Infer(_) => bug!("LayoutDetails::compute: unexpected type `{}`", ty),

            ty::Param(_) | ty::Error => {
                return Err(LayoutError::Unknown(ty));
            }
        })
    }
}
/// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
#[derive(Clone, Debug, PartialEq)]
enum SavedLocalEligibility {
    Unassigned,
    Assigned(VariantIdx),
    // FIXME: Use newtype_index so we aren't wasting bytes
    Ineligible(Option<u32>),
}

// When laying out generators, we divide our saved local fields into two
// categories: overlap-eligible and overlap-ineligible.
//
// Those fields which are ineligible for overlap go in a "prefix" at the
// beginning of the layout, and always have space reserved for them.
//
// Overlap-eligible fields are only assigned to one variant, so we lay
// those fields out for each variant and put them right after the
// prefix.
//
// Finally, in the layout details, we point to the fields from the
// variants they are assigned to. It is possible for some fields to be
// included in multiple variants. No field ever "moves around" in the
// layout; its offset is always the same.
//
// Also included in the layout are the upvars and the discriminant.
// These are included as fields on the "outer" layout; they are not part
// of any variant.
impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
    /// Compute the eligibility and assignment of each local.
    fn generator_saved_local_eligibility(
        &self,
        info: &GeneratorLayout<'tcx>,
    ) -> (BitSet<GeneratorSavedLocal>, IndexVec<GeneratorSavedLocal, SavedLocalEligibility>) {
        use SavedLocalEligibility::*;

        let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> =
            IndexVec::from_elem_n(Unassigned, info.field_tys.len());

        // The saved locals not eligible for overlap. These will get
        // "promoted" to the prefix of our generator.
        let mut ineligible_locals = BitSet::new_empty(info.field_tys.len());

        // Figure out which of our saved locals are fields in only
        // one variant. The rest are deemed ineligible for overlap.
        for (variant_index, fields) in info.variant_fields.iter_enumerated() {
            for local in fields {
                match assignments[*local] {
                    Unassigned => {
                        assignments[*local] = Assigned(variant_index);
                    }
                    Assigned(idx) => {
                        // We've already seen this local at another suspension
                        // point, so it is no longer a candidate.
                        trace!(
                            "removing local {:?} in >1 variant ({:?}, {:?})",
                            local,
                            variant_index,
                            idx
                        );
                        ineligible_locals.insert(*local);
                        assignments[*local] = Ineligible(None);
                    }
                    Ineligible(_) => {}
                }
            }
        }
        // Next, check every pair of eligible locals to see if they
        // conflict.
        for local_a in info.storage_conflicts.rows() {
            let conflicts_a = info.storage_conflicts.count(local_a);
            if ineligible_locals.contains(local_a) {
                continue;
            }

            for local_b in info.storage_conflicts.iter(local_a) {
                // local_a and local_b are storage live at the same time, therefore they
                // cannot overlap in the generator layout. The only way to guarantee
                // this is if they are in the same variant, or one is ineligible
                // (which means it is stored in every variant).
                if ineligible_locals.contains(local_b)
                    || assignments[local_a] == assignments[local_b]
                {
                    continue;
                }

                // If they conflict, we will choose one to make ineligible.
                // This is not always optimal; it's just a greedy heuristic that
                // seems to produce good results most of the time.
                let conflicts_b = info.storage_conflicts.count(local_b);
                let (remove, other) =
                    if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
                ineligible_locals.insert(remove);
                assignments[remove] = Ineligible(None);
                trace!("removing local {:?} due to conflict with {:?}", remove, other);
            }
        }
        // Count the number of variants in use. If only one of them, then it is
        // impossible to overlap any locals in our layout. In this case it's
        // always better to make the remaining locals ineligible, so we can
        // lay them out with the other locals in the prefix and eliminate
        // unnecessary padding bytes.
        {
            let mut used_variants = BitSet::new_empty(info.variant_fields.len());
            for assignment in &assignments {
                if let Assigned(idx) = assignment {
                    used_variants.insert(*idx);
                }
            }
            if used_variants.count() < 2 {
                for assignment in assignments.iter_mut() {
                    *assignment = Ineligible(None);
                }
                ineligible_locals.insert_all();
            }
        }

        // Write down the order of our locals that will be promoted to the prefix.
        {
            for (idx, local) in ineligible_locals.iter().enumerate() {
                assignments[local] = Ineligible(Some(idx as u32));
            }
        }
        debug!("generator saved local assignments: {:?}", assignments);

        (ineligible_locals, assignments)
    }
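    // Illustrative example (assumption, not from the original source): a local
    // saved across only the first `yield` ends up `Assigned` to that resume
    // state's variant (suspension states follow the reserved unresumed/
    // returned/poisoned variants), while a local saved across two different
    // yields is demoted to `Ineligible(Some(_))` and promoted to the prefix.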
    /// Compute the full generator layout.
    fn generator_layout(
        &self,
        ty: Ty<'tcx>,
        def_id: hir::def_id::DefId,
        substs: SubstsRef<'tcx>,
    ) -> Result<&'tcx LayoutDetails, LayoutError<'tcx>> {
        use SavedLocalEligibility::*;
        let tcx = self.tcx;

        let subst_field = |ty: Ty<'tcx>| ty.subst(tcx, substs);

        let info = tcx.generator_layout(def_id);
        let (ineligible_locals, assignments) = self.generator_saved_local_eligibility(&info);

        // Build a prefix layout, including "promoting" all ineligible
        // locals as part of the prefix. We compute the layout of all of
        // these fields at once to get optimal packing.
        let discr_index = substs.as_generator().prefix_tys(def_id, tcx).count();

        // `info.variant_fields` already accounts for the reserved variants, so no need to add them.
        let max_discr = (info.variant_fields.len() - 1) as u128;
        let discr_int = Integer::fit_unsigned(max_discr);
        let discr_int_ty = discr_int.to_ty(tcx, false);
        let discr = Scalar { value: Primitive::Int(discr_int, false), valid_range: 0..=max_discr };
        let discr_layout = self.tcx.intern_layout(LayoutDetails::scalar(self, discr.clone()));
        let discr_layout = TyLayout { ty: discr_int_ty, details: discr_layout };
        let promoted_layouts = ineligible_locals
            .iter()
            .map(|local| subst_field(info.field_tys[local]))
            .map(|ty| tcx.mk_maybe_uninit(ty))
            .map(|ty| self.layout_of(ty));
        let prefix_layouts = substs
            .as_generator()
            .prefix_tys(def_id, tcx)
            .map(|ty| self.layout_of(ty))
            .chain(iter::once(Ok(discr_layout)))
            .chain(promoted_layouts)
            .collect::<Result<Vec<_>, _>>()?;
        let prefix = self.univariant_uninterned(
            ty,
            &prefix_layouts,
            &ReprOptions::default(),
            StructKind::AlwaysSized,
        )?;

        let (prefix_size, prefix_align) = (prefix.size, prefix.align);
        // Split the prefix layout into the "outer" fields (upvars and
        // discriminant) and the "promoted" fields. Promoted fields will
        // get included in each variant that requested them in
        // `GeneratorLayout`.
        debug!("prefix = {:#?}", prefix);
        let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
            FieldPlacement::Arbitrary { mut offsets, memory_index } => {
                let mut inverse_memory_index = invert_mapping(&memory_index);

                // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
                // "outer" and "promoted" fields respectively.
                let b_start = (discr_index + 1) as u32;
                let offsets_b = offsets.split_off(b_start as usize);
                let offsets_a = offsets;

                // Disentangle the "a" and "b" components of `inverse_memory_index`
                // by preserving the order but keeping only one disjoint "half" each.
                // FIXME(eddyb) build a better abstraction for permutations, if possible.
                let inverse_memory_index_b: Vec<_> =
                    inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
                inverse_memory_index.retain(|&i| i < b_start);
                let inverse_memory_index_a = inverse_memory_index;

                // Since `inverse_memory_index_{a,b}` each only refer to their
                // respective fields, they can be safely inverted.
                let memory_index_a = invert_mapping(&inverse_memory_index_a);
                let memory_index_b = invert_mapping(&inverse_memory_index_b);

                let outer_fields =
                    FieldPlacement::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
                (outer_fields, offsets_b, memory_index_b)
            }
            _ => bug!(),
        };
        let mut size = prefix.size;
        let mut align = prefix.align;
        let variants = info
            .variant_fields
            .iter_enumerated()
            .map(|(index, variant_fields)| {
                // Only include overlap-eligible fields when we compute our variant layout.
                let variant_only_tys = variant_fields
                    .iter()
                    .filter(|local| match assignments[**local] {
                        Unassigned => bug!(),
                        Assigned(v) if v == index => true,
                        Assigned(_) => bug!("assignment does not match variant"),
                        Ineligible(_) => false,
                    })
                    .map(|local| subst_field(info.field_tys[*local]));

                let mut variant = self.univariant_uninterned(
                    ty,
                    &variant_only_tys
                        .map(|ty| self.layout_of(ty))
                        .collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    StructKind::Prefixed(prefix_size, prefix_align.abi),
                )?;
                variant.variants = Variants::Single { index };

                let (offsets, memory_index) = match variant.fields {
                    FieldPlacement::Arbitrary { offsets, memory_index } => (offsets, memory_index),
                    _ => bug!(),
                };
                // Now, stitch the promoted and variant-only fields back together in
                // the order they are mentioned by our GeneratorLayout.
                // Because we only use some subset (that can differ between variants)
                // of the promoted fields, we can't just pick those elements of the
                // `promoted_memory_index` (as we'd end up with gaps).
                // So instead, we build an "inverse memory_index", as if all of the
                // promoted fields were being used, but leave the elements not in the
                // subset as `INVALID_FIELD_IDX`, which we can filter out later to
                // obtain a valid (bijective) mapping.
                const INVALID_FIELD_IDX: u32 = !0;
                let mut combined_inverse_memory_index =
                    vec![INVALID_FIELD_IDX; promoted_memory_index.len() + memory_index.len()];
                let mut offsets_and_memory_index = offsets.into_iter().zip(memory_index);
                let combined_offsets = variant_fields
                    .iter()
                    .enumerate()
                    .map(|(i, local)| {
                        let (offset, memory_index) = match assignments[*local] {
                            Unassigned => bug!(),
                            Assigned(_) => {
                                let (offset, memory_index) =
                                    offsets_and_memory_index.next().unwrap();
                                (offset, promoted_memory_index.len() as u32 + memory_index)
                            }
                            Ineligible(field_idx) => {
                                let field_idx = field_idx.unwrap() as usize;
                                (promoted_offsets[field_idx], promoted_memory_index[field_idx])
                            }
                        };
                        combined_inverse_memory_index[memory_index as usize] = i as u32;
                        offset
                    })
                    .collect();

                // Remove the unused slots and invert the mapping to obtain the
                // combined `memory_index` (also see previous comment).
                combined_inverse_memory_index.retain(|&i| i != INVALID_FIELD_IDX);
                let combined_memory_index = invert_mapping(&combined_inverse_memory_index);

                variant.fields = FieldPlacement::Arbitrary {
                    offsets: combined_offsets,
                    memory_index: combined_memory_index,
                };

                size = size.max(variant.size);
                align = align.max(variant.align);
                Ok(variant)
            })
            .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
        size = size.align_to(align.abi);

        let abi = if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi.is_uninhabited())
        {
            Abi::Uninhabited
        } else {
            Abi::Aggregate { sized: true }
        };

        let layout = tcx.intern_layout(LayoutDetails {
            variants: Variants::Multiple {
                discr,
                discr_kind: DiscriminantKind::Tag,
                discr_index,
                variants,
            },
            fields: outer_fields,
            abi,
            largest_niche: prefix.largest_niche,
            size,
            align,
        });
        debug!("generator layout ({:?}): {:#?}", ty, layout);
        Ok(layout)
    }
    /// This is invoked by the `layout_raw` query to record the final
    /// layout of each type.
    #[inline(always)]
    fn record_layout_for_printing(&self, layout: TyLayout<'tcx>) {
        // If we are running with `-Zprint-type-sizes`, maybe record layouts
        // for dumping later.
        if self.tcx.sess.opts.debugging_opts.print_type_sizes {
            self.record_layout_for_printing_outlined(layout)
        }
    }

    fn record_layout_for_printing_outlined(&self, layout: TyLayout<'tcx>) {
        // Ignore layouts that are done with non-empty environments or
        // non-monomorphic layouts, as the user only wants to see the stuff
        // resulting from the final codegen session.
        if layout.ty.has_param_types() || !self.param_env.caller_bounds.is_empty() {
            return;
        }
        // (delay format until we actually need it)
        let record = |kind, packed, opt_discr_size, variants| {
            let type_desc = format!("{:?}", layout.ty);
            self.tcx.sess.code_stats.record_type_size(
                kind,
                type_desc,
                layout.align.abi,
                layout.size,
                packed,
                opt_discr_size,
                variants,
            );
        };

        let adt_def = match layout.ty.kind {
            ty::Adt(ref adt_def, _) => {
                debug!("print-type-size t: `{:?}` process adt", layout.ty);
                adt_def
            }

            ty::Closure(..) => {
                debug!("print-type-size t: `{:?}` record closure", layout.ty);
                record(DataTypeKind::Closure, false, None, vec![]);
                return;
            }

            _ => {
                debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
                return;
            }
        };

        let adt_kind = adt_def.adt_kind();
        let adt_packed = adt_def.repr.pack.is_some();
        let build_variant_info = |n: Option<Ident>, flds: &[ast::Name], layout: TyLayout<'tcx>| {
            let mut min_size = Size::ZERO;
            let field_info: Vec<_> = flds
                .iter()
                .enumerate()
                .map(|(i, &name)| match layout.field(self, i) {
                    Err(err) => {
                        bug!("no layout found for field {}: `{:?}`", name, err);
                    }
                    Ok(field_layout) => {
                        let offset = layout.fields.offset(i);
                        let field_end = offset + field_layout.size;
                        if min_size < field_end {
                            min_size = field_end;
                        }
                        session::FieldInfo {
                            name: name.to_string(),
                            offset: offset.bytes(),
                            size: field_layout.size.bytes(),
                            align: field_layout.align.abi.bytes(),
                        }
                    }
                })
                .collect();

            session::VariantInfo {
                name: n.map(|n| n.to_string()),
                kind: if layout.is_unsized() {
                    session::SizeKind::Min
                } else {
                    session::SizeKind::Exact
                },
                align: layout.align.abi.bytes(),
                size: if min_size.bytes() == 0 { layout.size.bytes() } else { min_size.bytes() },
                fields: field_info,
            }
        };
        match layout.variants {
            Variants::Single { index } => {
                debug!("print-type-size `{:#?}` variant {}", layout, adt_def.variants[index].ident);
                if !adt_def.variants.is_empty() {
                    let variant_def = &adt_def.variants[index];
                    let fields: Vec<_> = variant_def.fields.iter().map(|f| f.ident.name).collect();
                    record(
                        adt_kind.into(),
                        adt_packed,
                        None,
                        vec![build_variant_info(Some(variant_def.ident), &fields, layout)],
                    );
                } else {
                    // (This case arises for *empty* enums; so give it
                    // zero variants.)
                    record(adt_kind.into(), adt_packed, None, vec![]);
                }
            }

            Variants::Multiple { ref discr, ref discr_kind, .. } => {
                debug!(
                    "print-type-size `{:#?}` adt general variants def {}",
                    layout.ty,
                    adt_def.variants.len()
                );
                let variant_infos: Vec<_> = adt_def
                    .variants
                    .iter_enumerated()
                    .map(|(i, variant_def)| {
                        let fields: Vec<_> =
                            variant_def.fields.iter().map(|f| f.ident.name).collect();
                        build_variant_info(
                            Some(variant_def.ident),
                            &fields,
                            layout.for_variant(self, i),
                        )
                    })
                    .collect();
                record(
                    adt_kind.into(),
                    adt_packed,
                    match discr_kind {
                        DiscriminantKind::Tag => Some(discr.value.size(self)),
                        _ => None,
                    },
                    variant_infos,
                );
            }
        }
    }
}
/// Type size "skeleton", i.e., the only information determining a type's size.
/// While this is conservative (aside from constant sizes, only pointers,
/// newtypes thereof, and null-pointer-optimized enums are allowed), it is
/// enough to statically check common use cases of transmute.
#[derive(Copy, Clone, Debug)]
pub enum SizeSkeleton<'tcx> {
    /// Any statically computable Layout.
    Known(Size),

    /// A potentially-fat pointer.
    Pointer {
        /// If true, this pointer is never null.
        non_zero: bool,
        /// The type which determines the unsized metadata, if any,
        /// of this pointer. Either a type parameter or a projection
        /// depending on one, with regions erased.
        tail: Ty<'tcx>,
    },
}
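// Illustrative example (assumption, not from the original source): inside a
// generic function, transmuting `&T` to `*const T` cannot compute a concrete
// layout, but both sides reduce to `SizeSkeleton::Pointer { tail: T, .. }`,
// so the equal-size check can still succeed even though `T` is a type
// parameter.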
impl<'tcx> SizeSkeleton<'tcx> {
    pub fn compute(
        ty: Ty<'tcx>,
        tcx: TyCtxt<'tcx>,
        param_env: ty::ParamEnv<'tcx>,
    ) -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
        debug_assert!(!ty.has_infer_types_or_consts());

        // First try computing a static layout.
        let err = match tcx.layout_of(param_env.and(ty)) {
            Ok(layout) => {
                return Ok(SizeSkeleton::Known(layout.size));
            }
            Err(err) => err,
        };

        match ty.kind {
            ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                let non_zero = !ty.is_unsafe_ptr();
                let tail = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
                match tail.kind {
                    ty::Param(_) | ty::Projection(_) => {
                        debug_assert!(tail.has_param_types());
                        Ok(SizeSkeleton::Pointer { non_zero, tail: tcx.erase_regions(&tail) })
                    }
                    _ => bug!(
                        "SizeSkeleton::compute({}): layout errored ({}), yet \
                         tail `{}` is not a type parameter or a projection",
                        ty,
                        err,
                        tail
                    ),
                }
            }
            ty::Adt(def, substs) => {
                // Only newtypes and enums w/ nullable pointer optimization.
                if def.is_union() || def.variants.is_empty() || def.variants.len() > 2 {
                    return Err(err);
                }

                // Get a zero-sized variant or a pointer newtype.
                let zero_or_ptr_variant = |i| {
                    let i = VariantIdx::new(i);
                    let fields = def.variants[i]
                        .fields
                        .iter()
                        .map(|field| SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env));
                    let mut ptr = None;
                    for field in fields {
                        let field = field?;
                        match field {
                            SizeSkeleton::Known(size) => {
                                if size.bytes() > 0 {
                                    return Err(err);
                                }
                            }
                            SizeSkeleton::Pointer { .. } => {
                                if ptr.is_some() {
                                    return Err(err);
                                }
                                ptr = Some(field);
                            }
                        }
                    }
                    Ok(ptr)
                };

                let v0 = zero_or_ptr_variant(0)?;
                // Newtype.
                if def.variants.len() == 1 {
                    if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
                        return Ok(SizeSkeleton::Pointer {
                            non_zero: non_zero
                                || match tcx.layout_scalar_valid_range(def.did) {
                                    (Bound::Included(start), Bound::Unbounded) => start > 0,
                                    (Bound::Included(start), Bound::Included(end)) => {
                                        0 < start && start < end
                                    }
                                    _ => false,
                                },
                            tail,
                        });
                    } else {
                        return Err(err);
                    }
                }

                let v1 = zero_or_ptr_variant(1)?;
                // Nullable pointer enum optimization.
                match (v0, v1) {
                    (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None)
                    | (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
                        Ok(SizeSkeleton::Pointer { non_zero: false, tail })
                    }
                    _ => Err(err),
                }
            }

            ty::Projection(_) | ty::Opaque(..) => {
                let normalized = tcx.normalize_erasing_regions(param_env, ty);
                if ty == normalized {
                    Err(err)
                } else {
                    SizeSkeleton::compute(normalized, tcx, param_env)
                }
            }

            _ => Err(err),
        }
    }
    pub fn same_size(self, other: SizeSkeleton<'_>) -> bool {
        match (self, other) {
            (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
            (SizeSkeleton::Pointer { tail: a, .. }, SizeSkeleton::Pointer { tail: b, .. }) => {
                a == b
            }
            _ => false,
        }
    }
}
pub trait HasTyCtxt<'tcx>: HasDataLayout {
    fn tcx(&self) -> TyCtxt<'tcx>;
}

pub trait HasParamEnv<'tcx> {
    fn param_env(&self) -> ty::ParamEnv<'tcx>;
}

impl<'tcx> HasDataLayout for TyCtxt<'tcx> {
    fn data_layout(&self) -> &TargetDataLayout {
        &self.data_layout
    }
}

impl<'tcx> HasTyCtxt<'tcx> for TyCtxt<'tcx> {
    fn tcx(&self) -> TyCtxt<'tcx> {
        *self
    }
}

impl<'tcx, C> HasParamEnv<'tcx> for LayoutCx<'tcx, C> {
    fn param_env(&self) -> ty::ParamEnv<'tcx> {
        self.param_env
    }
}

impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
    fn data_layout(&self) -> &TargetDataLayout {
        self.tcx.data_layout()
    }
}

impl<'tcx, T: HasTyCtxt<'tcx>> HasTyCtxt<'tcx> for LayoutCx<'tcx, T> {
    fn tcx(&self) -> TyCtxt<'tcx> {
        self.tcx.tcx()
    }
}
pub type TyLayout<'tcx> = ::rustc_target::abi::TyLayout<'tcx, Ty<'tcx>>;

impl<'tcx> LayoutOf for LayoutCx<'tcx, TyCtxt<'tcx>> {
    type Ty = Ty<'tcx>;
    type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;

    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout {
        let param_env = self.param_env.with_reveal_all();
        let ty = self.tcx.normalize_erasing_regions(param_env, ty);
        let details = self.tcx.layout_raw(param_env.and(ty))?;
        let layout = TyLayout { ty, details };

        // N.B., this recording is normally disabled; when enabled, it
        // can however trigger recursive invocations of `layout_of`.
        // Therefore, we execute it *after* the main query has
        // completed, to avoid problems around recursive structures
        // and the like. (Admittedly, I wasn't able to reproduce a problem
        // here, but it seems like the right thing to do. -nmatsakis)
        self.record_layout_for_printing(layout);

        Ok(layout)
    }
}
impl LayoutOf for LayoutCx<'tcx, ty::query::TyCtxtAt<'tcx>> {
    type Ty = Ty<'tcx>;
    type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;

    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout {
        let param_env = self.param_env.with_reveal_all();
        let ty = self.tcx.normalize_erasing_regions(param_env, ty);
        let details = self.tcx.layout_raw(param_env.and(ty))?;
        let layout = TyLayout { ty, details };

        // N.B., this recording is normally disabled; when enabled, it
        // can however trigger recursive invocations of `layout_of`.
        // Therefore, we execute it *after* the main query has
        // completed, to avoid problems around recursive structures
        // and the like. (Admittedly, I wasn't able to reproduce a problem
        // here, but it seems like the right thing to do. -nmatsakis)
        let cx = LayoutCx { tcx: *self.tcx, param_env: self.param_env };
        cx.record_layout_for_printing(layout);

        Ok(layout)
    }
}
// Helper (inherent) `layout_of` methods to avoid pushing `LayoutCx` to users.
impl TyCtxt<'tcx> {
    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    #[inline]
    pub fn layout_of(
        self,
        param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
    ) -> Result<TyLayout<'tcx>, LayoutError<'tcx>> {
        let cx = LayoutCx { tcx: self, param_env: param_env_and_ty.param_env };
        cx.layout_of(param_env_and_ty.value)
    }
}

impl ty::query::TyCtxtAt<'tcx> {
    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    #[inline]
    pub fn layout_of(
        self,
        param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
    ) -> Result<TyLayout<'tcx>, LayoutError<'tcx>> {
        let cx = LayoutCx { tcx: self.at(self.span), param_env: param_env_and_ty.param_env };
        cx.layout_of(param_env_and_ty.value)
    }
}
impl<'tcx, C> TyLayoutMethods<'tcx, C> for Ty<'tcx>
where
    C: LayoutOf<Ty = Ty<'tcx>, TyLayout: MaybeResult<TyLayout<'tcx>>>
        + HasTyCtxt<'tcx>
        + HasParamEnv<'tcx>,
{
    fn for_variant(this: TyLayout<'tcx>, cx: &C, variant_index: VariantIdx) -> TyLayout<'tcx> {
        let details = match this.variants {
            Variants::Single { index } if index == variant_index => this.details,

            Variants::Single { index } => {
                // Deny calling for_variant more than once for non-Single enums.
                if let Ok(layout) = cx.layout_of(this.ty).to_result() {
                    assert_eq!(layout.variants, Variants::Single { index });
                }

                let fields = match this.ty.kind {
                    ty::Adt(def, _) => def.variants[variant_index].fields.len(),
                    _ => bug!(),
                };
                let tcx = cx.tcx();
                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: variant_index },
                    fields: FieldPlacement::Union(fields),
                    abi: Abi::Uninhabited,
                    largest_niche: None,
                    align: tcx.data_layout.i8_align,
                    size: Size::ZERO,
                })
            }

            Variants::Multiple { ref variants, .. } => &variants[variant_index],
        };

        assert_eq!(details.variants, Variants::Single { index: variant_index });

        TyLayout { ty: this.ty, details }
    }
2027 fn field(this: TyLayout<'tcx>, cx: &C, i: usize) -> C::TyLayout {
2029 let discr_layout = |discr: &Scalar| -> C::TyLayout {
2030 let layout = LayoutDetails::scalar(cx, discr.clone());
2031 MaybeResult::from(Ok(TyLayout {
2032 details: tcx.intern_layout(layout),
2033 ty: discr.value.to_ty(tcx),
2037 cx.layout_of(match this.ty.kind {
2046 | ty::GeneratorWitness(..)
2048 | ty::Dynamic(..) => bug!("TyLayout::field_type({:?}): not applicable", this),
2050 // Potentially-fat pointers.
2051 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
2052 assert!(i < this.fields.count());
2054 // Reuse the fat `*T` type as its own thin pointer data field.
2055 // This provides information about, e.g., DST struct pointees
2056 // (which may have no non-DST form), and will work as long
2057 // as the `Abi` or `FieldPlacement` is checked by users.
2059 let nil = tcx.mk_unit();
2060 let ptr_ty = if this.ty.is_unsafe_ptr() {
2061 tcx.mk_mut_ptr(nil)
2062 } else {
2063 tcx.mk_mut_ref(tcx.lifetimes.re_static, nil)
2065 return MaybeResult::from(cx.layout_of(ptr_ty).to_result().map(
2066 |mut ptr_layout| {
2067 ptr_layout.ty = this.ty;
2073 match tcx.struct_tail_erasing_lifetimes(pointee, cx.param_env()).kind {
2074 ty::Slice(_) | ty::Str => tcx.types.usize,
2075 ty::Dynamic(_, _) => {
2076 tcx.mk_imm_ref(tcx.lifetimes.re_static, tcx.mk_array(tcx.types.usize, 3))
2077 /* FIXME: use actual fn pointers
2078 Warning: naively computing the number of entries in the
2079 vtable by counting the methods on the trait + methods on
2080 all parent traits does not work, because some methods may
2081 not be object safe and would thus be excluded from the vtable.
2082 Increase this counter if you tried to implement this but
2083 failed to do it without duplicating a lot of code from
2084 other places in the compiler: 2
2086 tcx.mk_array(tcx.types.usize, 3),
2087 tcx.mk_array(Option<fn()>),
2091 _ => bug!("TyLayout::field_type({:?}): not applicable", this),
2095 // Arrays and slices.
2096 ty::Array(element, _) | ty::Slice(element) => element,
2097 ty::Str => tcx.types.u8,
2099 // Tuples, generators and closures.
2100 ty::Closure(def_id, ref substs) => {
2101 substs.as_closure().upvar_tys(def_id, tcx).nth(i).unwrap()
2104 ty::Generator(def_id, ref substs, _) => match this.variants {
2105 Variants::Single { index } => substs
2106 .as_generator()
2107 .state_tys(def_id, tcx)
2108 .nth(index.as_usize())
2112 Variants::Multiple { ref discr, discr_index, .. } => {
2113 if i == discr_index {
2114 return discr_layout(discr);
2116 substs.as_generator().prefix_tys(def_id, tcx).nth(i).unwrap()
2120 ty::Tuple(tys) => tys[i].expect_ty(),
2122 // SIMD vector types.
2123 ty::Adt(def, ..) if def.repr.simd() => this.ty.simd_type(tcx),
2126 ty::Adt(def, substs) => {
2127 match this.variants {
2128 Variants::Single { index } => def.variants[index].fields[i].ty(tcx, substs),
2130 // Discriminant field for enums (where applicable).
2131 Variants::Multiple { ref discr, .. } => {
2133 return discr_layout(discr);
2139 | ty::UnnormalizedProjection(..)
2141 | ty::Placeholder(..)
2145 | ty::Error => bug!("TyLayout::field_type: unexpected type `{}`", this.ty),
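// Sketch of the typical caller pattern (hypothetical, assuming an
// infallible `C::TyLayout` as codegen backends use): field types come from
// the match above, while offsets come from `FieldPlacement`:
//
//     for i in 0..layout.fields.count() {
//         let field = layout.field(cx, i);
//         let offset = layout.fields.offset(i);
//         // ... lower the field at `offset` ...
//     }
//
// Note that for `Variants::Multiple` layouts the discriminant occupies one
// of these field slots (`discr_index`), with the synthetic scalar type
// produced by `discr_layout` above rather than a source-level field type.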
2149 fn pointee_info_at(this: TyLayout<'tcx>, cx: &C, offset: Size) -> Option<PointeeInfo> {
2150 match this.ty.kind {
2151 ty::RawPtr(mt) if offset.bytes() == 0 => {
2152 cx.layout_of(mt.ty).to_result().ok().map(|layout| PointeeInfo {
2154 align: layout.align.abi,
2159 ty::Ref(_, ty, mt) if offset.bytes() == 0 => {
2160 let tcx = cx.tcx();
2161 let is_freeze = ty.is_freeze(tcx, cx.param_env(), DUMMY_SP);
2162 let kind = match mt {
2163 hir::Mutability::Not => {
2170 hir::Mutability::Mut => {
2171 // Previously we would only emit noalias annotations for LLVM >= 6 or in
2172 // panic=abort mode. That was deemed right, as prior versions had many bugs
2173 // in conjunction with unwinding, but later versions didn’t seem to have
2174 // said issues. See issue #31681.
2176 // Alas, later on we encountered a case where noalias would generate wrong
2177 // code altogether even with recent versions of LLVM in *safe* code with no
2178 // unwinding involved. See #54462.
2180 // For now, do not enable mutable_noalias by default at all, while the
2181 // issue is being figured out.
2182 let mutable_noalias =
2183 tcx.sess.opts.debugging_opts.mutable_noalias.unwrap_or(false);
2184 if mutable_noalias {
2185 PointerKind::UniqueBorrowed
2192 cx.layout_of(ty).to_result().ok().map(|layout| PointeeInfo {
2194 align: layout.align.abi,
2200 let mut data_variant = match this.variants {
2201 // Within the discriminant field, only the niche itself is
2202 // always initialized, so we only check for a pointer at its offset.
2205 // If the niche is a pointer, it's either valid (according
2206 // to its type), or null (which the niche field's scalar
2207 // validity range encodes). This allows using
2208 // `dereferenceable_or_null` for e.g., `Option<&T>`, and
2209 // this will continue to work as long as we don't start
2210 // using more niches than just null (e.g., the first page of
2211 // the address space, or unaligned pointers).
2212 Variants::Multiple {
2213 discr_kind: DiscriminantKind::Niche { dataful_variant, .. },
2214 discr_index,
2215 ..
2216 } if this.fields.offset(discr_index) == offset => {
2217 Some(this.for_variant(cx, dataful_variant))
2222 if let Some(variant) = data_variant {
2223 // We're not interested in any unions.
2224 if let FieldPlacement::Union(_) = variant.fields {
2225 data_variant = None;
2229 let mut result = None;
2231 if let Some(variant) = data_variant {
2232 let ptr_end = offset + Pointer.size(cx);
2233 for i in 0..variant.fields.count() {
2234 let field_start = variant.fields.offset(i);
2235 if field_start <= offset {
2236 let field = variant.field(cx, i);
2237 result = field.to_result().ok().and_then(|field| {
2238 if ptr_end <= field_start + field.size {
2239 // We found the right field, look inside it.
2240 field.pointee_info_at(cx, offset - field_start)
2245 if result.is_some() {
2252 // FIXME(eddyb) This should be for `ptr::Unique<T>`, not `Box<T>`.
2253 if let Some(ref mut pointee) = result {
2254 if let ty::Adt(def, _) = this.ty.kind {
2255 if def.is_box() && offset.bytes() == 0 {
2256 pointee.safe = Some(PointerKind::UniqueOwned);
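// Worked example (illustrative) of the niche case above: `Option<&u8>` is
// `Variants::Multiple` with the null niche in the pointer at offset 0, so
// `pointee_info_at(cx, Size::ZERO)` descends into the dataful (`Some`)
// variant and reports, roughly,
//
//     PointeeInfo {
//         size: Size::from_bytes(1),        // size of `u8`
//         align: /* 1-byte alignment */,
//         safe: Some(PointerKind::Frozen),  // `&T` with `T: Freeze`
//     }
//
// which is what allows codegen to emit `dereferenceable_or_null` for such
// values: they are either a valid `&u8` or the all-zero niche.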
2267 impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for LayoutError<'tcx> {
2268 fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
2269 use crate::ty::layout::LayoutError::*;
2270 mem::discriminant(self).hash_stable(hcx, hasher);
2273 Unknown(t) | SizeOverflow(t) => t.hash_stable(hcx, hasher),
2278 impl<'tcx> ty::Instance<'tcx> {
2279 // NOTE(eddyb) this is private to avoid using it from outside of
2280 // `FnAbi::of_instance` - any other uses are either too high-level
2281 // for `Instance` (e.g. typeck would use `Ty::fn_sig` instead),
2282 // or should go through `FnAbi` instead, to avoid losing any
2283 // adjustments `FnAbi::of_instance` might be performing.
2284 fn fn_sig_for_fn_abi(&self, tcx: TyCtxt<'tcx>) -> ty::PolyFnSig<'tcx> {
2285 let ty = self.monomorphic_ty(tcx);
2288 // Shims currently have type FnPtr. Not sure this should remain.
2290 let mut sig = ty.fn_sig(tcx);
2291 if let ty::InstanceDef::VtableShim(..) = self.def {
2292 // Modify `fn(self, ...)` to `fn(self: *mut Self, ...)`.
2293 sig = sig.map_bound(|mut sig| {
2294 let mut inputs_and_output = sig.inputs_and_output.to_vec();
2295 inputs_and_output[0] = tcx.mk_mut_ptr(inputs_and_output[0]);
2296 sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);
2302 ty::Closure(def_id, substs) => {
2303 let sig = substs.as_closure().sig(def_id, tcx);
2305 let env_ty = tcx.closure_env_ty(def_id, substs).unwrap();
2306 sig.map_bound(|sig| tcx.mk_fn_sig(
2307 iter::once(*env_ty.skip_binder()).chain(sig.inputs().iter().cloned()),
2314 ty::Generator(def_id, substs, _) => {
2315 let sig = substs.as_generator().poly_sig(def_id, tcx);
2317 let env_region = ty::ReLateBound(ty::INNERMOST, ty::BrEnv);
2318 let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty);
2320 let pin_did = tcx.lang_items().pin_type().unwrap();
2321 let pin_adt_ref = tcx.adt_def(pin_did);
2322 let pin_substs = tcx.intern_substs(&[env_ty.into()]);
2323 let env_ty = tcx.mk_adt(pin_adt_ref, pin_substs);
2325 sig.map_bound(|sig| {
2326 let state_did = tcx.lang_items().gen_state().unwrap();
2327 let state_adt_ref = tcx.adt_def(state_did);
2328 let state_substs = tcx.intern_substs(&[
2329 sig.yield_ty.into(),
2330 sig.return_ty.into(),
2332 let ret_ty = tcx.mk_adt(state_adt_ref, state_substs);
2335 [env_ty, sig.resume_ty].iter(),
2338 hir::Unsafety::Normal,
2339 rustc_target::spec::abi::Abi::Rust
2343 _ => bug!("unexpected type {:?} in Instance::fn_sig", ty)
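// For reference, the shapes produced above (sketch, regions and binders
// elided): a generator with resume type `R`, yield type `Y` and return
// type `T` ends up with
//
//     fn(Pin<&mut [generator]>, R) -> GeneratorState<Y, T>
//
// while a closure gets its environment prepended, e.g. a `FnMut(u32) -> bool`
// closure becomes `fn(&mut [closure], u32) -> bool`. These signatures only
// feed `FnAbi::of_instance` below; they are not user-visible types.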
2348 pub trait FnAbiExt<'tcx, C>
2349 where
2350 C: LayoutOf<Ty = Ty<'tcx>, TyLayout = TyLayout<'tcx>>
2351 + HasDataLayout
2352 + HasTargetSpec
2353 + HasTyCtxt<'tcx>
2354 + HasParamEnv<'tcx>,
2356 /// Compute a `FnAbi` suitable for indirect calls, i.e. to `fn` pointers.
2358 /// NB: this doesn't handle virtual calls - those should use `FnAbi::of_instance`
2359 /// instead, where the instance is an `InstanceDef::Virtual`.
2360 fn of_fn_ptr(cx: &C, sig: ty::PolyFnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self;
2362 /// Compute a `FnAbi` suitable for declaring/defining an `fn` instance, and for
2363 /// direct calls to an `fn`.
2365 /// NB: that includes virtual calls, which are represented by "direct calls"
2366 /// to an `InstanceDef::Virtual` instance (of `<dyn Trait as Trait>::fn`).
2367 fn of_instance(cx: &C, instance: ty::Instance<'tcx>, extra_args: &[Ty<'tcx>]) -> Self;
2371 sig: ty::PolyFnSig<'tcx>,
2372 extra_args: &[Ty<'tcx>],
2373 caller_location: Option<Ty<'tcx>>,
2374 mk_arg_type: impl Fn(Ty<'tcx>, Option<usize>) -> ArgAbi<'tcx, Ty<'tcx>>,
2376 fn adjust_for_abi(&mut self, cx: &C, abi: SpecAbi);
2379 impl<'tcx, C> FnAbiExt<'tcx, C> for call::FnAbi<'tcx, Ty<'tcx>>
2380 where
2381 C: LayoutOf<Ty = Ty<'tcx>, TyLayout = TyLayout<'tcx>>
2382 + HasDataLayout
2383 + HasTargetSpec
2384 + HasTyCtxt<'tcx>
2385 + HasParamEnv<'tcx>,
2387 fn of_fn_ptr(cx: &C, sig: ty::PolyFnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
2388 call::FnAbi::new_internal(cx, sig, extra_args, None, |ty, _| ArgAbi::new(cx.layout_of(ty)))
2391 fn of_instance(cx: &C, instance: ty::Instance<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
2392 let sig = instance.fn_sig_for_fn_abi(cx.tcx());
2394 let caller_location = if instance.def.requires_caller_location(cx.tcx()) {
2395 Some(cx.tcx().caller_location_ty())
2400 call::FnAbi::new_internal(cx, sig, extra_args, caller_location, |ty, arg_idx| {
2401 let mut layout = cx.layout_of(ty);
2402 // Don't pass the vtable, it's not an argument of the virtual fn.
2403 // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
2404 // or `&/&mut dyn Trait` because this is special-cased elsewhere in codegen
2405 if let (ty::InstanceDef::Virtual(..), Some(0)) = (&instance.def, arg_idx) {
2406 let fat_pointer_ty = if layout.is_unsized() {
2407 // unsized `self` is passed as a pointer to `self`
2408 // FIXME (mikeyhew) change this to use &own if it is ever added to the language
2409 cx.tcx().mk_mut_ptr(layout.ty)
2412 Abi::ScalarPair(..) => (),
2413 _ => bug!("receiver type has unsupported layout: {:?}", layout),
2416 // In the case of Rc<Self>, we need to explicitly pass a *mut RcBox<Self>
2417 // with a Scalar (not ScalarPair) ABI. This is a hack that is understood
2418 // elsewhere in the compiler as a method on a `dyn Trait`.
2419 // To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes until we
2420 // get a built-in pointer type
2421 let mut fat_pointer_layout = layout;
2422 'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr()
2423 && !fat_pointer_layout.ty.is_region_ptr()
2425 for i in 0..fat_pointer_layout.fields.count() {
2426 let field_layout = fat_pointer_layout.field(cx, i);
2428 if !field_layout.is_zst() {
2429 fat_pointer_layout = field_layout;
2430 continue 'descend_newtypes;
2434 bug!("receiver has no non-zero-sized fields {:?}", fat_pointer_layout);
2437 fat_pointer_layout.ty
2440 // we now have a type like `*mut RcBox<dyn Trait>`
2441 // change its layout to that of `*mut ()`, a thin pointer, but keep the same type
2442 // this is understood as a special case elsewhere in the compiler
2443 let unit_pointer_ty = cx.tcx().mk_mut_ptr(cx.tcx().mk_unit());
2444 layout = cx.layout_of(unit_pointer_ty);
2445 layout.ty = fat_pointer_ty;
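// Net effect of the virtual-call rewrite above (illustrative): for a call
// through `InstanceDef::Virtual`, argument 0 ends up with the *layout* of
// `*mut ()` (a thin pointer) but keeps its original *type*, e.g.
//
//     fn_abi.args[0].layout.ty     // still `&dyn Trait` (or `Rc<Self>`, ...)
//     fn_abi.args[0].layout.size   // one pointer, not a fat-pointer pair
//
// so codegen passes only the data pointer and obtains the function pointer
// from the vtable separately.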
2453 sig: ty::PolyFnSig<'tcx>,
2454 extra_args: &[Ty<'tcx>],
2455 caller_location: Option<Ty<'tcx>>,
2456 mk_arg_type: impl Fn(Ty<'tcx>, Option<usize>) -> ArgAbi<'tcx, Ty<'tcx>>,
2458 debug!("FnAbi::new_internal({:?}, {:?})", sig, extra_args);
2460 let sig = cx.tcx().normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig);
2462 use rustc_target::spec::abi::Abi::*;
2463 let conv = match cx.tcx().sess.target.target.adjust_abi(sig.abi) {
2464 RustIntrinsic | PlatformIntrinsic | Rust | RustCall => Conv::Rust,
2466 // It's the ABI's job to select this, not ours.
2467 System => bug!("system abi should be selected elsewhere"),
2468 EfiApi => bug!("eficall abi should be selected elsewhere"),
2470 Stdcall => Conv::X86Stdcall,
2471 Fastcall => Conv::X86Fastcall,
2472 Vectorcall => Conv::X86VectorCall,
2473 Thiscall => Conv::X86ThisCall,
2475 Unadjusted => Conv::C,
2476 Win64 => Conv::X86_64Win64,
2477 SysV64 => Conv::X86_64SysV,
2478 Aapcs => Conv::ArmAapcs,
2479 PtxKernel => Conv::PtxKernel,
2480 Msp430Interrupt => Conv::Msp430Intr,
2481 X86Interrupt => Conv::X86Intr,
2482 AmdGpuKernel => Conv::AmdGpuKernel,
2484 // These API constants ought to be more specific...
2488 let mut inputs = sig.inputs();
2489 let extra_args = if sig.abi == RustCall {
2490 assert!(!sig.c_variadic && extra_args.is_empty());
2492 if let Some(input) = sig.inputs().last() {
2493 if let ty::Tuple(tupled_arguments) = input.kind {
2494 inputs = &sig.inputs()[0..sig.inputs().len() - 1];
2495 tupled_arguments.iter().map(|k| k.expect_ty()).collect()
2498 "argument to function with \"rust-call\" ABI \
2504 "argument to function with \"rust-call\" ABI \
2509 assert!(sig.c_variadic || extra_args.is_empty());
2513 let target = &cx.tcx().sess.target.target;
2514 let target_env_gnu_like = matches!(&target.target_env[..], "gnu" | "musl");
2515 let win_x64_gnu =
2516 target.target_os == "windows" && target.arch == "x86_64" && target.target_env == "gnu";
2517 let linux_s390x_gnu_like =
2518 target.target_os == "linux" && target.arch == "s390x" && target_env_gnu_like;
2519 let linux_sparc64_gnu_like =
2520 target.target_os == "linux" && target.arch == "sparc64" && target_env_gnu_like;
2521 let linux_powerpc_gnu_like =
2522 target.target_os == "linux" && target.arch == "powerpc" && target_env_gnu_like;
2523 let rust_abi = match sig.abi {
2524 RustIntrinsic | PlatformIntrinsic | Rust | RustCall => true,
2528 // Handle safe Rust thin and fat pointers.
2529 let adjust_for_rust_scalar = |attrs: &mut ArgAttributes,
2531 layout: TyLayout<'tcx>,
2534 // Booleans are always an i1 that needs to be zero-extended.
2535 if scalar.is_bool() {
2536 attrs.set(ArgAttribute::ZExt);
2540 // Only pointer types handled below.
2541 if scalar.value != Pointer {
2545 if scalar.valid_range.start() < scalar.valid_range.end() {
2546 if *scalar.valid_range.start() > 0 {
2547 attrs.set(ArgAttribute::NonNull);
2551 if let Some(pointee) = layout.pointee_info_at(cx, offset) {
2552 if let Some(kind) = pointee.safe {
2553 attrs.pointee_align = Some(pointee.align);
2555 // `Box` (`UniqueOwned`) pointers are not necessarily dereferenceable
2556 // for the entire duration of the function as they can be deallocated
2557 // any time. Set their valid size to 0.
2558 attrs.pointee_size = match kind {
2559 PointerKind::UniqueOwned => Size::ZERO,
2563 // `Box` pointer parameters never alias because ownership is transferred
2564 // `&mut` pointer parameters never alias other parameters,
2565 // or mutable global data
2567 // `&T` where `T` contains no `UnsafeCell<U>` is immutable,
2568 // and can be marked as both `readonly` and `noalias`, as
2569 // LLVM's definition of `noalias` is based solely on memory
2570 // dependencies rather than pointer equality
2571 let no_alias = match kind {
2572 PointerKind::Shared => false,
2573 PointerKind::UniqueOwned => true,
2574 PointerKind::Frozen | PointerKind::UniqueBorrowed => !is_return,
2577 attrs.set(ArgAttribute::NoAlias);
2580 if kind == PointerKind::Frozen && !is_return {
2581 attrs.set(ArgAttribute::ReadOnly);
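// Rough summary of what the closure above emits (a sketch; the exact set
// depends on `-Z mutable-noalias` and the pointee, as handled above):
//
//     bool               -> ZExt (an i1 zero-extended to the ABI width)
//     &T (T: Freeze)     -> NonNull + NoAlias + ReadOnly + dereferenceable
//     &mut T             -> NonNull + dereferenceable (NoAlias only with
//                           `-Z mutable-noalias=yes`, per the
//                           `PointerKind::UniqueBorrowed` path above)
//     Box<T>             -> NonNull + NoAlias, with `pointee_size` forced to
//                           zero so no `dereferenceable` is implied
//
// `ReadOnly` and, except for `Box`, `NoAlias` are dropped on return values.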
2587 let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| {
2588 let is_return = arg_idx.is_none();
2589 let mut arg = mk_arg_type(ty, arg_idx);
2590 if arg.layout.is_zst() {
2591 // For some forsaken reason, x86_64-pc-windows-gnu
2592 // doesn't ignore zero-sized struct arguments.
2593 // The same is true for {s390x,sparc64,powerpc}-unknown-linux-{gnu,musl}.
2597 && !linux_s390x_gnu_like
2598 && !linux_sparc64_gnu_like
2599 && !linux_powerpc_gnu_like)
2601 arg.mode = PassMode::Ignore;
2605 // FIXME(eddyb) other ABIs don't have logic for scalar pairs.
2606 if !is_return && rust_abi {
2607 if let Abi::ScalarPair(ref a, ref b) = arg.layout.abi {
2608 let mut a_attrs = ArgAttributes::new();
2609 let mut b_attrs = ArgAttributes::new();
2610 adjust_for_rust_scalar(&mut a_attrs, a, arg.layout, Size::ZERO, false);
2611 adjust_for_rust_scalar(
2615 a.value.size(cx).align_to(b.value.align(cx).abi),
2618 arg.mode = PassMode::Pair(a_attrs, b_attrs);
2623 if let Abi::Scalar(ref scalar) = arg.layout.abi {
2624 if let PassMode::Direct(ref mut attrs) = arg.mode {
2625 adjust_for_rust_scalar(attrs, scalar, arg.layout, Size::ZERO, is_return);
2632 let mut fn_abi = FnAbi {
2633 ret: arg_of(sig.output(), None),
2638 .chain(caller_location)
2640 .map(|(i, ty)| arg_of(ty, Some(i)))
2642 c_variadic: sig.c_variadic,
2643 fixed_count: inputs.len(),
2646 fn_abi.adjust_for_abi(cx, sig.abi);
2650 fn adjust_for_abi(&mut self, cx: &C, abi: SpecAbi) {
2651 if abi == SpecAbi::Unadjusted {
2655 if abi == SpecAbi::Rust
2656 || abi == SpecAbi::RustCall
2657 || abi == SpecAbi::RustIntrinsic
2658 || abi == SpecAbi::PlatformIntrinsic
2660 let fixup = |arg: &mut ArgAbi<'tcx, Ty<'tcx>>| {
2661 if arg.is_ignore() {
2665 match arg.layout.abi {
2666 Abi::Aggregate { .. } => {}
2668 // This is a fun case! The gist of what this is doing is
2669 // that we want callers and callees to always agree on the
2670 // ABI of how they pass SIMD arguments. If we were to *not*
2671 // make these arguments indirect then they'd be immediates
2672 // in LLVM, which means that they'd use whatever the
2673 // appropriate ABI is for the callee and the caller. That
2674 // means, for example, if the caller doesn't have AVX
2675 // enabled but the callee does, then passing an AVX argument
2676 // across this boundary would cause corrupt data to show up.
2678 // This problem is fixed by unconditionally passing SIMD
2679 // arguments through memory between callers and callees
2680 // which should get them all to agree on ABI regardless of
2681 // target feature sets. Some more information about this
2682 // issue can be found in #44367.
2684 // Note that the platform intrinsic ABI is exempt here as
2685 // that's how we connect up to LLVM and it's unstable
2686 // anyway, we control all calls to it in libstd.
2688 if abi != SpecAbi::PlatformIntrinsic
2689 && cx.tcx().sess.target.target.options.simd_types_indirect =>
2691 arg.make_indirect();
2698 let size = arg.layout.size;
2699 if arg.layout.is_unsized() || size > Pointer.size(cx) {
2700 arg.make_indirect();
2702 // We want to pass small aggregates as immediates, but using
2703 // an LLVM aggregate type for this leads to bad optimizations,
2704 // so we pick an appropriately sized integer type instead.
2705 arg.cast_to(Reg { kind: RegKind::Integer, size });
2708 fixup(&mut self.ret);
2709 for arg in &mut self.args {
2712 if let PassMode::Indirect(ref mut attrs, _) = self.ret.mode {
2713 attrs.set(ArgAttribute::StructRet);
2718 if let Err(msg) = self.adjust_for_cabi(cx, abi) {
2719 cx.tcx().sess.fatal(&msg);
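// Example of the Rust-ABI fixup above (illustrative, 64-bit target): a
// `struct Bytes([u8; 8]);` has `Abi::Aggregate` and size 8 == pointer size,
// so it is rewritten with
//
//     arg.cast_to(Reg { kind: RegKind::Integer, size });  // passed like an i64
//
// whereas `[u8; 24]` exceeds `Pointer.size(cx)` and is made indirect
// (passed by reference). Two-scalar structs such as `(u32, u32)` are
// `Abi::ScalarPair` and are left alone by this fixup.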