use crate::ich::StableHashingContext;
use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use crate::mir::{GeneratorLayout, GeneratorSavedLocal};
use crate::ty::subst::Subst;
use crate::ty::{self, subst::SubstsRef, ReprOptions, Ty, TyCtxt, TypeFoldable};

use rustc_ast::{self as ast, IntTy, UintTy};
use rustc_attr as attr;
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_hir as hir;
use rustc_hir::lang_items::LangItem;
use rustc_index::bit_set::BitSet;
use rustc_index::vec::{Idx, IndexVec};
use rustc_session::{DataTypeKind, FieldInfo, SizeKind, VariantInfo};
use rustc_span::symbol::{Ident, Symbol};
use rustc_span::DUMMY_SP;
use rustc_target::abi::call::{
    ArgAbi, ArgAttribute, ArgAttributes, Conv, FnAbi, PassMode, Reg, RegKind,
};
use rustc_target::abi::*;
use rustc_target::spec::{abi::Abi as SpecAbi, HasTargetSpec, PanicStrategy};

use std::cmp;
use std::fmt;
use std::iter;
use std::num::NonZeroUsize;
use std::ops::Bound;
pub trait IntegerExt {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx>;
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
    fn repr_discr<'tcx>(
        tcx: TyCtxt<'tcx>,
        ty: Ty<'tcx>,
        repr: &ReprOptions,
        min: i128,
        max: i128,
    ) -> (Integer, bool);
}

impl IntegerExt for Integer {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx> {
        match (*self, signed) {
            (I8, false) => tcx.types.u8,
            (I16, false) => tcx.types.u16,
            (I32, false) => tcx.types.u32,
            (I64, false) => tcx.types.u64,
            (I128, false) => tcx.types.u128,
            (I8, true) => tcx.types.i8,
            (I16, true) => tcx.types.i16,
            (I32, true) => tcx.types.i32,
            (I64, true) => tcx.types.i64,
            (I128, true) => tcx.types.i128,
        }
    }

    /// Gets the `Integer` type from an `attr::IntType`.
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
        let dl = cx.data_layout();

        match ity {
            attr::SignedInt(IntTy::I8) | attr::UnsignedInt(UintTy::U8) => I8,
            attr::SignedInt(IntTy::I16) | attr::UnsignedInt(UintTy::U16) => I16,
            attr::SignedInt(IntTy::I32) | attr::UnsignedInt(UintTy::U32) => I32,
            attr::SignedInt(IntTy::I64) | attr::UnsignedInt(UintTy::U64) => I64,
            attr::SignedInt(IntTy::I128) | attr::UnsignedInt(UintTy::U128) => I128,
            attr::SignedInt(IntTy::Isize) | attr::UnsignedInt(UintTy::Usize) => {
                dl.ptr_sized_integer()
            }
        }
    }
    /// Finds the appropriate Integer type and signedness for the given
    /// signed discriminant range and `#[repr]` attribute.
    /// N.B.: `u128` values above `i128::MAX` will be treated as signed, but
    /// that shouldn't affect anything, other than maybe debuginfo.
    fn repr_discr<'tcx>(
        tcx: TyCtxt<'tcx>,
        ty: Ty<'tcx>,
        repr: &ReprOptions,
        min: i128,
        max: i128,
    ) -> (Integer, bool) {
        // Theoretically, negative values could be larger in unsigned representation
        // than the unsigned representation of the signed minimum. However, if there
        // are any negative values, the only valid unsigned representation is u128
        // which can fit all i128 values, so the result remains unaffected.
        let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
        let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));

        let mut min_from_extern = None;
        let min_default = I8;

        if let Some(ity) = repr.int {
            let discr = Integer::from_attr(&tcx, ity);
            let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
            if discr < fit {
                bug!(
                    "Integer::repr_discr: `#[repr]` hint too small for \
                     discriminant range of enum `{}`",
                    ty
                )
            }
            return (discr, ity.is_signed());
        }

        if repr.c() {
            match &tcx.sess.target.arch[..] {
                // WARNING: the ARM EABI has two variants; the one corresponding
                // to `at_least == I32` appears to be used on Linux and NetBSD,
                // but some systems may use the variant corresponding to no
                // lower bound. However, we don't run on those yet...?
                "arm" => min_from_extern = Some(I32),
                _ => min_from_extern = Some(I32),
            }
        }

        let at_least = min_from_extern.unwrap_or(min_default);

        // If there are no negative values, we can use the unsigned fit.
        if min >= 0 {
            (cmp::max(unsigned_fit, at_least), false)
        } else {
            (cmp::max(signed_fit, at_least), true)
        }
    }
}
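// Worked example (illustrative, not from the original source): for an enum whose
// discriminants span -1..=100 with no `#[repr]` hint, `fit_signed` yields `I8` for
// both endpoints, while `fit_unsigned` sees `-1 as u128` as a huge value and yields
// `I128`; since `min < 0`, the signed branch is taken and the result is `(I8, true)`.
// With `#[repr(C)]`, `at_least` becomes `I32` and the result widens to `(I32, true)`.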
pub trait PrimitiveExt {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
    fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
}

impl PrimitiveExt for Primitive {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
        match *self {
            Int(i, signed) => i.to_ty(tcx, signed),
            F32 => tcx.types.f32,
            F64 => tcx.types.f64,
            Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
        }
    }

    /// Returns an *integer* type matching this primitive.
    /// Useful in particular when dealing with enum discriminants.
    fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
        match *self {
            Int(i, signed) => i.to_ty(tcx, signed),
            Pointer => tcx.types.usize,
            F32 | F64 => bug!("floats do not have an int type"),
        }
    }
}
/// The first half of a fat pointer.
///
/// - For a trait object, this is the address of the box.
/// - For a slice, this is the base address.
pub const FAT_PTR_ADDR: usize = 0;

/// The second half of a fat pointer.
///
/// - For a trait object, this is the address of the vtable.
/// - For a slice, this is the length.
pub const FAT_PTR_EXTRA: usize = 1;
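// Illustrative note (not from the original source): for a `&[u8]` fat pointer,
// field `FAT_PTR_ADDR` (0) is the data pointer and field `FAT_PTR_EXTRA` (1) is
// the length; for a `&dyn Trait`, field 0 is the data pointer and field 1 is the
// vtable pointer.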
#[derive(Copy, Clone, Debug, TyEncodable, TyDecodable)]
pub enum LayoutError<'tcx> {
    Unknown(Ty<'tcx>),
    SizeOverflow(Ty<'tcx>),
}

impl<'tcx> fmt::Display for LayoutError<'tcx> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match *self {
            LayoutError::Unknown(ty) => write!(f, "the type `{}` has an unknown layout", ty),
            LayoutError::SizeOverflow(ty) => {
                write!(f, "values of the type `{}` are too big for the current architecture", ty)
            }
        }
    }
}

fn layout_raw<'tcx>(
    tcx: TyCtxt<'tcx>,
    query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
) -> Result<&'tcx Layout, LayoutError<'tcx>> {
    ty::tls::with_related_context(tcx, move |icx| {
        let (param_env, ty) = query.into_parts();

        if !tcx.sess.recursion_limit().value_within_limit(icx.layout_depth) {
            tcx.sess.fatal(&format!("overflow representing the type `{}`", ty));
        }

        // Update the ImplicitCtxt to increase the layout_depth
        let icx = ty::tls::ImplicitCtxt { layout_depth: icx.layout_depth + 1, ..icx.clone() };

        ty::tls::enter_context(&icx, |_| {
            let cx = LayoutCx { tcx, param_env };
            let layout = cx.layout_raw_uncached(ty);
            // Type-level uninhabitedness should always imply ABI uninhabitedness.
            if let Ok(layout) = layout {
                if ty.conservative_is_privately_uninhabited(tcx) {
                    assert!(layout.abi.is_uninhabited());
                }
            }
            layout
        })
    })
}

pub fn provide(providers: &mut ty::query::Providers) {
    *providers = ty::query::Providers { layout_raw, ..*providers };
}

pub struct LayoutCx<'tcx, C> {
    pub tcx: C,
    pub param_env: ty::ParamEnv<'tcx>,
}

#[derive(Copy, Clone, Debug)]
enum StructKind {
    /// A tuple, closure, or univariant which cannot be coerced to unsized.
    AlwaysSized,
    /// A univariant, the last field of which may be coerced to unsized.
    MaybeUnsized,
    /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
    Prefixed(Size, Align),
}
// Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
// This is used to go between `memory_index` (source field order to memory order)
// and `inverse_memory_index` (memory order to source field order).
// See also `FieldsShape::Arbitrary::memory_index` for more details.
// FIXME(eddyb) build a better abstraction for permutations, if possible.
fn invert_mapping(map: &[u32]) -> Vec<u32> {
    let mut inverse = vec![0; map.len()];
    for i in 0..map.len() {
        inverse[map[i] as usize] = i as u32;
    }
    inverse
}
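// A minimal sanity check for `invert_mapping` (added for illustration; the
// original file has no such test). It demonstrates the round-trip property:
// inverting a permutation twice yields the original permutation.
#[cfg(test)]
mod invert_mapping_tests {
    use super::invert_mapping;

    #[test]
    fn inverts_a_permutation() {
        // `map` sends source field 0 to memory slot 2, field 1 to slot 0, field 2 to slot 1.
        let map = vec![2, 0, 1];
        let inverse = invert_mapping(&map);
        // The inverse sends memory slot 0 back to field 1, slot 1 to field 2, slot 2 to field 0.
        assert_eq!(inverse, vec![1, 2, 0]);
        // Round trip: inverting again restores the original mapping.
        assert_eq!(invert_mapping(&inverse), map);
    }
}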
impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
    fn scalar_pair(&self, a: Scalar, b: Scalar) -> Layout {
        let dl = self.data_layout();
        let b_align = b.value.align(dl);
        let align = a.value.align(dl).max(b_align).max(dl.aggregate_align);
        let b_offset = a.value.size(dl).align_to(b_align.abi);
        let size = (b_offset + b.value.size(dl)).align_to(align.abi);

        // HACK(nox): We iter on `b` and then `a` because `max_by_key`
        // returns the last maximum.
        let largest_niche = Niche::from_scalar(dl, b_offset, b.clone())
            .into_iter()
            .chain(Niche::from_scalar(dl, Size::ZERO, a.clone()))
            .max_by_key(|niche| niche.available(dl));

        Layout {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Arbitrary {
                offsets: vec![Size::ZERO, b_offset],
                memory_index: vec![0, 1],
            },
            abi: Abi::ScalarPair(a, b),
            largest_niche,
            align,
            size,
        }
    }
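    // Worked example (illustrative, not from the original source): pairing a `u8`
    // scalar with a `u32` scalar gives `b_align = 4`, so
    // `b_offset = align_to(1, 4) = 4` and `size = align_to(4 + 4, 4) = 8`; the pair
    // is laid out as { a at 0, b at 4 } with overall alignment 4.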
    fn univariant_uninterned(
        &self,
        ty: Ty<'tcx>,
        fields: &[TyAndLayout<'_>],
        repr: &ReprOptions,
        kind: StructKind,
    ) -> Result<Layout, LayoutError<'tcx>> {
        let dl = self.data_layout();
        let pack = repr.pack;
        if pack.is_some() && repr.align.is_some() {
            bug!("struct cannot be packed and aligned");
        }

        let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };

        let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();

        let optimize = !repr.inhibit_struct_field_reordering_opt();
        if optimize {
            let end =
                if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
            let optimizing = &mut inverse_memory_index[..end];
            let field_align = |f: &TyAndLayout<'_>| {
                if let Some(pack) = pack { f.align.abi.min(pack) } else { f.align.abi }
            };
            match kind {
                StructKind::AlwaysSized | StructKind::MaybeUnsized => {
                    optimizing.sort_by_key(|&x| {
                        // Place ZSTs first to avoid "interesting offsets",
                        // especially with only one or two non-ZST fields.
                        let f = &fields[x as usize];
                        (!f.is_zst(), cmp::Reverse(field_align(f)))
                    });
                }
                StructKind::Prefixed(..) => {
                    // Sort in ascending alignment so that the layout stays optimal
                    // regardless of the prefix.
                    optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
                }
            }
        }

        // inverse_memory_index holds field indices by increasing memory offset.
        // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
        // We now write field offsets to the corresponding offset slot;
        // field 5 with offset 0 puts 0 in offsets[5].
        // At the bottom of this function, we invert `inverse_memory_index` to
        // produce `memory_index` (see `invert_mapping`).
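        // Worked example (illustrative, not from the original source): for fields
        // [a: u8, b: u32, c: u8] with optimization enabled, sorting by descending
        // alignment yields inverse_memory_index = [1, 0, 2] (b first in memory),
        // so b gets offset 0, a offset 4, c offset 5, and the final
        // memory_index = [1, 0, 2] maps each source field to its memory position.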
        let mut sized = true;
        let mut offsets = vec![Size::ZERO; fields.len()];
        let mut offset = Size::ZERO;
        let mut largest_niche = None;
        let mut largest_niche_available = 0;

        if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
            let prefix_align =
                if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
            align = align.max(AbiAndPrefAlign::new(prefix_align));
            offset = prefix_size.align_to(prefix_align);
        }

        for &i in &inverse_memory_index {
            let field = fields[i as usize];
            if !sized {
                bug!("univariant: field #{} of `{}` comes after unsized field", offsets.len(), ty);
            }

            if field.is_unsized() {
                sized = false;
            }

            // Invariant: offset < dl.obj_size_bound() <= 1<<61
            let field_align = if let Some(pack) = pack {
                field.align.min(AbiAndPrefAlign::new(pack))
            } else {
                field.align
            };
            offset = offset.align_to(field_align.abi);
            align = align.max(field_align);

            debug!("univariant offset: {:?} field: {:#?}", offset, field);
            offsets[i as usize] = offset;

            if !repr.hide_niche() {
                if let Some(mut niche) = field.largest_niche.clone() {
                    let available = niche.available(dl);
                    if available > largest_niche_available {
                        largest_niche_available = available;
                        niche.offset += offset;
                        largest_niche = Some(niche);
                    }
                }
            }

            offset = offset.checked_add(field.size, dl).ok_or(LayoutError::SizeOverflow(ty))?;
        }

        if let Some(repr_align) = repr.align {
            align = align.max(AbiAndPrefAlign::new(repr_align));
        }

        debug!("univariant min_size: {:?}", offset);
        let min_size = offset;

        // As stated above, inverse_memory_index holds field indices by increasing offset.
        // This makes it an already-sorted view of the offsets vec.
        // To invert it, consider:
        // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
        // Field 5 would be the first element, so memory_index is i:
        // Note: if we didn't optimize, it's already right.
        let memory_index =
            if optimize { invert_mapping(&inverse_memory_index) } else { inverse_memory_index };

        let size = min_size.align_to(align.abi);
        let mut abi = Abi::Aggregate { sized };

        // Unpack newtype ABIs and find scalar pairs.
        if sized && size.bytes() > 0 {
            // All other fields must be ZSTs.
            let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());

            match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
                // We have exactly one non-ZST field.
                (Some((i, field)), None, None) => {
                    // Field fills the struct and it has a scalar or scalar pair ABI.
                    if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size
                    {
                        match field.abi {
                            // For plain scalars, or vectors of them, we can't unpack
                            // newtypes for `#[repr(C)]`, as that affects C ABIs.
                            Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
                                abi = field.abi.clone();
                            }
                            // But scalar pairs are Rust-specific and get
                            // treated as aggregates by C ABIs anyway.
                            Abi::ScalarPair(..) => {
                                abi = field.abi.clone();
                            }
                            _ => {}
                        }
                    }
                }

                // Two non-ZST fields, and they're both scalars.
                (
                    Some((i, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(ref a), .. }, .. })),
                    Some((j, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(ref b), .. }, .. })),
                    None,
                ) => {
                    // Order by the memory placement, not source order.
                    let ((i, a), (j, b)) =
                        if offsets[i] < offsets[j] { ((i, a), (j, b)) } else { ((j, b), (i, a)) };
                    let pair = self.scalar_pair(a.clone(), b.clone());
                    let pair_offsets = match pair.fields {
                        FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
                            assert_eq!(memory_index, &[0, 1]);
                            offsets
                        }
                        _ => bug!(),
                    };
                    if offsets[i] == pair_offsets[0]
                        && offsets[j] == pair_offsets[1]
                        && align == pair.align
                        && size == pair.size
                    {
                        // We can use `ScalarPair` only when it matches our
                        // already computed layout (including `#[repr(C)]`).
                        abi = pair.abi;
                    }
                }

                _ => {}
            }
        }

        if sized && fields.iter().any(|f| f.abi.is_uninhabited()) {
            abi = Abi::Uninhabited;
        }

        Ok(Layout {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Arbitrary { offsets, memory_index },
            abi,
            largest_niche,
            align,
            size,
        })
    }
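    // Worked example (illustrative, not from the original source): with field
    // reordering, `struct S { a: u8, b: u32, c: u8 }` is laid out as (b, a, c) for
    // a size of 8 bytes, whereas the `#[repr(C)]` source order (a, b, c) needs
    // padding on both sides of `b` and comes to 12 bytes.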
    fn layout_raw_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx Layout, LayoutError<'tcx>> {
        let tcx = self.tcx;
        let param_env = self.param_env;
        let dl = self.data_layout();
        let scalar_unit = |value: Primitive| {
            let bits = value.size(dl).bits();
            assert!(bits <= 128);
            Scalar { value, valid_range: 0..=(!0 >> (128 - bits)) }
        };
        let scalar = |value: Primitive| tcx.intern_layout(Layout::scalar(self, scalar_unit(value)));

        let univariant = |fields: &[TyAndLayout<'_>], repr: &ReprOptions, kind| {
            Ok(tcx.intern_layout(self.univariant_uninterned(ty, fields, repr, kind)?))
        };
        debug_assert!(!ty.has_infer_types_or_consts());

        Ok(match *ty.kind() {
            // Basic scalars.
            ty::Bool => tcx.intern_layout(Layout::scalar(
                self,
                Scalar { value: Int(I8, false), valid_range: 0..=1 },
            )),
            ty::Char => tcx.intern_layout(Layout::scalar(
                self,
                Scalar { value: Int(I32, false), valid_range: 0..=0x10FFFF },
            )),
            ty::Int(ity) => scalar(Int(Integer::from_attr(dl, attr::SignedInt(ity)), true)),
            ty::Uint(ity) => scalar(Int(Integer::from_attr(dl, attr::UnsignedInt(ity)), false)),
            ty::Float(fty) => scalar(match fty {
                ast::FloatTy::F32 => F32,
                ast::FloatTy::F64 => F64,
            }),
            ty::FnPtr(_) => {
                let mut ptr = scalar_unit(Pointer);
                ptr.valid_range = 1..=*ptr.valid_range.end();
                tcx.intern_layout(Layout::scalar(self, ptr))
            }

            // The never type.
            ty::Never => tcx.intern_layout(Layout {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldsShape::Primitive,
                abi: Abi::Uninhabited,
                largest_niche: None,
                align: dl.i8_align,
                size: Size::ZERO,
            }),

            // Potentially-wide pointers.
            ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                let mut data_ptr = scalar_unit(Pointer);
                if !ty.is_unsafe_ptr() {
                    data_ptr.valid_range = 1..=*data_ptr.valid_range.end();
                }

                let pointee = tcx.normalize_erasing_regions(param_env, pointee);
                if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
                    return Ok(tcx.intern_layout(Layout::scalar(self, data_ptr)));
                }

                let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
                let metadata = match unsized_part.kind() {
                    ty::Foreign(..) => {
                        return Ok(tcx.intern_layout(Layout::scalar(self, data_ptr)));
                    }
                    ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)),
                    ty::Dynamic(..) => {
                        let mut vtable = scalar_unit(Pointer);
                        vtable.valid_range = 1..=*vtable.valid_range.end();
                        vtable
                    }
                    _ => return Err(LayoutError::Unknown(unsized_part)),
                };

                // Effectively a (ptr, meta) tuple.
                tcx.intern_layout(self.scalar_pair(data_ptr, metadata))
            }

            // Arrays and slices.
            ty::Array(element, mut count) => {
                if count.has_projections() {
                    count = tcx.normalize_erasing_regions(param_env, count);
                    if count.has_projections() {
                        return Err(LayoutError::Unknown(ty));
                    }
                }

                let count = count.try_eval_usize(tcx, param_env).ok_or(LayoutError::Unknown(ty))?;
                let element = self.layout_of(element)?;
                let size =
                    element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;

                let abi = if count != 0 && ty.conservative_is_privately_uninhabited(tcx) {
                    Abi::Uninhabited
                } else {
                    Abi::Aggregate { sized: true }
                };

                let largest_niche = if count != 0 { element.largest_niche.clone() } else { None };

                tcx.intern_layout(Layout {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldsShape::Array { stride: element.size, count },
                    abi,
                    largest_niche,
                    align: element.align,
                    size,
                })
            }
            ty::Slice(element) => {
                let element = self.layout_of(element)?;
                tcx.intern_layout(Layout {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldsShape::Array { stride: element.size, count: 0 },
                    abi: Abi::Aggregate { sized: false },
                    largest_niche: None,
                    align: element.align,
                    size: Size::ZERO,
                })
            }
            ty::Str => tcx.intern_layout(Layout {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
                abi: Abi::Aggregate { sized: false },
                largest_niche: None,
                align: dl.i8_align,
                size: Size::ZERO,
            }),
            // Odd unit types.
            ty::FnDef(..) => univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?,
            ty::Dynamic(..) | ty::Foreign(..) => {
                let mut unit = self.univariant_uninterned(
                    ty,
                    &[],
                    &ReprOptions::default(),
                    StructKind::AlwaysSized,
                )?;
                match unit.abi {
                    Abi::Aggregate { ref mut sized } => *sized = false,
                    _ => bug!(),
                }
                tcx.intern_layout(unit)
            }

            ty::Generator(def_id, substs, _) => self.generator_layout(ty, def_id, substs)?,

            ty::Closure(_, ref substs) => {
                let tys = substs.as_closure().upvar_tys();
                univariant(
                    &tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    StructKind::AlwaysSized,
                )?
            }

            ty::Tuple(tys) => {
                let kind =
                    if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };

                univariant(
                    &tys.iter()
                        .map(|k| self.layout_of(k.expect_ty()))
                        .collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    kind,
                )?
            }

            // SIMD vector types.
            ty::Adt(def, ..) if def.repr.simd() => {
                let element = self.layout_of(ty.simd_type(tcx))?;
                let count = ty.simd_size(tcx);
                let scalar = match element.abi {
                    Abi::Scalar(ref scalar) => scalar.clone(),
                    _ => {
                        tcx.sess.fatal(&format!(
                            "monomorphising SIMD type `{}` with \
                             a non-machine element type `{}`",
                            ty, element.ty
                        ))
                    }
                };
                let size =
                    element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;
                let align = dl.vector_align(size);
                let size = size.align_to(align.abi);

                tcx.intern_layout(Layout {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldsShape::Array { stride: element.size, count },
                    abi: Abi::Vector { element: scalar, count },
                    largest_niche: element.largest_niche.clone(),
                    align,
                    size,
                })
            }
            // ADTs.
            ty::Adt(def, substs) => {
                // Cache the field layouts.
                let variants = def
                    .variants
                    .iter()
                    .map(|v| {
                        v.fields
                            .iter()
                            .map(|field| self.layout_of(field.ty(tcx, substs)))
                            .collect::<Result<Vec<_>, _>>()
                    })
                    .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                if def.is_union() {
                    if def.repr.pack.is_some() && def.repr.align.is_some() {
                        bug!("union cannot be packed and aligned");
                    }

                    let mut align =
                        if def.repr.pack.is_some() { dl.i8_align } else { dl.aggregate_align };

                    if let Some(repr_align) = def.repr.align {
                        align = align.max(AbiAndPrefAlign::new(repr_align));
                    }

                    let optimize = !def.repr.inhibit_union_abi_opt();
                    let mut size = Size::ZERO;
                    let mut abi = Abi::Aggregate { sized: true };
                    let index = VariantIdx::new(0);
                    for field in &variants[index] {
                        assert!(!field.is_unsized());
                        align = align.max(field.align);

                        // If all non-ZST fields have the same ABI, forward this ABI
                        if optimize && !field.is_zst() {
                            // Normalize scalar_unit to the maximal valid range
                            let field_abi = match &field.abi {
                                Abi::Scalar(x) => Abi::Scalar(scalar_unit(x.value)),
                                Abi::ScalarPair(x, y) => {
                                    Abi::ScalarPair(scalar_unit(x.value), scalar_unit(y.value))
                                }
                                Abi::Vector { element: x, count } => {
                                    Abi::Vector { element: scalar_unit(x.value), count: *count }
                                }
                                Abi::Uninhabited | Abi::Aggregate { .. } => {
                                    Abi::Aggregate { sized: true }
                                }
                            };

                            if size == Size::ZERO {
                                // first non-ZST field: initialize `abi`
                                abi = field_abi;
                            } else if abi != field_abi {
                                // different fields have different ABIs: reset to Aggregate
                                abi = Abi::Aggregate { sized: true };
                            }
                        }

                        size = cmp::max(size, field.size);
                    }

                    if let Some(pack) = def.repr.pack {
                        align = align.min(AbiAndPrefAlign::new(pack));
                    }

                    return Ok(tcx.intern_layout(Layout {
                        variants: Variants::Single { index },
                        fields: FieldsShape::Union(
                            NonZeroUsize::new(variants[index].len())
                                .ok_or(LayoutError::Unknown(ty))?,
                        ),
                        abi,
                        largest_niche: None,
                        align,
                        size: size.align_to(align.abi),
                    }));
                }
                // A variant is absent if it's uninhabited and only has ZST fields.
                // Present uninhabited variants only require space for their fields,
                // but *not* an encoding of the discriminant (e.g., a tag value).
                // See issue #49298 for more details on the need to leave space
                // for non-ZST uninhabited data (mostly partial initialization).
                let absent = |fields: &[TyAndLayout<'_>]| {
                    let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
                    let is_zst = fields.iter().all(|f| f.is_zst());
                    uninhabited && is_zst
                };
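                // Illustrative example (not from the original source): in
                // `enum E { A(!), B(u32, !) }`, variant `A` is absent (its only
                // field is the uninhabited ZST `!`), while `B` is uninhabited but
                // *present*, because its `u32` field still needs space.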
                let (present_first, present_second) = {
                    let mut present_variants = variants
                        .iter_enumerated()
                        .filter_map(|(i, v)| if absent(v) { None } else { Some(i) });
                    (present_variants.next(), present_variants.next())
                };
                let present_first = match present_first {
                    Some(present_first) => present_first,
                    // Uninhabited because it has no variants, or only absent ones.
                    None if def.is_enum() => return tcx.layout_raw(param_env.and(tcx.types.never)),
                    // If it's a struct, still compute a layout so that we can still compute the
                    // field offsets.
                    None => VariantIdx::new(0),
                };

                let is_struct = !def.is_enum() ||
                    // Only one variant is present.
                    (present_second.is_none() &&
                    // Representation optimizations are allowed.
                    !def.repr.inhibit_enum_layout_opt());
                if is_struct {
                    // Struct, or univariant enum equivalent to a struct.
                    // (Typechecking will reject discriminant-sizing attrs.)

                    let v = present_first;
                    let kind = if def.is_enum() || variants[v].is_empty() {
                        StructKind::AlwaysSized
                    } else {
                        let param_env = tcx.param_env(def.did);
                        let last_field = def.variants[v].fields.last().unwrap();
                        let always_sized =
                            tcx.type_of(last_field.did).is_sized(tcx.at(DUMMY_SP), param_env);
                        if !always_sized {
                            StructKind::MaybeUnsized
                        } else {
                            StructKind::AlwaysSized
                        }
                    };

                    let mut st = self.univariant_uninterned(ty, &variants[v], &def.repr, kind)?;
                    st.variants = Variants::Single { index: v };
                    let (start, end) = self.tcx.layout_scalar_valid_range(def.did);
                    match st.abi {
                        Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
                            // the asserts ensure that we are not using the
                            // `#[rustc_layout_scalar_valid_range(n)]`
                            // attribute to widen the range of anything as that would probably
                            // result in UB somewhere
                            // FIXME(eddyb) the asserts are probably not needed,
                            // as larger validity ranges would result in missed
                            // optimizations, *not* wrongly assuming the inner
                            // value is valid. e.g. unions enlarge validity ranges,
                            // because the values may be uninitialized.
                            if let Bound::Included(start) = start {
                                // FIXME(eddyb) this might be incorrect - it doesn't
                                // account for wrap-around (end < start) ranges.
                                assert!(*scalar.valid_range.start() <= start);
                                scalar.valid_range = start..=*scalar.valid_range.end();
                            }
                            if let Bound::Included(end) = end {
                                // FIXME(eddyb) this might be incorrect - it doesn't
                                // account for wrap-around (end < start) ranges.
                                assert!(*scalar.valid_range.end() >= end);
                                scalar.valid_range = *scalar.valid_range.start()..=end;
                            }

                            // Update `largest_niche` if we have introduced a larger niche.
                            let niche = if def.repr.hide_niche() {
                                None
                            } else {
                                Niche::from_scalar(dl, Size::ZERO, scalar.clone())
                            };
                            if let Some(niche) = niche {
                                match &st.largest_niche {
                                    Some(largest_niche) => {
                                        // Replace the existing niche even if they're equal,
                                        // because this one is at a lower offset.
                                        if largest_niche.available(dl) <= niche.available(dl) {
                                            st.largest_niche = Some(niche);
                                        }
                                    }
                                    None => st.largest_niche = Some(niche),
                                }
                            }
                        }
                        _ => assert!(
                            start == Bound::Unbounded && end == Bound::Unbounded,
                            "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
                            def,
                            st,
                        ),
                    }

                    return Ok(tcx.intern_layout(st));
                }

                // At this point, we have handled all unions and
                // structs. (We have also handled univariant enums
                // that allow representation optimization.)
                assert!(def.is_enum());

                // The current code for niche-filling relies on variant indices
                // instead of actual discriminants, so dataful enums with
                // explicit discriminants (RFC #2363) would misbehave.
                let no_explicit_discriminants = def
                    .variants
                    .iter_enumerated()
                    .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i.as_u32()));
                let mut niche_filling_layout = None;

                // Niche-filling enum optimization.
                if !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants {
                    let mut dataful_variant = None;
                    let mut niche_variants = VariantIdx::MAX..=VariantIdx::new(0);

                    // Find one non-ZST variant.
                    'variants: for (v, fields) in variants.iter_enumerated() {
                        if absent(fields) {
                            continue 'variants;
                        }
                        for f in fields {
                            if !f.is_zst() {
                                if dataful_variant.is_none() {
                                    dataful_variant = Some(v);
                                    continue 'variants;
                                } else {
                                    dataful_variant = None;
                                    break 'variants;
                                }
                            }
                        }
                        niche_variants = *niche_variants.start().min(&v)..=v;
                    }

                    if niche_variants.start() > niche_variants.end() {
                        dataful_variant = None;
                    }

                    if let Some(i) = dataful_variant {
                        let count = (niche_variants.end().as_u32()
                            - niche_variants.start().as_u32()
                            + 1) as u128;

                        // Find the field with the largest niche
                        let niche_candidate = variants[i]
                            .iter()
                            .enumerate()
                            .filter_map(|(j, &field)| Some((j, field.largest_niche.as_ref()?)))
                            .max_by_key(|(_, niche)| niche.available(dl));

                        if let Some((field_index, niche, (niche_start, niche_scalar))) =
                            niche_candidate.and_then(|(field_index, niche)| {
                                Some((field_index, niche, niche.reserve(self, count)?))
                            })
                        {
                            let mut align = dl.aggregate_align;
                            let st = variants
                                .iter_enumerated()
                                .map(|(j, v)| {
                                    let mut st = self.univariant_uninterned(
                                        ty,
                                        v,
                                        &def.repr,
                                        StructKind::AlwaysSized,
                                    )?;
                                    st.variants = Variants::Single { index: j };

                                    align = align.max(st.align);

                                    Ok(st)
                                })
                                .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                            let offset = st[i].fields.offset(field_index) + niche.offset;
                            let size = st[i].size;

                            let abi = if st.iter().all(|v| v.abi.is_uninhabited()) {
                                Abi::Uninhabited
                            } else {
                                match st[i].abi {
                                    Abi::Scalar(_) => Abi::Scalar(niche_scalar.clone()),
                                    Abi::ScalarPair(ref first, ref second) => {
                                        // We need to use scalar_unit to reset the
                                        // valid range to the maximal one for that
                                        // primitive, because only the niche is
                                        // guaranteed to be initialised, not the
                                        // other primitive.
                                        if offset.bytes() == 0 {
                                            Abi::ScalarPair(
                                                niche_scalar.clone(),
                                                scalar_unit(second.value),
                                            )
                                        } else {
                                            Abi::ScalarPair(
                                                scalar_unit(first.value),
                                                niche_scalar.clone(),
                                            )
                                        }
                                    }
                                    _ => Abi::Aggregate { sized: true },
                                }
                            };

                            let largest_niche =
                                Niche::from_scalar(dl, offset, niche_scalar.clone());

                            niche_filling_layout = Some(Layout {
                                variants: Variants::Multiple {
                                    tag: niche_scalar,
                                    tag_encoding: TagEncoding::Niche {
                                        dataful_variant: i,
                                        niche_variants,
                                        niche_start,
                                    },
                                    tag_field: 0,
                                    variants: st,
                                },
                                fields: FieldsShape::Arbitrary {
                                    offsets: vec![offset],
                                    memory_index: vec![0],
                                },
                                abi,
                                largest_niche,
                                size,
                                align,
                            });
                        }
                    }
                }
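                // Illustrative example (not from the original source): for
                // `Option<&T>`, the dataful variant is `Some` and its reference
                // field has a niche (the value 0 is invalid), so `None` is encoded
                // as the all-zero bit pattern and the enum is pointer-sized, with
                // no separate tag field.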
                let (mut min, mut max) = (i128::MAX, i128::MIN);
                let discr_type = def.repr.discr_type();
                let bits = Integer::from_attr(self, discr_type).size().bits();
                for (i, discr) in def.discriminants(tcx) {
                    if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
                        continue;
                    }
                    let mut x = discr.val as i128;
                    if discr_type.is_signed() {
                        // sign extend the raw representation to be an i128
                        x = (x << (128 - bits)) >> (128 - bits);
                    }
                    if x < min {
                        min = x;
                    }
                    if x > max {
                        max = x;
                    }
                }
                // We might have no inhabited variants, so pretend there's at least one.
                if (min, max) == (i128::MAX, i128::MIN) {
                    min = 0;
                    max = 0;
                }
                assert!(min <= max, "discriminant range is {}...{}", min, max);
                let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max);

                let mut align = dl.aggregate_align;
                let mut size = Size::ZERO;

                // We're interested in the smallest alignment, so start large.
                let mut start_align = Align::from_bytes(256).unwrap();
                assert_eq!(Integer::for_align(dl, start_align), None);

                // repr(C) on an enum tells us to make a (tag, union) layout,
                // so we need to grow the prefix alignment to be at least
                // the alignment of the union. (This value is used both for
                // determining the alignment of the overall enum, and for
                // determining the alignment of the payload after the tag.)
                let mut prefix_align = min_ity.align(dl).abi;
                if def.repr.c() {
                    for fields in &variants {
                        for field in fields {
                            prefix_align = prefix_align.max(field.align.abi);
                        }
                    }
                }

                // Create the set of structs that represent each variant.
                let mut layout_variants = variants
                    .iter_enumerated()
                    .map(|(i, field_layouts)| {
                        let mut st = self.univariant_uninterned(
                            ty,
                            &field_layouts,
                            &def.repr,
                            StructKind::Prefixed(min_ity.size(), prefix_align),
                        )?;
                        st.variants = Variants::Single { index: i };
                        // Find the first field we can't move later
                        // to make room for a larger discriminant.
                        for field in
                            st.fields.index_by_increasing_offset().map(|j| field_layouts[j])
                        {
                            if !field.is_zst() || field.align.abi.bytes() != 1 {
                                start_align = start_align.min(field.align.abi);
                                break;
                            }
                        }
                        size = cmp::max(size, st.size);
                        align = align.max(st.align);
                        Ok(st)
                    })
                    .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                // Align the maximum variant size to the largest alignment.
                size = size.align_to(align.abi);

                if size.bytes() >= dl.obj_size_bound() {
                    return Err(LayoutError::SizeOverflow(ty));
                }

                let typeck_ity = Integer::from_attr(dl, def.repr.discr_type());
                if typeck_ity < min_ity {
                    // It is a bug if Layout decided on a greater discriminant size than typeck for
                    // some reason at this point (based on values discriminant can take on). Mostly
                    // because this discriminant will be loaded, and then stored into variable of
                    // type calculated by typeck. Consider such case (a bug): typeck decided on
                    // byte-sized discriminant, but layout thinks we need a 16-bit to store all
                    // discriminant values. That would be a bug, because then, in codegen, in order
                    // to store this 16-bit discriminant into 8-bit sized temporary some of the
                    // space necessary to represent would have to be discarded (or layout is wrong
                    // on thinking it needs 16 bits)
                    bug!(
                        "layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
                        min_ity,
                        typeck_ity
                    );
                    // However, it is fine to make discr type however large (as an optimisation)
                    // after this point – we’ll just truncate the value we load in codegen.
                }

                // Check to see if we should use a different type for the
                // discriminant. We can safely use a type with the same size
                // as the alignment of the first field of each variant.
                // We increase the size of the discriminant to avoid LLVM copying
                // padding when it doesn't need to. This normally causes unaligned
                // load/stores and excessive memcpy/memset operations. By using a
                // bigger integer size, LLVM can be sure about its contents and
                // won't be so conservative.

                // Use the initial field alignment
                let mut ity = if def.repr.c() || def.repr.int.is_some() {
                    min_ity
                } else {
                    Integer::for_align(dl, start_align).unwrap_or(min_ity)
                };

                // If the alignment is not larger than the chosen discriminant size,
                // don't use the alignment as the final size.
                if ity <= min_ity {
                    ity = min_ity;
                } else {
                    // Patch up the variants' first few fields.
                    let old_ity_size = min_ity.size();
                    let new_ity_size = ity.size();
                    for variant in &mut layout_variants {
                        match variant.fields {
                            FieldsShape::Arbitrary { ref mut offsets, .. } => {
                                for i in offsets {
                                    if *i <= old_ity_size {
                                        assert_eq!(*i, old_ity_size);
                                        *i = new_ity_size;
                                    }
                                }
                                // We might be making the struct larger.
                                if variant.size <= old_ity_size {
                                    variant.size = new_ity_size;
                                }
                            }
                            _ => bug!(),
                        }
                    }
                }
                let tag_mask = !0u128 >> (128 - ity.size().bits());
                let tag = Scalar {
                    value: Int(ity, signed),
                    valid_range: (min as u128 & tag_mask)..=(max as u128 & tag_mask),
                };
                let mut abi = Abi::Aggregate { sized: true };
                if tag.value.size(dl) == size {
                    abi = Abi::Scalar(tag.clone());
                } else {
                    // Try to use a ScalarPair for all tagged enums.
                    let mut common_prim = None;
                    for (field_layouts, layout_variant) in variants.iter().zip(&layout_variants) {
                        let offsets = match layout_variant.fields {
                            FieldsShape::Arbitrary { ref offsets, .. } => offsets,
                            _ => bug!(),
                        };
                        let mut fields =
                            field_layouts.iter().zip(offsets).filter(|p| !p.0.is_zst());
                        let (field, offset) = match (fields.next(), fields.next()) {
                            (None, None) => continue,
                            (Some(pair), None) => pair,
                            _ => {
                                common_prim = None;
                                break;
                            }
                        };
                        let prim = match field.abi {
                            Abi::Scalar(ref scalar) => scalar.value,
                            _ => {
                                common_prim = None;
                                break;
                            }
                        };
                        if let Some(pair) = common_prim {
                            // This is pretty conservative. We could go fancier
                            // by conflating things like i32 and u32, or even
                            // realising that (u8, u8) could just cohabit with
                            // (u16) or even (u32).
                            if pair != (prim, offset) {
                                common_prim = None;
                                break;
                            }
                        } else {
                            common_prim = Some((prim, offset));
                        }
                    }
                    if let Some((prim, offset)) = common_prim {
                        let pair = self.scalar_pair(tag.clone(), scalar_unit(prim));
                        let pair_offsets = match pair.fields {
                            FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
                                assert_eq!(memory_index, &[0, 1]);
                                offsets
                            }
                            _ => bug!(),
                        };
                        if pair_offsets[0] == Size::ZERO
                            && pair_offsets[1] == *offset
                            && align == pair.align
                            && size == pair.size
                        {
                            // We can use `ScalarPair` only when it matches our
                            // already computed layout (including `#[repr(C)]`).
                            abi = pair.abi;
                        }
                    }
                }

                if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
                    abi = Abi::Uninhabited;
                }

                let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag.clone());

                let tagged_layout = Layout {
                    variants: Variants::Multiple {
                        tag,
                        tag_encoding: TagEncoding::Direct,
                        tag_field: 0,
                        variants: layout_variants,
                    },
                    fields: FieldsShape::Arbitrary {
                        offsets: vec![Size::ZERO],
                        memory_index: vec![0],
                    },
                    largest_niche,
                    abi,
                    align,
                    size,
                };

                let best_layout = match (tagged_layout, niche_filling_layout) {
                    (tagged_layout, Some(niche_filling_layout)) => {
                        // Pick the smaller layout; otherwise,
                        // pick the layout with the larger niche; otherwise,
                        // pick tagged as it has simpler codegen.
                        cmp::min_by_key(tagged_layout, niche_filling_layout, |layout| {
                            let niche_size =
                                layout.largest_niche.as_ref().map_or(0, |n| n.available(dl));
                            (layout.size, cmp::Reverse(niche_size))
                        })
                    }
                    (tagged_layout, None) => tagged_layout,
                };

                tcx.intern_layout(best_layout)
            }

            // Types with no meaningful known layout.
            ty::Projection(_) | ty::Opaque(..) => {
                let normalized = tcx.normalize_erasing_regions(param_env, ty);
                if ty == normalized {
                    return Err(LayoutError::Unknown(ty));
                }
                tcx.layout_raw(param_env.and(normalized))?
            }

            ty::Placeholder(..) | ty::GeneratorWitness(..) | ty::Infer(_) => {
                bug!("Layout::compute: unexpected type `{}`", ty)
            }

            ty::Bound(..) | ty::Param(_) | ty::Error(_) => {
                return Err(LayoutError::Unknown(ty));
            }
        })
    }
}
/// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
#[derive(Clone, Debug, PartialEq)]
enum SavedLocalEligibility {
    Unassigned,
    Assigned(VariantIdx),
    // FIXME: Use newtype_index so we aren't wasting bytes
    Ineligible(Option<u32>),
}
// When laying out generators, we divide our saved local fields into two
// categories: overlap-eligible and overlap-ineligible.
//
// Those fields which are ineligible for overlap go in a "prefix" at the
// beginning of the layout, and always have space reserved for them.
//
// Overlap-eligible fields are only assigned to one variant, so we lay
// those fields out for each variant and put them right after the
// prefix.
//
// Finally, in the layout details, we point to the fields from the
// variants they are assigned to. It is possible for some fields to be
// included in multiple variants. No field ever "moves around" in the
// layout; its offset is always the same.
//
// Also included in the layout are the upvars and the discriminant.
// These are included as fields on the "outer" layout; they are not part
// of any variant.
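// Illustrative sketch (not from the original source) of the resulting shape for a
// generator with two suspension points:
//
//   [ upvars | tag | promoted (ineligible) locals | per-variant locals ... ]
//   '------------- prefix, shared by all variants -------------'
//
// A local live across both suspension points is promoted into the prefix; a local
// live across only one of them is laid out after the prefix, in that variant only,
// and may share its offsets with locals of the other variant.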
impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
    /// Compute the eligibility and assignment of each local.
    fn generator_saved_local_eligibility(
        &self,
        info: &GeneratorLayout<'tcx>,
    ) -> (BitSet<GeneratorSavedLocal>, IndexVec<GeneratorSavedLocal, SavedLocalEligibility>) {
        use SavedLocalEligibility::*;

        let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> =
            IndexVec::from_elem_n(Unassigned, info.field_tys.len());

        // The saved locals not eligible for overlap. These will get
        // "promoted" to the prefix of our generator.
        let mut ineligible_locals = BitSet::new_empty(info.field_tys.len());

        // Figure out which of our saved locals are fields in only
        // one variant. The rest are deemed ineligible for overlap.
        for (variant_index, fields) in info.variant_fields.iter_enumerated() {
            for local in fields {
                match assignments[*local] {
                    Unassigned => {
                        assignments[*local] = Assigned(variant_index);
                    }
                    Assigned(idx) => {
                        // We've already seen this local at another suspension
                        // point, so it is no longer a candidate.
                        trace!(
                            "removing local {:?} in >1 variant ({:?}, {:?})",
                            local,
                            variant_index,
                            idx
                        );
                        ineligible_locals.insert(*local);
                        assignments[*local] = Ineligible(None);
                    }
                    Ineligible(_) => {}
                }
            }
        }

        // Next, check every pair of eligible locals to see if they
        // conflict.
        for local_a in info.storage_conflicts.rows() {
            let conflicts_a = info.storage_conflicts.count(local_a);
            if ineligible_locals.contains(local_a) {
                continue;
            }

            for local_b in info.storage_conflicts.iter(local_a) {
                // local_a and local_b are storage live at the same time, therefore they
                // cannot overlap in the generator layout. The only way to guarantee
                // this is if they are in the same variant, or one is ineligible
                // (which means it is stored in every variant).
                if ineligible_locals.contains(local_b)
                    || assignments[local_a] == assignments[local_b]
                {
                    continue;
                }

                // If they conflict, we will choose one to make ineligible.
                // This is not always optimal; it's just a greedy heuristic that
                // seems to produce good results most of the time.
                let conflicts_b = info.storage_conflicts.count(local_b);
                let (remove, other) =
                    if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
                ineligible_locals.insert(remove);
                assignments[remove] = Ineligible(None);
                trace!("removing local {:?} due to conflict with {:?}", remove, other);
            }
        }

        // Count the number of variants in use. If only one of them, then it is
        // impossible to overlap any locals in our layout. In this case it's
        // always better to make the remaining locals ineligible, so we can
        // lay them out with the other locals in the prefix and eliminate
        // unnecessary padding bytes.
        {
            let mut used_variants = BitSet::new_empty(info.variant_fields.len());
            for assignment in &assignments {
                if let Assigned(idx) = assignment {
                    used_variants.insert(*idx);
                }
            }
            if used_variants.count() < 2 {
                for assignment in assignments.iter_mut() {
                    *assignment = Ineligible(None);
                }
                ineligible_locals.insert_all();
            }
        }

        // Write down the order of our locals that will be promoted to the prefix.
        {
            for (idx, local) in ineligible_locals.iter().enumerate() {
                assignments[local] = Ineligible(Some(idx as u32));
            }
        }

        debug!("generator saved local assignments: {:?}", assignments);

        (ineligible_locals, assignments)
    }
    /// Compute the full generator layout.
    fn generator_layout(
        &self,
        ty: Ty<'tcx>,
        def_id: hir::def_id::DefId,
        substs: SubstsRef<'tcx>,
    ) -> Result<&'tcx Layout, LayoutError<'tcx>> {
        use SavedLocalEligibility::*;
        let tcx = self.tcx;

        let subst_field = |ty: Ty<'tcx>| ty.subst(tcx, substs);

        let info = tcx.generator_layout(def_id);
        let (ineligible_locals, assignments) = self.generator_saved_local_eligibility(&info);

        // Build a prefix layout, including "promoting" all ineligible
        // locals as part of the prefix. We compute the layout of all of
        // these fields at once to get optimal packing.
        let tag_index = substs.as_generator().prefix_tys().count();

        // `info.variant_fields` already accounts for the reserved variants, so no need to add them.
        let max_discr = (info.variant_fields.len() - 1) as u128;
        let discr_int = Integer::fit_unsigned(max_discr);
        let discr_int_ty = discr_int.to_ty(tcx, false);
        let tag = Scalar { value: Primitive::Int(discr_int, false), valid_range: 0..=max_discr };
        let tag_layout = self.tcx.intern_layout(Layout::scalar(self, tag.clone()));
        let tag_layout = TyAndLayout { ty: discr_int_ty, layout: tag_layout };

        let promoted_layouts = ineligible_locals
            .iter()
            .map(|local| subst_field(info.field_tys[local]))
            .map(|ty| tcx.mk_maybe_uninit(ty))
            .map(|ty| self.layout_of(ty));
        let prefix_layouts = substs
            .as_generator()
            .prefix_tys()
            .map(|ty| self.layout_of(ty))
            .chain(iter::once(Ok(tag_layout)))
            .chain(promoted_layouts)
            .collect::<Result<Vec<_>, _>>()?;
        let prefix = self.univariant_uninterned(
            ty,
            &prefix_layouts,
            &ReprOptions::default(),
            StructKind::AlwaysSized,
        )?;

        let (prefix_size, prefix_align) = (prefix.size, prefix.align);

        // Split the prefix layout into the "outer" fields (upvars and
        // discriminant) and the "promoted" fields. Promoted fields will
        // get included in each variant that requested them in
        // GeneratorLayout.
        debug!("prefix = {:#?}", prefix);
        let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
            FieldsShape::Arbitrary { mut offsets, memory_index } => {
                let mut inverse_memory_index = invert_mapping(&memory_index);

                // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
                // "outer" and "promoted" fields respectively.
                let b_start = (tag_index + 1) as u32;
                let offsets_b = offsets.split_off(b_start as usize);
                let offsets_a = offsets;

                // Disentangle the "a" and "b" components of `inverse_memory_index`
                // by preserving the order but keeping only one disjoint "half" each.
                // FIXME(eddyb) build a better abstraction for permutations, if possible.
                let inverse_memory_index_b: Vec<_> =
                    inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
                inverse_memory_index.retain(|&i| i < b_start);
                let inverse_memory_index_a = inverse_memory_index;

                // Since `inverse_memory_index_{a,b}` each only refer to their
                // respective fields, they can be safely inverted
                let memory_index_a = invert_mapping(&inverse_memory_index_a);
                let memory_index_b = invert_mapping(&inverse_memory_index_b);

                let outer_fields =
                    FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
                (outer_fields, offsets_b, memory_index_b)
            }
            _ => bug!(),
        };

        let mut size = prefix.size;
        let mut align = prefix.align;
        let variants = info
            .variant_fields
            .iter_enumerated()
            .map(|(index, variant_fields)| {
                // Only include overlap-eligible fields when we compute our variant layout.
                let variant_only_tys = variant_fields
                    .iter()
                    .filter(|local| match assignments[**local] {
                        Unassigned => bug!(),
                        Assigned(v) if v == index => true,
                        Assigned(_) => bug!("assignment does not match variant"),
                        Ineligible(_) => false,
                    })
                    .map(|local| subst_field(info.field_tys[*local]));

                let mut variant = self.univariant_uninterned(
                    ty,
                    &variant_only_tys
                        .map(|ty| self.layout_of(ty))
                        .collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    StructKind::Prefixed(prefix_size, prefix_align.abi),
                )?;
                variant.variants = Variants::Single { index };

                let (offsets, memory_index) = match variant.fields {
                    FieldsShape::Arbitrary { offsets, memory_index } => (offsets, memory_index),
                    _ => bug!(),
                };
                // Now, stitch the promoted and variant-only fields back together in
                // the order they are mentioned by our GeneratorLayout.
                // Because we only use some subset (that can differ between variants)
                // of the promoted fields, we can't just pick those elements of the
                // `promoted_memory_index` (as we'd end up with gaps).
                // So instead, we build an "inverse memory_index", as if all of the
                // promoted fields were being used, but leave the elements not in the
                // subset as `INVALID_FIELD_IDX`, which we can filter out later to
                // obtain a valid (bijective) mapping.
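                // Worked example (illustrative, not from the original source): with
                // two promoted fields and two variant-only fields, of which this
                // variant uses only promoted field #1, the combined inverse index
                // starts as [!0, !0, !0, !0]; filling in the used slots might give
                // [!0, 0, 1, 2], which after filtering becomes [0, 1, 2] and then
                // inverts to a dense, gap-free `memory_index`.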
                const INVALID_FIELD_IDX: u32 = !0;
                let mut combined_inverse_memory_index =
                    vec![INVALID_FIELD_IDX; promoted_memory_index.len() + memory_index.len()];
                let mut offsets_and_memory_index = offsets.into_iter().zip(memory_index);
                let combined_offsets = variant_fields
                    .iter()
                    .enumerate()
                    .map(|(i, local)| {
                        let (offset, memory_index) = match assignments[*local] {
                            Unassigned => bug!(),
                            Assigned(_) => {
                                let (offset, memory_index) =
                                    offsets_and_memory_index.next().unwrap();
                                (offset, promoted_memory_index.len() as u32 + memory_index)
                            }
                            Ineligible(field_idx) => {
                                let field_idx = field_idx.unwrap() as usize;
                                (promoted_offsets[field_idx], promoted_memory_index[field_idx])
                            }
                        };
                        combined_inverse_memory_index[memory_index as usize] = i as u32;
                        offset
                    })
                    .collect();

                // Remove the unused slots and invert the mapping to obtain the
                // combined `memory_index` (also see previous comment).
                combined_inverse_memory_index.retain(|&i| i != INVALID_FIELD_IDX);
                let combined_memory_index = invert_mapping(&combined_inverse_memory_index);

                variant.fields = FieldsShape::Arbitrary {
                    offsets: combined_offsets,
                    memory_index: combined_memory_index,
                };

                size = size.max(variant.size);
                align = align.max(variant.align);
                Ok(variant)
            })
            .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

        size = size.align_to(align.abi);

        let abi = if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi.is_uninhabited())
        {
            Abi::Uninhabited
        } else {
            Abi::Aggregate { sized: true }
        };

        let layout = tcx.intern_layout(Layout {
            variants: Variants::Multiple {
                tag,
                tag_encoding: TagEncoding::Direct,
                tag_field: tag_index,
                variants,
            },
            fields: outer_fields,
            abi,
            largest_niche: prefix.largest_niche,
            size,
            align,
        });
        debug!("generator layout ({:?}): {:#?}", ty, layout);
        Ok(layout)
    }
    /// This is invoked by the `layout_raw` query to record the final
    /// layout of each type.
    #[inline]
    fn record_layout_for_printing(&self, layout: TyAndLayout<'tcx>) {
        // If we are running with `-Zprint-type-sizes`, maybe record layouts
        // for dumping later.
        if self.tcx.sess.opts.debugging_opts.print_type_sizes {
            self.record_layout_for_printing_outlined(layout)
        }
    }

    fn record_layout_for_printing_outlined(&self, layout: TyAndLayout<'tcx>) {
        // Ignore layouts that are done with non-empty environments or
        // non-monomorphic layouts, as the user only wants to see the stuff
        // resulting from the final codegen session.
        if layout.ty.has_param_types_or_consts() || !self.param_env.caller_bounds().is_empty() {
            return;
        }

        // (delay format until we actually need it)
        let record = |kind, packed, opt_discr_size, variants| {
            let type_desc = format!("{:?}", layout.ty);
            self.tcx.sess.code_stats.record_type_size(
                kind,
                type_desc,
                layout.align.abi,
                layout.size,
                packed,
                opt_discr_size,
                variants,
            );
        };

        let adt_def = match *layout.ty.kind() {
            ty::Adt(ref adt_def, _) => {
                debug!("print-type-size t: `{:?}` process adt", layout.ty);
                adt_def
            }

            ty::Closure(..) => {
                debug!("print-type-size t: `{:?}` record closure", layout.ty);
                record(DataTypeKind::Closure, false, None, vec![]);
                return;
            }

            _ => {
                debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
                return;
            }
        };

        let adt_kind = adt_def.adt_kind();
        let adt_packed = adt_def.repr.pack.is_some();

        let build_variant_info = |n: Option<Ident>, flds: &[Symbol], layout: TyAndLayout<'tcx>| {
            let mut min_size = Size::ZERO;
            let field_info: Vec<_> = flds
                .iter()
                .enumerate()
                .map(|(i, &name)| match layout.field(self, i) {
                    Err(err) => {
                        bug!("no layout found for field {}: `{:?}`", name, err);
                    }
                    Ok(field_layout) => {
                        let offset = layout.fields.offset(i);
                        let field_end = offset + field_layout.size;
                        if min_size < field_end {
                            min_size = field_end;
                        }
                        FieldInfo {
                            name: name.to_string(),
                            offset: offset.bytes(),
                            size: field_layout.size.bytes(),
                            align: field_layout.align.abi.bytes(),
                        }
                    }
                })
                .collect();

            VariantInfo {
                name: n.map(|n| n.to_string()),
                kind: if layout.is_unsized() { SizeKind::Min } else { SizeKind::Exact },
                align: layout.align.abi.bytes(),
                size: if min_size.bytes() == 0 { layout.size.bytes() } else { min_size.bytes() },
                fields: field_info,
            }
        };

        match layout.variants {
            Variants::Single { index } => {
                debug!("print-type-size `{:#?}` variant {}", layout, adt_def.variants[index].ident);
                if !adt_def.variants.is_empty() {
                    let variant_def = &adt_def.variants[index];
                    let fields: Vec<_> = variant_def.fields.iter().map(|f| f.ident.name).collect();
                    record(
                        adt_kind.into(),
                        adt_packed,
                        None,
                        vec![build_variant_info(Some(variant_def.ident), &fields, layout)],
                    );
                } else {
                    // (This case arises for *empty* enums; so give it
                    // zero variants.)
                    record(adt_kind.into(), adt_packed, None, vec![]);
                }
            }

            Variants::Multiple { ref tag, ref tag_encoding, .. } => {
                debug!(
                    "print-type-size `{:#?}` adt general variants def {}",
                    layout.ty,
                    adt_def.variants.len()
                );
                let variant_infos: Vec<_> = adt_def
                    .variants
                    .iter_enumerated()
                    .map(|(i, variant_def)| {
                        let fields: Vec<_> =
                            variant_def.fields.iter().map(|f| f.ident.name).collect();
                        build_variant_info(
                            Some(variant_def.ident),
                            &fields,
                            layout.for_variant(self, i),
                        )
                    })
                    .collect();
                record(
                    adt_kind.into(),
                    adt_packed,
                    match tag_encoding {
                        TagEncoding::Direct => Some(tag.value.size(self)),
                        _ => None,
                    },
                    variant_infos,
                );
            }
        }
    }
}
/// Type size "skeleton", i.e., the only information determining a type's size.
/// While this is conservative, (aside from constant sizes, only pointers,
/// newtypes thereof and null pointer optimized enums are allowed), it is
/// enough to statically check common use cases of transmute.
#[derive(Copy, Clone, Debug)]
pub enum SizeSkeleton<'tcx> {
    /// Any statically computable Layout.
    Known(Size),

    /// A potentially-fat pointer.
    Pointer {
        /// If true, this pointer is never null.
        non_zero: bool,
        /// The type which determines the unsized metadata, if any,
        /// of this pointer. Either a type parameter or a projection
        /// depending on one, with regions erased.
        tail: Ty<'tcx>,
    },
}
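// Illustrative example (not from the original source): `&T` and `Option<&T>` for a
// type parameter `T: ?Sized` both compute to `SizeSkeleton::Pointer { tail: T, .. }`
// even though no concrete `Layout` exists yet, so a `transmute` between them can be
// accepted; only the `non_zero` flag differs (`true` for `&T`, `false` for the
// nullable-pointer enum `Option<&T>`).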
impl<'tcx> SizeSkeleton<'tcx> {
    pub fn compute(
        ty: Ty<'tcx>,
        tcx: TyCtxt<'tcx>,
        param_env: ty::ParamEnv<'tcx>,
    ) -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
        debug_assert!(!ty.has_infer_types_or_consts());

        // First try computing a static layout.
        let err = match tcx.layout_of(param_env.and(ty)) {
            Ok(layout) => {
                return Ok(SizeSkeleton::Known(layout.size));
            }
            Err(err) => err,
        };

        match *ty.kind() {
            ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                let non_zero = !ty.is_unsafe_ptr();
                let tail = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
                match tail.kind() {
                    ty::Param(_) | ty::Projection(_) => {
                        debug_assert!(tail.has_param_types_or_consts());
                        Ok(SizeSkeleton::Pointer { non_zero, tail: tcx.erase_regions(&tail) })
                    }
                    _ => bug!(
                        "SizeSkeleton::compute({}): layout errored ({}), yet \
                         tail `{}` is not a type parameter or a projection",
                        ty,
                        err,
                        tail
                    ),
                }
            }

            ty::Adt(def, substs) => {
                // Only newtypes and enums w/ nullable pointer optimization.
                if def.is_union() || def.variants.is_empty() || def.variants.len() > 2 {
                    return Err(err);
                }

                // Get a zero-sized variant or a pointer newtype.
                let zero_or_ptr_variant = |i| {
                    let i = VariantIdx::new(i);
                    let fields = def.variants[i]
                        .fields
                        .iter()
                        .map(|field| SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env));
                    let mut ptr = None;
                    for field in fields {
                        let field = field?;
                        match field {
                            SizeSkeleton::Known(size) => {
                                if size.bytes() > 0 {
                                    return Err(err);
                                }
                            }
                            SizeSkeleton::Pointer { .. } => {
                                if ptr.is_some() {
                                    return Err(err);
                                }
                                ptr = Some(field);
                            }
                        }
                    }
                    Ok(ptr)
                };

                let v0 = zero_or_ptr_variant(0)?;
                // Newtype.
                if def.variants.len() == 1 {
                    if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
                        return Ok(SizeSkeleton::Pointer {
                            non_zero: non_zero
                                || match tcx.layout_scalar_valid_range(def.did) {
                                    (Bound::Included(start), Bound::Unbounded) => start > 0,
                                    (Bound::Included(start), Bound::Included(end)) => {
                                        0 < start && start < end
                                    }
                                    _ => false,
                                },
                            tail,
                        });
                    } else {
                        return Err(err);
                    }
                }

                let v1 = zero_or_ptr_variant(1)?;
                // Nullable pointer enum optimization.
                match (v0, v1) {
                    (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None)
                    | (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
                        Ok(SizeSkeleton::Pointer { non_zero: false, tail })
                    }
                    _ => Err(err),
                }
            }

            ty::Projection(_) | ty::Opaque(..) => {
                let normalized = tcx.normalize_erasing_regions(param_env, ty);
                if ty == normalized {
                    Err(err)
                } else {
                    SizeSkeleton::compute(normalized, tcx, param_env)
                }
            }

            _ => Err(err),
        }
    }

    pub fn same_size(self, other: SizeSkeleton<'_>) -> bool {
        match (self, other) {
            (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
            (SizeSkeleton::Pointer { tail: a, .. }, SizeSkeleton::Pointer { tail: b, .. }) => {
                a == b
            }
            _ => false,
        }
    }
}
pub trait HasTyCtxt<'tcx>: HasDataLayout {
    fn tcx(&self) -> TyCtxt<'tcx>;
}

pub trait HasParamEnv<'tcx> {
    fn param_env(&self) -> ty::ParamEnv<'tcx>;
}

impl<'tcx> HasDataLayout for TyCtxt<'tcx> {
    fn data_layout(&self) -> &TargetDataLayout {
        &self.data_layout
    }
}

impl<'tcx> HasTyCtxt<'tcx> for TyCtxt<'tcx> {
    fn tcx(&self) -> TyCtxt<'tcx> {
        *self
    }
}

impl<'tcx, C> HasParamEnv<'tcx> for LayoutCx<'tcx, C> {
    fn param_env(&self) -> ty::ParamEnv<'tcx> {
        self.param_env
    }
}

impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
    fn data_layout(&self) -> &TargetDataLayout {
        self.tcx.data_layout()
    }
}

impl<'tcx, T: HasTyCtxt<'tcx>> HasTyCtxt<'tcx> for LayoutCx<'tcx, T> {
    fn tcx(&self) -> TyCtxt<'tcx> {
        self.tcx.tcx()
    }
}
pub type TyAndLayout<'tcx> = rustc_target::abi::TyAndLayout<'tcx, Ty<'tcx>>;

impl<'tcx> LayoutOf for LayoutCx<'tcx, TyCtxt<'tcx>> {
    type Ty = Ty<'tcx>;
    type TyAndLayout = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;

    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout {
        let param_env = self.param_env.with_reveal_all_normalized(self.tcx);
        let ty = self.tcx.normalize_erasing_regions(param_env, ty);
        let layout = self.tcx.layout_raw(param_env.and(ty))?;
        let layout = TyAndLayout { ty, layout };

        // N.B., this recording is normally disabled; when enabled, it
        // can however trigger recursive invocations of `layout_of`.
        // Therefore, we execute it *after* the main query has
        // completed, to avoid problems around recursive structures
        // and the like. (Admittedly, I wasn't able to reproduce a problem
        // here, but it seems like the right thing to do. -nmatsakis)
        self.record_layout_for_printing(layout);

        Ok(layout)
    }
}

impl LayoutOf for LayoutCx<'tcx, ty::query::TyCtxtAt<'tcx>> {
    type Ty = Ty<'tcx>;
    type TyAndLayout = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;

    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout {
        let param_env = self.param_env.with_reveal_all_normalized(*self.tcx);
        let ty = self.tcx.normalize_erasing_regions(param_env, ty);
        let layout = self.tcx.layout_raw(param_env.and(ty))?;
        let layout = TyAndLayout { ty, layout };

        // N.B., this recording is normally disabled; when enabled, it
        // can however trigger recursive invocations of `layout_of`.
        // Therefore, we execute it *after* the main query has
        // completed, to avoid problems around recursive structures
        // and the like. (Admittedly, I wasn't able to reproduce a problem
        // here, but it seems like the right thing to do. -nmatsakis)
        let cx = LayoutCx { tcx: *self.tcx, param_env: self.param_env };
        cx.record_layout_for_printing(layout);

        Ok(layout)
    }
}

// Helper (inherent) `layout_of` methods to avoid pushing `LayoutCx` to users.
impl TyCtxt<'tcx> {
    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    #[inline]
    pub fn layout_of(
        self,
        param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
    ) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
        let cx = LayoutCx { tcx: self, param_env: param_env_and_ty.param_env };
        cx.layout_of(param_env_and_ty.value)
    }
}

impl ty::query::TyCtxtAt<'tcx> {
    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    #[inline]
    pub fn layout_of(
        self,
        param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
    ) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
        let cx = LayoutCx { tcx: self.at(self.span), param_env: param_env_and_ty.param_env };
        cx.layout_of(param_env_and_ty.value)
    }
}
impl<'tcx, C> TyAndLayoutMethods<'tcx, C> for Ty<'tcx>
where
    C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout: MaybeResult<TyAndLayout<'tcx>>>
        + HasTyCtxt<'tcx>
        + HasParamEnv<'tcx>,
{
    fn for_variant(
        this: TyAndLayout<'tcx>,
        cx: &C,
        variant_index: VariantIdx,
    ) -> TyAndLayout<'tcx> {
        let layout = match this.variants {
            Variants::Single { index }
                // If all variants but one are uninhabited, the variant layout is the enum layout.
                if index == variant_index &&
                // Don't confuse variants of uninhabited enums with the enum itself.
                // For more details see https://github.com/rust-lang/rust/issues/69763.
                this.fields != FieldsShape::Primitive =>
            {
                this.layout
            }

            Variants::Single { index } => {
                // Deny calling for_variant more than once for non-Single enums.
                if let Ok(original_layout) = cx.layout_of(this.ty).to_result() {
                    assert_eq!(original_layout.variants, Variants::Single { index });
                }

                let fields = match this.ty.kind() {
                    ty::Adt(def, _) if def.variants.is_empty() =>
                        bug!("for_variant called on zero-variant enum"),
                    ty::Adt(def, _) => def.variants[variant_index].fields.len(),
                    _ => bug!(),
                };
                let tcx = cx.tcx();
                tcx.intern_layout(Layout {
                    variants: Variants::Single { index: variant_index },
                    fields: match NonZeroUsize::new(fields) {
                        Some(fields) => FieldsShape::Union(fields),
                        None => FieldsShape::Arbitrary { offsets: vec![], memory_index: vec![] },
                    },
                    abi: Abi::Uninhabited,
                    largest_niche: None,
                    align: tcx.data_layout.i8_align,
                    size: Size::ZERO,
                })
            }

            Variants::Multiple { ref variants, .. } => &variants[variant_index],
        };

        assert_eq!(layout.variants, Variants::Single { index: variant_index });

        TyAndLayout { ty: this.ty, layout }
    }
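
    // Illustrative (comment-only) sketch of how `for_variant` is driven by
    // callers such as codegen or debuginfo, assuming a `cx: LayoutCx` and an
    // enum type `enum_ty`:
    //
    //     let layout = cx.layout_of(enum_ty)?;
    //     let variant = layout.for_variant(&cx, VariantIdx::new(0));
    //
    // For `Variants::Multiple` this yields the per-variant layout; for
    // `Variants::Single` it returns the enum's own layout, per the first
    // match arm above.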
    fn field(this: TyAndLayout<'tcx>, cx: &C, i: usize) -> C::TyAndLayout {
        let tcx = cx.tcx();
        let tag_layout = |tag: &Scalar| -> C::TyAndLayout {
            let layout = Layout::scalar(cx, tag.clone());
            MaybeResult::from(Ok(TyAndLayout {
                layout: tcx.intern_layout(layout),
                ty: tag.value.to_ty(tcx),
            }))
        };

        cx.layout_of(match *this.ty.kind() {
            ty::Bool
            | ty::Char
            | ty::Int(_)
            | ty::Uint(_)
            | ty::Float(_)
            | ty::FnPtr(_)
            | ty::Never
            | ty::FnDef(..)
            | ty::GeneratorWitness(..)
            | ty::Foreign(..)
            | ty::Dynamic(..) => bug!("TyAndLayout::field_type({:?}): not applicable", this),

            // Potentially-fat pointers.
            ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                assert!(i < this.fields.count());

                // Reuse the fat `*T` type as its own thin pointer data field.
                // This provides information about, e.g., DST struct pointees
                // (which may have no non-DST form), and will work as long
                // as the `Abi` or `FieldsShape` is checked by users.
                if i == 0 {
                    let nil = tcx.mk_unit();
                    let ptr_ty = if this.ty.is_unsafe_ptr() {
                        tcx.mk_mut_ptr(nil)
                    } else {
                        tcx.mk_mut_ref(tcx.lifetimes.re_static, nil)
                    };
                    return MaybeResult::from(cx.layout_of(ptr_ty).to_result().map(
                        |mut ptr_layout| {
                            ptr_layout.ty = this.ty;
                            ptr_layout
                        },
                    ));
                }

                match tcx.struct_tail_erasing_lifetimes(pointee, cx.param_env()).kind() {
                    ty::Slice(_) | ty::Str => tcx.types.usize,
                    ty::Dynamic(_, _) => {
                        tcx.mk_imm_ref(tcx.lifetimes.re_static, tcx.mk_array(tcx.types.usize, 3))
                        /* FIXME: use actual fn pointers
                        Warning: naively computing the number of entries in the
                        vtable by counting the methods on the trait + methods on
                        all parent traits does not work, because some methods can
                        be not object safe and thus excluded from the vtable.
                        Increase this counter if you tried to implement this but
                        failed to do it without duplicating a lot of code from
                        other places in the compiler: 2
                        tcx.mk_tup(&[
                            tcx.mk_array(tcx.types.usize, 3),
                            tcx.mk_array(Option<fn()>),
                        ])
                        */
                    }
                    _ => bug!("TyAndLayout::field_type({:?}): not applicable", this),
                }
            }

            // Arrays and slices.
            ty::Array(element, _) | ty::Slice(element) => element,
            ty::Str => tcx.types.u8,

            // Tuples, generators and closures.
            ty::Closure(_, ref substs) => substs.as_closure().upvar_tys().nth(i).unwrap(),

            ty::Generator(def_id, ref substs, _) => match this.variants {
                Variants::Single { index } => substs
                    .as_generator()
                    .state_tys(def_id, tcx)
                    .nth(index.as_usize())
                    .unwrap()
                    .nth(i)
                    .unwrap(),
                Variants::Multiple { ref tag, tag_field, .. } => {
                    if i == tag_field {
                        return tag_layout(tag);
                    }
                    substs.as_generator().prefix_tys().nth(i).unwrap()
                }
            },

            ty::Tuple(tys) => tys[i].expect_ty(),

            // SIMD vector types.
            ty::Adt(def, ..) if def.repr.simd() => this.ty.simd_type(tcx),

            // ADTs.
            ty::Adt(def, substs) => {
                match this.variants {
                    Variants::Single { index } => def.variants[index].fields[i].ty(tcx, substs),

                    // Discriminant field for enums (where applicable).
                    Variants::Multiple { ref tag, .. } => {
                        assert_eq!(i, 0);
                        return tag_layout(tag);
                    }
                }
            }

            ty::Projection(_)
            | ty::Bound(..)
            | ty::Placeholder(..)
            | ty::Opaque(..)
            | ty::Param(_)
            | ty::Infer(_)
            | ty::Error(_) => bug!("TyAndLayout::field_type: unexpected type `{}`", this.ty),
        })
    }
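
    // Illustrative (comment-only) sketch of the fat-pointer handling above,
    // assuming a `cx: LayoutCx`: for `&[u8]`, field 0 is the data pointer
    // (reported with the fat pointer's own type, as noted above) and field 1
    // is the `usize` length:
    //
    //     let slice_ref = tcx.mk_imm_ref(tcx.lifetimes.re_static, tcx.mk_slice(tcx.types.u8));
    //     let layout = cx.layout_of(slice_ref)?;
    //     assert_eq!(layout.fields.count(), 2);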
    fn pointee_info_at(this: TyAndLayout<'tcx>, cx: &C, offset: Size) -> Option<PointeeInfo> {
        let addr_space_of_ty = |ty: Ty<'tcx>| {
            if ty.is_fn() { cx.data_layout().instruction_address_space } else { AddressSpace::DATA }
        };

        let pointee_info = match *this.ty.kind() {
            ty::RawPtr(mt) if offset.bytes() == 0 => {
                cx.layout_of(mt.ty).to_result().ok().map(|layout| PointeeInfo {
                    size: layout.size,
                    align: layout.align.abi,
                    safe: None,
                    address_space: addr_space_of_ty(mt.ty),
                })
            }
            ty::FnPtr(fn_sig) if offset.bytes() == 0 => {
                cx.layout_of(cx.tcx().mk_fn_ptr(fn_sig)).to_result().ok().map(|layout| {
                    PointeeInfo {
                        size: layout.size,
                        align: layout.align.abi,
                        safe: None,
                        address_space: cx.data_layout().instruction_address_space,
                    }
                })
            }
            ty::Ref(_, ty, mt) if offset.bytes() == 0 => {
                let address_space = addr_space_of_ty(ty);
                let tcx = cx.tcx();
                let is_freeze = ty.is_freeze(tcx.at(DUMMY_SP), cx.param_env());
                let kind = match mt {
                    hir::Mutability::Not => {
                        if is_freeze { PointerKind::Frozen } else { PointerKind::Shared }
                    }
                    hir::Mutability::Mut => {
                        // Previously we would only emit noalias annotations for LLVM >= 6 or in
                        // panic=abort mode. That was deemed right, as prior versions had many bugs
                        // in conjunction with unwinding, but later versions didn't seem to have
                        // said issues. See issue #31681.
                        //
                        // Alas, later on we encountered a case where noalias would generate wrong
                        // code altogether even with recent versions of LLVM in *safe* code with no
                        // unwinding involved. See #54462.
                        //
                        // For now, do not enable mutable_noalias by default at all, while the
                        // issue is being figured out.
                        if tcx.sess.opts.debugging_opts.mutable_noalias {
                            PointerKind::UniqueBorrowed
                        } else {
                            PointerKind::Shared
                        }
                    }
                };

                cx.layout_of(ty).to_result().ok().map(|layout| PointeeInfo {
                    size: layout.size,
                    align: layout.align.abi,
                    safe: Some(kind),
                    address_space,
                })
            }
            _ => {
                let mut data_variant = match this.variants {
                    // Within the discriminant field, only the niche itself is
                    // always initialized, so we only check for a pointer at its
                    // offset.
                    //
                    // If the niche is a pointer, it's either valid (according
                    // to its type), or null (which the niche field's scalar
                    // validity range encodes). This allows using
                    // `dereferenceable_or_null` for e.g., `Option<&T>`, and
                    // this will continue to work as long as we don't start
                    // using more niches than just null (e.g., the first page of
                    // the address space, or unaligned pointers).
                    Variants::Multiple {
                        tag_encoding: TagEncoding::Niche { dataful_variant, .. },
                        tag_field,
                        ..
                    } if this.fields.offset(tag_field) == offset => {
                        Some(this.for_variant(cx, dataful_variant))
                    }
                    _ => Some(this),
                };

                if let Some(variant) = data_variant {
                    // We're not interested in any unions.
                    if let FieldsShape::Union(_) = variant.fields {
                        data_variant = None;
                    }
                }

                let mut result = None;

                if let Some(variant) = data_variant {
                    let ptr_end = offset + Pointer.size(cx);
                    for i in 0..variant.fields.count() {
                        let field_start = variant.fields.offset(i);
                        if field_start <= offset {
                            let field = variant.field(cx, i);
                            result = field.to_result().ok().and_then(|field| {
                                if ptr_end <= field_start + field.size {
                                    // We found the right field, look inside it.
                                    field.pointee_info_at(cx, offset - field_start)
                                } else {
                                    None
                                }
                            });
                            if result.is_some() {
                                break;
                            }
                        }
                    }
                }

                // FIXME(eddyb) This should be for `ptr::Unique<T>`, not `Box<T>`.
                if let Some(ref mut pointee) = result {
                    if let ty::Adt(def, _) = this.ty.kind() {
                        if def.is_box() && offset.bytes() == 0 {
                            pointee.safe = Some(PointerKind::UniqueOwned);
                        }
                    }
                }

                result
            }
        };

        debug!(
            "pointee_info_at (offset={:?}, type kind: {:?}) => {:?}",
            offset,
            this.ty.kind(),
            pointee_info
        );

        pointee_info
    }
}
impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for LayoutError<'tcx> {
    fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
        use crate::ty::layout::LayoutError::*;
        mem::discriminant(self).hash_stable(hcx, hasher);
        match *self {
            Unknown(t) | SizeOverflow(t) => t.hash_stable(hcx, hasher),
        }
    }
}
impl<'tcx> ty::Instance<'tcx> {
    // NOTE(eddyb) this is private to avoid using it from outside of
    // `FnAbi::of_instance` - any other uses are either too high-level
    // for `Instance` (e.g. typeck would use `Ty::fn_sig` instead),
    // or should go through `FnAbi` instead, to avoid losing any
    // adjustments `FnAbi::of_instance` might be performing.
    fn fn_sig_for_fn_abi(&self, tcx: TyCtxt<'tcx>) -> ty::PolyFnSig<'tcx> {
        // FIXME(davidtwco,eddyb): A `ParamEnv` should be passed through to this function.
        let ty = self.ty(tcx, ty::ParamEnv::reveal_all());
        match *ty.kind() {
            ty::FnDef(..) => {
                // HACK(davidtwco,eddyb): This is a workaround for polymorphization considering
                // parameters unused if they show up in the signature, but not in the `mir::Body`
                // (i.e. due to being inside a projection that got normalized, see
                // `src/test/ui/polymorphization/normalized_sig_types.rs`), and codegen not keeping
                // track of a polymorphization `ParamEnv` to allow normalizing later.
                let mut sig = match *ty.kind() {
                    ty::FnDef(def_id, substs) => tcx
                        .normalize_erasing_regions(tcx.param_env(def_id), tcx.fn_sig(def_id))
                        .subst(tcx, substs),
                    _ => unreachable!(),
                };

                if let ty::InstanceDef::VtableShim(..) = self.def {
                    // Modify `fn(self, ...)` to `fn(self: *mut Self, ...)`.
                    sig = sig.map_bound(|mut sig| {
                        let mut inputs_and_output = sig.inputs_and_output.to_vec();
                        inputs_and_output[0] = tcx.mk_mut_ptr(inputs_and_output[0]);
                        sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);
                        sig
                    });
                }
                sig
            }
            ty::Closure(def_id, substs) => {
                let sig = substs.as_closure().sig();

                let env_ty = tcx.closure_env_ty(def_id, substs).unwrap();
                sig.map_bound(|sig| {
                    tcx.mk_fn_sig(
                        iter::once(env_ty.skip_binder()).chain(sig.inputs().iter().cloned()),
                        sig.output(),
                        sig.c_variadic,
                        sig.unsafety,
                        sig.abi,
                    )
                })
            }
            ty::Generator(_, substs, _) => {
                let sig = substs.as_generator().poly_sig();

                let env_region = ty::ReLateBound(ty::INNERMOST, ty::BrEnv);
                let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty);

                let pin_did = tcx.require_lang_item(LangItem::Pin, None);
                let pin_adt_ref = tcx.adt_def(pin_did);
                let pin_substs = tcx.intern_substs(&[env_ty.into()]);
                let env_ty = tcx.mk_adt(pin_adt_ref, pin_substs);

                sig.map_bound(|sig| {
                    let state_did = tcx.require_lang_item(LangItem::GeneratorState, None);
                    let state_adt_ref = tcx.adt_def(state_did);
                    let state_substs =
                        tcx.intern_substs(&[sig.yield_ty.into(), sig.return_ty.into()]);
                    let ret_ty = tcx.mk_adt(state_adt_ref, state_substs);

                    tcx.mk_fn_sig(
                        [env_ty, sig.resume_ty].iter(),
                        &ret_ty,
                        false,
                        hir::Unsafety::Normal,
                        rustc_target::spec::abi::Abi::Rust,
                    )
                })
            }
            _ => bug!("unexpected type {:?} in Instance::fn_sig", ty),
        }
    }
}
pub trait FnAbiExt<'tcx, C>
where
    C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout = TyAndLayout<'tcx>>
        + HasDataLayout
        + HasTargetSpec
        + HasTyCtxt<'tcx>
        + HasParamEnv<'tcx>,
{
    /// Compute a `FnAbi` suitable for indirect calls, i.e. to `fn` pointers.
    ///
    /// NB: this doesn't handle virtual calls - those should use `FnAbi::of_instance`
    /// instead, where the instance is an `InstanceDef::Virtual`.
    fn of_fn_ptr(cx: &C, sig: ty::PolyFnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self;

    /// Compute a `FnAbi` suitable for declaring/defining an `fn` instance, and for
    /// direct calls to an `fn`.
    ///
    /// NB: that includes virtual calls, which are represented by "direct calls"
    /// to an `InstanceDef::Virtual` instance (of `<dyn Trait as Trait>::fn`).
    fn of_instance(cx: &C, instance: ty::Instance<'tcx>, extra_args: &[Ty<'tcx>]) -> Self;

    fn new_internal(
        cx: &C,
        sig: ty::PolyFnSig<'tcx>,
        extra_args: &[Ty<'tcx>],
        caller_location: Option<Ty<'tcx>>,
        codegen_fn_attr_flags: CodegenFnAttrFlags,
        mk_arg_type: impl Fn(Ty<'tcx>, Option<usize>) -> ArgAbi<'tcx, Ty<'tcx>>,
    ) -> Self;
    fn adjust_for_abi(&mut self, cx: &C, abi: SpecAbi);
}
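
// Sketch (hypothetical helper, not used by the compiler): backends obtain the
// full calling-convention information for a concrete function via
// `FnAbi::of_instance` and then lower each argument according to its
// `PassMode`. The bounds mirror those on `FnAbiExt` above.
#[allow(dead_code)]
fn instance_arg_count<'tcx, C>(cx: &C, instance: ty::Instance<'tcx>) -> usize
where
    C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout = TyAndLayout<'tcx>>
        + HasDataLayout
        + HasTargetSpec
        + HasTyCtxt<'tcx>
        + HasParamEnv<'tcx>,
{
    call::FnAbi::of_instance(cx, instance, &[]).args.len()
}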
fn fn_can_unwind(
    panic_strategy: PanicStrategy,
    codegen_fn_attr_flags: CodegenFnAttrFlags,
    call_conv: Conv,
) -> bool {
    if panic_strategy != PanicStrategy::Unwind {
        // In panic=abort mode we assume nothing can unwind anywhere, so
        // optimize based on this!
        false
    } else if codegen_fn_attr_flags.contains(CodegenFnAttrFlags::UNWIND) {
        // If a specific `#[unwind]` attribute is present, use that.
        true
    } else if codegen_fn_attr_flags.contains(CodegenFnAttrFlags::RUSTC_ALLOCATOR_NOUNWIND) {
        // Special attribute for allocator functions, which can't unwind.
        false
    } else if call_conv == Conv::Rust {
        // Any Rust method (or `extern "Rust" fn` or `extern
        // "rust-call" fn`) is explicitly allowed to unwind
        // (unless it has a no-unwind attribute, handled above).
        true
    } else {
        // Anything else is either:
        //
        // 1. A foreign item using a non-Rust ABI (like `extern "C" { fn foo(); }`), or
        //
        // 2. A Rust item using a non-Rust ABI (like `extern "C" fn foo() { ... }`).
        //
        // Foreign items (case 1) are assumed to not unwind; it is
        // UB otherwise. (At least for now; see also
        // rust-lang/rust#63909 and Rust RFC 2753.)
        //
        // Items defined in Rust with non-Rust ABIs (case 2) are also
        // not supposed to unwind. Whether this should be enforced
        // (versus stating it is UB) and *how* it would be enforced
        // is currently under discussion; see rust-lang/rust#58794.
        //
        // In either case, we mark the item as explicitly nounwind.
        false
    }
}
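
// Sketch (hypothetical helper, not used by the compiler): the expected
// outcomes of `fn_can_unwind` for a plain Rust function, i.e. `false` under
// panic=abort and `true` under panic=unwind.
#[allow(dead_code)]
fn fn_can_unwind_examples() -> (bool, bool) {
    (
        // No unwind-related attributes set; only the panic strategy differs.
        fn_can_unwind(PanicStrategy::Abort, CodegenFnAttrFlags::empty(), Conv::Rust),
        fn_can_unwind(PanicStrategy::Unwind, CodegenFnAttrFlags::empty(), Conv::Rust),
    )
}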
impl<'tcx, C> FnAbiExt<'tcx, C> for call::FnAbi<'tcx, Ty<'tcx>>
where
    C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout = TyAndLayout<'tcx>>
        + HasDataLayout
        + HasTargetSpec
        + HasTyCtxt<'tcx>
        + HasParamEnv<'tcx>,
{
    fn of_fn_ptr(cx: &C, sig: ty::PolyFnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
        // Assume that fn pointers may always unwind.
        let codegen_fn_attr_flags = CodegenFnAttrFlags::UNWIND;
        call::FnAbi::new_internal(cx, sig, extra_args, None, codegen_fn_attr_flags, |ty, _| {
            ArgAbi::new(cx.layout_of(ty))
        })
    }
    fn of_instance(cx: &C, instance: ty::Instance<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
        let sig = instance.fn_sig_for_fn_abi(cx.tcx());

        let caller_location = if instance.def.requires_caller_location(cx.tcx()) {
            Some(cx.tcx().caller_location_ty())
        } else {
            None
        };

        let attrs = cx.tcx().codegen_fn_attrs(instance.def_id()).flags;

        call::FnAbi::new_internal(cx, sig, extra_args, caller_location, attrs, |ty, arg_idx| {
            let mut layout = cx.layout_of(ty);
            // Don't pass the vtable, it's not an argument of the virtual fn.
            // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
            // or `&/&mut dyn Trait` because this is special-cased elsewhere in codegen.
            if let (ty::InstanceDef::Virtual(..), Some(0)) = (&instance.def, arg_idx) {
                let fat_pointer_ty = if layout.is_unsized() {
                    // unsized `self` is passed as a pointer to `self`
                    // FIXME (mikeyhew) change this to use &own if it is ever added to the language
                    cx.tcx().mk_mut_ptr(layout.ty)
                } else {
                    match layout.abi {
                        Abi::ScalarPair(..) => (),
                        _ => bug!("receiver type has unsupported layout: {:?}", layout),
                    }

                    // In the case of Rc<Self>, we need to explicitly pass a *mut RcBox<Self>
                    // with a Scalar (not ScalarPair) ABI. This is a hack that is understood
                    // elsewhere in the compiler as a method on a `dyn Trait`.
                    // To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes until
                    // we get a built-in pointer type.
                    let mut fat_pointer_layout = layout;
                    'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr()
                        && !fat_pointer_layout.ty.is_region_ptr()
                    {
                        for i in 0..fat_pointer_layout.fields.count() {
                            let field_layout = fat_pointer_layout.field(cx, i);

                            if !field_layout.is_zst() {
                                fat_pointer_layout = field_layout;
                                continue 'descend_newtypes;
                            }
                        }

                        bug!("receiver has no non-zero-sized fields {:?}", fat_pointer_layout);
                    }

                    fat_pointer_layout.ty
                };

                // We now have a type like `*mut RcBox<dyn Trait>`; change its layout to that
                // of `*mut ()`, a thin pointer, but keep the same type. This is understood as
                // a special case elsewhere in the compiler.
                let unit_pointer_ty = cx.tcx().mk_mut_ptr(cx.tcx().mk_unit());
                layout = cx.layout_of(unit_pointer_ty);
                layout.ty = fat_pointer_ty;
            }
            ArgAbi::new(layout)
        })
    }
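
    // Net effect of the virtual-call case above (illustration): for a call to
    // `<dyn Trait as Trait>::method`, argument 0 arrives as just the data
    // pointer with the layout of `*mut ()`, while `layout.ty` still names the
    // original `&dyn Trait`/`*mut dyn Trait` type that backend code expects
    // to special-case.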
    fn new_internal(
        cx: &C,
        sig: ty::PolyFnSig<'tcx>,
        extra_args: &[Ty<'tcx>],
        caller_location: Option<Ty<'tcx>>,
        codegen_fn_attr_flags: CodegenFnAttrFlags,
        mk_arg_type: impl Fn(Ty<'tcx>, Option<usize>) -> ArgAbi<'tcx, Ty<'tcx>>,
    ) -> Self {
        debug!("FnAbi::new_internal({:?}, {:?})", sig, extra_args);

        let sig = cx.tcx().normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig);

        use rustc_target::spec::abi::Abi::*;
        let conv = match cx.tcx().sess.target.adjust_abi(sig.abi) {
            RustIntrinsic | PlatformIntrinsic | Rust | RustCall => Conv::Rust,

            // It's the ABI's job to select this, not ours.
            System => bug!("system abi should be selected elsewhere"),
            EfiApi => bug!("eficall abi should be selected elsewhere"),

            Stdcall => Conv::X86Stdcall,
            Fastcall => Conv::X86Fastcall,
            Vectorcall => Conv::X86VectorCall,
            Thiscall => Conv::X86ThisCall,
            C => Conv::C,
            Unadjusted => Conv::C,
            Win64 => Conv::X86_64Win64,
            SysV64 => Conv::X86_64SysV,
            Aapcs => Conv::ArmAapcs,
            PtxKernel => Conv::PtxKernel,
            Msp430Interrupt => Conv::Msp430Intr,
            X86Interrupt => Conv::X86Intr,
            AmdGpuKernel => Conv::AmdGpuKernel,
            AvrInterrupt => Conv::AvrInterrupt,
            AvrNonBlockingInterrupt => Conv::AvrNonBlockingInterrupt,

            // These API constants ought to be more specific...
            Cdecl => Conv::C,
        };
        let mut inputs = sig.inputs();
        let extra_args = if sig.abi == RustCall {
            assert!(!sig.c_variadic && extra_args.is_empty());

            if let Some(input) = sig.inputs().last() {
                if let ty::Tuple(tupled_arguments) = input.kind() {
                    inputs = &sig.inputs()[0..sig.inputs().len() - 1];
                    tupled_arguments.iter().map(|k| k.expect_ty()).collect()
                } else {
                    bug!("argument to function with \"rust-call\" ABI is not a tuple");
                }
            } else {
                bug!("argument to function with \"rust-call\" ABI is not a tuple");
            }
        } else {
            assert!(sig.c_variadic || extra_args.is_empty());
            extra_args.to_vec()
        };
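
        // Illustration of the untupling above: a closure call is lowered as
        // `extern "rust-call" fn(self_ty, (u8, u16))`; the trailing tuple is
        // flattened here so the final ABI sees `self_ty, u8, u16` as three
        // separate arguments.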
        let target = &cx.tcx().sess.target;
        let target_env_gnu_like = matches!(&target.env[..], "gnu" | "musl");
        let win_x64_gnu = target.os == "windows" && target.arch == "x86_64" && target.env == "gnu";
        let linux_s390x_gnu_like =
            target.os == "linux" && target.arch == "s390x" && target_env_gnu_like;
        let linux_sparc64_gnu_like =
            target.os == "linux" && target.arch == "sparc64" && target_env_gnu_like;
        let linux_powerpc_gnu_like =
            target.os == "linux" && target.arch == "powerpc" && target_env_gnu_like;
        let rust_abi = matches!(sig.abi, RustIntrinsic | PlatformIntrinsic | Rust | RustCall);
        // Handle safe Rust thin and fat pointers.
        let adjust_for_rust_scalar = |attrs: &mut ArgAttributes,
                                      scalar: &Scalar,
                                      layout: TyAndLayout<'tcx>,
                                      offset: Size,
                                      is_return: bool| {
            // Booleans are always an i1 that needs to be zero-extended.
            if scalar.is_bool() {
                attrs.set(ArgAttribute::ZExt);
                return;
            }

            // Only pointer types handled below.
            if scalar.value != Pointer {
                return;
            }

            if scalar.valid_range.start() < scalar.valid_range.end() {
                if *scalar.valid_range.start() > 0 {
                    attrs.set(ArgAttribute::NonNull);
                }
            }

            if let Some(pointee) = layout.pointee_info_at(cx, offset) {
                if let Some(kind) = pointee.safe {
                    attrs.pointee_align = Some(pointee.align);

                    // `Box` (`UniqueOwned`) pointers are not necessarily dereferenceable
                    // for the entire duration of the function as they can be deallocated
                    // at any time. Set their valid size to 0.
                    attrs.pointee_size = match kind {
                        PointerKind::UniqueOwned => Size::ZERO,
                        _ => pointee.size,
                    };

                    // `Box` pointer parameters never alias because ownership is transferred;
                    // `&mut` pointer parameters never alias other parameters or mutable
                    // global data.
                    //
                    // `&T` where `T` contains no `UnsafeCell<U>` is immutable,
                    // and can be marked as both `readonly` and `noalias`, as
                    // LLVM's definition of `noalias` is based solely on memory
                    // dependencies rather than pointer equality.
                    let no_alias = match kind {
                        PointerKind::Shared => false,
                        PointerKind::UniqueOwned => true,
                        PointerKind::Frozen | PointerKind::UniqueBorrowed => !is_return,
                    };
                    if no_alias {
                        attrs.set(ArgAttribute::NoAlias);
                    }

                    if kind == PointerKind::Frozen && !is_return {
                        attrs.set(ArgAttribute::ReadOnly);
                    }
                }
            }
        };
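
        // Outcomes of the adjustment above (illustration): a non-return `&i32`
        // argument gets NonNull + NoAlias + ReadOnly with a 4-byte
        // dereferenceable pointee size; `&mut i32` gets NoAlias only under
        // `-Zmutable-noalias`; `Box<T>` gets NoAlias but a zero pointee size,
        // since it may be deallocated during the call.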
        let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| {
            let is_return = arg_idx.is_none();
            let mut arg = mk_arg_type(ty, arg_idx);
            if arg.layout.is_zst() {
                // For some forsaken reason, x86_64-pc-windows-gnu
                // doesn't ignore zero-sized struct arguments.
                // The same is true for {s390x,sparc64,powerpc}-unknown-linux-{gnu,musl}.
                if is_return
                    || rust_abi
                    || (!win_x64_gnu
                        && !linux_s390x_gnu_like
                        && !linux_sparc64_gnu_like
                        && !linux_powerpc_gnu_like)
                {
                    arg.mode = PassMode::Ignore;
                }
            }

            // FIXME(eddyb) other ABIs don't have logic for scalar pairs.
            if !is_return && rust_abi {
                if let Abi::ScalarPair(ref a, ref b) = arg.layout.abi {
                    let mut a_attrs = ArgAttributes::new();
                    let mut b_attrs = ArgAttributes::new();
                    adjust_for_rust_scalar(&mut a_attrs, a, arg.layout, Size::ZERO, false);
                    adjust_for_rust_scalar(
                        &mut b_attrs,
                        b,
                        arg.layout,
                        a.value.size(cx).align_to(b.value.align(cx).abi),
                        false,
                    );
                    arg.mode = PassMode::Pair(a_attrs, b_attrs);
                    return arg;
                }
            }

            if let Abi::Scalar(ref scalar) = arg.layout.abi {
                if let PassMode::Direct(ref mut attrs) = arg.mode {
                    adjust_for_rust_scalar(attrs, scalar, arg.layout, Size::ZERO, is_return);
                }
            }

            arg
        };
        let mut fn_abi = FnAbi {
            ret: arg_of(sig.output(), None),
            args: inputs
                .iter()
                .cloned()
                .chain(extra_args)
                .chain(caller_location)
                .enumerate()
                .map(|(i, ty)| arg_of(ty, Some(i)))
                .collect(),
            c_variadic: sig.c_variadic,
            fixed_count: inputs.len(),
            conv,
            can_unwind: fn_can_unwind(cx.tcx().sess.panic_strategy(), codegen_fn_attr_flags, conv),
        };
        fn_abi.adjust_for_abi(cx, sig.abi);
        debug!("FnAbi::new_internal = {:?}", fn_abi);
        fn_abi
    }
    fn adjust_for_abi(&mut self, cx: &C, abi: SpecAbi) {
        if abi == SpecAbi::Unadjusted {
            return;
        }

        if abi == SpecAbi::Rust
            || abi == SpecAbi::RustCall
            || abi == SpecAbi::RustIntrinsic
            || abi == SpecAbi::PlatformIntrinsic
        {
            let fixup = |arg: &mut ArgAbi<'tcx, Ty<'tcx>>, is_ret: bool| {
                if arg.is_ignore() {
                    return;
                }

                match arg.layout.abi {
                    Abi::Aggregate { .. } => {}

                    // This is a fun case! The gist of what this is doing is
                    // that we want callers and callees to always agree on the
                    // ABI of how they pass SIMD arguments. If we were to *not*
                    // make these arguments indirect then they'd be immediates
                    // in LLVM, which means that they'd use whatever the
                    // appropriate ABI is for the callee and the caller. That
                    // means, for example, if the caller doesn't have AVX
                    // enabled but the callee does, then passing an AVX argument
                    // across this boundary would cause corrupt data to show up.
                    //
                    // This problem is fixed by unconditionally passing SIMD
                    // arguments through memory between callers and callees
                    // which should get them all to agree on ABI regardless of
                    // target feature sets. Some more information about this
                    // issue can be found in #44367.
                    //
                    // Note that the platform intrinsic ABI is exempt here as
                    // that's how we connect up to LLVM and it's unstable
                    // anyway; we control all calls to it in libstd.
                    Abi::Vector { .. }
                        if abi != SpecAbi::PlatformIntrinsic
                            && cx.tcx().sess.target.simd_types_indirect =>
                    {
                        arg.make_indirect();
                        return;
                    }

                    _ => return,
                }

                // Return structures up to 2 pointers in size by value, matching `ScalarPair`. LLVM
                // will usually return these in 2 registers, which is more efficient than by-ref.
                let max_by_val_size = if is_ret { Pointer.size(cx) * 2 } else { Pointer.size(cx) };
                let size = arg.layout.size;

                if arg.layout.is_unsized() || size > max_by_val_size {
                    arg.make_indirect();
                } else {
                    // We want to pass small aggregates as immediates, but using
                    // an LLVM aggregate type for this leads to bad optimizations,
                    // so we pick an appropriately sized integer type instead.
                    arg.cast_to(Reg { kind: RegKind::Integer, size });
                }
            };
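
            // Illustration of the integer cast above: on a 64-bit target, a
            // `struct S(u32, u32)` with `Abi::Aggregate` (size 8) is passed as
            // a single 8-byte integer register rather than through memory.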
            fixup(&mut self.ret, true);
            for arg in &mut self.args {
                fixup(arg, false);
            }
            if let PassMode::Indirect(ref mut attrs, _) = self.ret.mode {
                attrs.set(ArgAttribute::StructRet);
            }
            return;
        }

        if let Err(msg) = self.adjust_for_cabi(cx, abi) {
            cx.tcx().sess.fatal(&msg);
        }
    }
}