1 use crate::ich::StableHashingContext;
2 use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
3 use crate::mir::{GeneratorLayout, GeneratorSavedLocal};
4 use crate::ty::subst::Subst;
5 use crate::ty::{self, subst::SubstsRef, ReprOptions, Ty, TyCtxt, TypeFoldable};
7 use rustc_ast::ast::{self, IntTy, UintTy};
8 use rustc_attr as attr;
9 use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
11 use rustc_hir::lang_items::{GeneratorStateLangItem, PinTypeLangItem};
12 use rustc_index::bit_set::BitSet;
13 use rustc_index::vec::{Idx, IndexVec};
14 use rustc_session::{DataTypeKind, FieldInfo, SizeKind, VariantInfo};
15 use rustc_span::symbol::{Ident, Symbol};
16 use rustc_span::DUMMY_SP;
17 use rustc_target::abi::call::{
18 ArgAbi, ArgAttribute, ArgAttributes, Conv, FnAbi, PassMode, Reg, RegKind,
20 use rustc_target::abi::*;
21 use rustc_target::spec::{abi::Abi as SpecAbi, HasTargetSpec, PanicStrategy};
27 use std::num::NonZeroUsize;
pub trait IntegerExt {
    /// Returns the Rust primitive integer type with this `Integer`'s size and
    /// the given signedness (e.g. `(I32, false)` -> `u32`).
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx>;

    /// Maps an `attr::IntType` (from a `#[repr(...)]` attribute) to the
    /// corresponding target `Integer`; `cx` supplies the data layout for
    /// pointer-sized integers.
    // NOTE(review): this chunk is truncated — additional trait items
    // (e.g. `repr_discr`, judging by the impl below) are not visible here.
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
impl IntegerExt for Integer {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx> {
        // Straight table lookup: (size, signedness) -> interned primitive type.
        match (*self, signed) {
            (I8, false) => tcx.types.u8,
            (I16, false) => tcx.types.u16,
            (I32, false) => tcx.types.u32,
            (I64, false) => tcx.types.u64,
            (I128, false) => tcx.types.u128,
            (I8, true) => tcx.types.i8,
            (I16, true) => tcx.types.i16,
            (I32, true) => tcx.types.i32,
            (I64, true) => tcx.types.i64,
            (I128, true) => tcx.types.i128,

    /// Gets the Integer type from an attr::IntType.
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
        let dl = cx.data_layout();

        // Fixed-width reprs map directly to the matching `Integer`;
        // `isize`/`usize` depend on the target's pointer width.
        attr::SignedInt(IntTy::I8) | attr::UnsignedInt(UintTy::U8) => I8,
        attr::SignedInt(IntTy::I16) | attr::UnsignedInt(UintTy::U16) => I16,
        attr::SignedInt(IntTy::I32) | attr::UnsignedInt(UintTy::U32) => I32,
        attr::SignedInt(IntTy::I64) | attr::UnsignedInt(UintTy::U64) => I64,
        attr::SignedInt(IntTy::I128) | attr::UnsignedInt(UintTy::U128) => I128,
        attr::SignedInt(IntTy::Isize) | attr::UnsignedInt(UintTy::Usize) => {
            dl.ptr_sized_integer()

    /// Finds the appropriate Integer type and signedness for the given
    /// signed discriminant range and `#[repr]` attribute.
    /// N.B.: `u128` values above `i128::MAX` will be treated as signed, but
    /// that shouldn't affect anything, other than maybe debuginfo.
    // NOTE(review): the opening of this signature (`fn repr_discr(...)`,
    // presumably taking `tcx`, `ty`, `repr`, `min`, `max`) is not visible
    // in this view of the file.
    ) -> (Integer, bool) {
        // Theoretically, negative values could be larger in unsigned representation
        // than the unsigned representation of the signed minimum. However, if there
        // are any negative values, the only valid unsigned representation is u128
        // which can fit all i128 values, so the result remains unaffected.
        let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
        let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));

        let mut min_from_extern = None;

        // An explicit `#[repr(int)]` overrides everything else — but it is a
        // bug (not a user error) if the range doesn't fit at this point.
        if let Some(ity) = repr.int {
            let discr = Integer::from_attr(&tcx, ity);
            let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
                "Integer::repr_discr: `#[repr]` hint too small for \
                  discriminant range of enum `{}",
            return (discr, ity.is_signed());

        match &tcx.sess.target.target.arch[..] {
            // WARNING: the ARM EABI has two variants; the one corresponding
            // to `at_least == I32` appears to be used on Linux and NetBSD,
            // but some systems may use the variant corresponding to no
            // lower bound. However, we don't run on those yet...?
            // NOTE(review): both arms currently pick `I32`; the split is kept
            // only so the ARM caveat above stays attached to its arm.
            "arm" => min_from_extern = Some(I32),
            _ => min_from_extern = Some(I32),

        let at_least = min_from_extern.unwrap_or(min_default);

        // If there are no negative values, we can use the unsigned fit.
            (cmp::max(unsigned_fit, at_least), false)
            (cmp::max(signed_fit, at_least), true)
pub trait PrimitiveExt {
    /// Returns the Rust type represented by this ABI-level primitive.
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
    /// Returns an *integer* Rust type matching this primitive's size
    /// (see the impl below for how pointers and floats are handled).
    fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
135 impl PrimitiveExt for Primitive {
136 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
138 Int(i, signed) => i.to_ty(tcx, signed),
139 F32 => tcx.types.f32,
140 F64 => tcx.types.f64,
141 Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
145 /// Return an *integer* type matching this primitive.
146 /// Useful in particular when dealing with enum discriminants.
147 fn to_int_ty(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
149 Int(i, signed) => i.to_ty(tcx, signed),
150 Pointer => tcx.types.usize,
151 F32 | F64 => bug!("floats do not have an int type"),
/// The first half of a fat pointer.
///
/// - For a trait object, this is the address of the box.
/// - For a slice, this is the base address.
pub const FAT_PTR_ADDR: usize = 0;

/// The second half of a fat pointer.
///
/// - For a trait object, this is the address of the vtable.
/// - For a slice, this is the length.
pub const FAT_PTR_EXTRA: usize = 1;
#[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable)]
pub enum LayoutError<'tcx> {
    // NOTE(review): an `Unknown(Ty<'tcx>)` variant is matched by the
    // `Display` impl below, but its declaration is not visible in this
    // view of the file.
    /// The type's size would overflow the target's object-size bound.
    SizeOverflow(Ty<'tcx>),
impl<'tcx> fmt::Display for LayoutError<'tcx> {
    /// Human-readable rendering of layout failures, used in diagnostics.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            LayoutError::Unknown(ty) => write!(f, "the type `{:?}` has an unknown layout", ty),
            LayoutError::SizeOverflow(ty) => {
                write!(f, "the type `{:?}` is too big for the current architecture", ty)
    // NOTE(review): the opening of this signature (`fn layout_raw(tcx, ...)`)
    // is outside this view of the file.
    query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
) -> Result<&'tcx Layout, LayoutError<'tcx>> {
    ty::tls::with_related_context(tcx, move |icx| {
        let (param_env, ty) = query.into_parts();

        // Layout computation is re-entrant (fields recurse via `layout_of`),
        // so bail out with a fatal error once the session's recursion limit
        // is exceeded, rather than overflowing the stack on recursive types.
        if !tcx.sess.recursion_limit().value_within_limit(icx.layout_depth) {
            tcx.sess.fatal(&format!("overflow representing the type `{}`", ty));

        // Update the ImplicitCtxt to increase the layout_depth
        let icx = ty::tls::ImplicitCtxt { layout_depth: icx.layout_depth + 1, ..icx.clone() };

        ty::tls::enter_context(&icx, |_| {
            let cx = LayoutCx { tcx, param_env };
            let layout = cx.layout_raw_uncached(ty);
            // Type-level uninhabitedness should always imply ABI uninhabitedness.
            if let Ok(layout) = layout {
                if ty.conservative_is_privately_uninhabited(tcx) {
                    assert!(layout.abi.is_uninhabited());
/// Registers this module's query implementations (currently `layout_raw`)
/// with the query system, preserving all other providers.
pub fn provide(providers: &mut ty::query::Providers) {
    *providers = ty::query::Providers { layout_raw, ..*providers };
/// Context for layout computation: a tcx-like context `C` plus the
/// `ParamEnv` used when answering trait queries (e.g. `Sized`-ness).
pub struct LayoutCx<'tcx, C> {
    // NOTE(review): the `C`-typed field (presumably `pub tcx: C`) is not
    // visible in this view of the file.
    pub param_env: ty::ParamEnv<'tcx>,
#[derive(Copy, Clone, Debug)]
// NOTE(review): the `enum StructKind {` header and the names of the first
// two variants are not visible in this view — only their doc comments are.
/// A tuple, closure, or univariant which cannot be coerced to unsized.
/// A univariant, the last field of which may be coerced to unsized.
/// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
Prefixed(Size, Align),
// Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
// This is used to go between `memory_index` (source field order to memory order)
// and `inverse_memory_index` (memory order to source field order).
// See also `FieldsShape::Arbitrary::memory_index` for more details.
// FIXME(eddyb) build a better abstraction for permutations, if possible.
fn invert_mapping(map: &[u32]) -> Vec<u32> {
    let mut inverse = vec![0; map.len()];
    for (i, &m) in map.iter().enumerate() {
        // `map` is a bijection onto `0..map.len()`, so every `m` is a
        // distinct in-bounds index and `inverse` is fully initialized.
        inverse[m as usize] = i as u32;
    }
    inverse
}
impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
    /// Builds the layout for a two-scalar aggregate (`Abi::ScalarPair`):
    /// `b` is placed after `a`, at `a`'s size rounded up to `b`'s alignment.
    fn scalar_pair(&self, a: Scalar, b: Scalar) -> Layout {
        let dl = self.data_layout();
        let b_align = b.value.align(dl);
        // Overall alignment is the max of both scalars, but never below the
        // target's minimum aggregate alignment.
        let align = a.value.align(dl).max(b_align).max(dl.aggregate_align);
        let b_offset = a.value.size(dl).align_to(b_align.abi);
        let size = (b_offset + b.value.size(dl)).align_to(align.abi);

        // HACK(nox): We iter on `b` and then `a` because `max_by_key`
        // returns the last maximum.
        let largest_niche = Niche::from_scalar(dl, b_offset, b.clone())
            .chain(Niche::from_scalar(dl, Size::ZERO, a.clone()))
            .max_by_key(|niche| niche.available(dl));

            // The pair is modeled as a single-variant aggregate with two
            // explicitly-placed fields in source order.
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Arbitrary {
                offsets: vec![Size::ZERO, b_offset],
                memory_index: vec![0, 1],
            abi: Abi::ScalarPair(a, b),
    /// Computes the (un-interned) layout of a "univariant" aggregate —
    /// a struct, tuple, closure environment, or a single enum variant —
    /// optionally reordering fields to minimize padding.
    // NOTE(review): several lines of this function (including parts of the
    // signature — `ty`, `repr`, `kind` — and various closing braces) are not
    // visible in this view of the file.
    fn univariant_uninterned(
        fields: &[TyAndLayout<'_>],
    ) -> Result<Layout, LayoutError<'tcx>> {
        let dl = self.data_layout();
        let pack = repr.pack;
        // `#[repr(packed)]` and `#[repr(align)]` are mutually exclusive;
        // reaching here with both is a compiler bug, not a user error.
        if pack.is_some() && repr.align.is_some() {
            bug!("struct cannot be packed and aligned");

        let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };

        // Start from the identity permutation; may be reordered below.
        let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();

        // Field reordering is only allowed when the repr doesn't forbid it
        // (e.g. not `#[repr(C)]` / `#[repr(packed)]` with observable order).
        let optimize = !repr.inhibit_struct_field_reordering_opt();
            // A possibly-unsized tail field must stay last, so it is excluded
            // from the range being sorted.
            if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
            let optimizing = &mut inverse_memory_index[..end];
            let field_align = |f: &TyAndLayout<'_>| {
                if let Some(pack) = pack { f.align.abi.min(pack) } else { f.align.abi }
                StructKind::AlwaysSized | StructKind::MaybeUnsized => {
                    optimizing.sort_by_key(|&x| {
                        // Place ZSTs first to avoid "interesting offsets",
                        // especially with only one or two non-ZST fields.
                        let f = &fields[x as usize];
                        (!f.is_zst(), cmp::Reverse(field_align(f)))
                StructKind::Prefixed(..) => {
                    // Sort in ascending alignment so that the layout stay optimal
                    // regardless of the prefix
                    optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));

        // inverse_memory_index holds field indices by increasing memory offset.
        // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
        // We now write field offsets to the corresponding offset slot;
        // field 5 with offset 0 puts 0 in offsets[5].
        // At the bottom of this function, we invert `inverse_memory_index` to
        // produce `memory_index` (see `invert_mapping`).

        let mut sized = true;
        let mut offsets = vec![Size::ZERO; fields.len()];
        let mut offset = Size::ZERO;
        let mut largest_niche = None;
        let mut largest_niche_available = 0;

        // A prefix (e.g. an enum tag) reserves space before the first field.
        if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
                if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
            align = align.max(AbiAndPrefAlign::new(prefix_align));
            offset = prefix_size.align_to(prefix_align);

        // Lay fields out in (possibly reordered) memory order.
        for &i in &inverse_memory_index {
            let field = fields[i as usize];
                bug!("univariant: field #{} of `{}` comes after unsized field", offsets.len(), ty);

            if field.is_unsized() {

            // Invariant: offset < dl.obj_size_bound() <= 1<<61
            let field_align = if let Some(pack) = pack {
                field.align.min(AbiAndPrefAlign::new(pack))
            offset = offset.align_to(field_align.abi);
            align = align.max(field_align);

            debug!("univariant offset: {:?} field: {:#?}", offset, field);
            offsets[i as usize] = offset;

            // Track the best (largest) niche across all fields, translating
            // its offset from field-relative to aggregate-relative.
            if !repr.hide_niche() {
                if let Some(mut niche) = field.largest_niche.clone() {
                    let available = niche.available(dl);
                    if available > largest_niche_available {
                        largest_niche_available = available;
                        niche.offset += offset;
                        largest_niche = Some(niche);

            offset = offset.checked_add(field.size, dl).ok_or(LayoutError::SizeOverflow(ty))?;

        // An explicit `#[repr(align)]` can only raise the alignment.
        if let Some(repr_align) = repr.align {
            align = align.max(AbiAndPrefAlign::new(repr_align));

        debug!("univariant min_size: {:?}", offset);
        let min_size = offset;

        // As stated above, inverse_memory_index holds field indices by increasing offset.
        // This makes it an already-sorted view of the offsets vec.
        // To invert it, consider:
        // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
        // Field 5 would be the first element, so memory_index is i:
        // Note: if we didn't optimize, it's already right.
            if optimize { invert_mapping(&inverse_memory_index) } else { inverse_memory_index };

        let size = min_size.align_to(align.abi);
        let mut abi = Abi::Aggregate { sized };

        // Unpack newtype ABIs and find scalar pairs.
        if sized && size.bytes() > 0 {
            // All other fields must be ZSTs, and we need them to all start at 0.
            let mut zst_offsets = offsets.iter().enumerate().filter(|&(i, _)| fields[i].is_zst());
            if zst_offsets.all(|(_, o)| o.bytes() == 0) {
                let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());

                match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
                    // We have exactly one non-ZST field.
                    (Some((i, field)), None, None) => {
                        // Field fills the struct and it has a scalar or scalar pair ABI.
                        if offsets[i].bytes() == 0
                            && align.abi == field.align.abi
                            && size == field.size
                                // For plain scalars, or vectors of them, we can't unpack
                                // newtypes for `#[repr(C)]`, as that affects C ABIs.
                                Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
                                    abi = field.abi.clone();
                                // But scalar pairs are Rust-specific and get
                                // treated as aggregates by C ABIs anyway.
                                Abi::ScalarPair(..) => {
                                    abi = field.abi.clone();

                    // Two non-ZST fields, and they're both scalars.
                            layout: &Layout { abi: Abi::Scalar(ref a), .. }, ..
                            layout: &Layout { abi: Abi::Scalar(ref b), .. }, ..
                        // Order by the memory placement, not source order.
                        let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
                        let pair = self.scalar_pair(a.clone(), b.clone());
                        let pair_offsets = match pair.fields {
                            FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
                                assert_eq!(memory_index, &[0, 1]);
                        if offsets[i] == pair_offsets[0]
                            && offsets[j] == pair_offsets[1]
                            && align == pair.align
                            // We can use `ScalarPair` only when it matches our
                            // already computed layout (including `#[repr(C)]`).

        // Any uninhabited field makes the whole aggregate uninhabited.
        if sized && fields.iter().any(|f| f.abi.is_uninhabited()) {
            abi = Abi::Uninhabited;
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Arbitrary { offsets, memory_index },
482 fn layout_raw_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx Layout, LayoutError<'tcx>> {
484 let param_env = self.param_env;
485 let dl = self.data_layout();
486 let scalar_unit = |value: Primitive| {
487 let bits = value.size(dl).bits();
488 assert!(bits <= 128);
489 Scalar { value, valid_range: 0..=(!0 >> (128 - bits)) }
491 let scalar = |value: Primitive| tcx.intern_layout(Layout::scalar(self, scalar_unit(value)));
493 let univariant = |fields: &[TyAndLayout<'_>], repr: &ReprOptions, kind| {
494 Ok(tcx.intern_layout(self.univariant_uninterned(ty, fields, repr, kind)?))
496 debug_assert!(!ty.has_infer_types_or_consts());
500 ty::Bool => tcx.intern_layout(Layout::scalar(
502 Scalar { value: Int(I8, false), valid_range: 0..=1 },
504 ty::Char => tcx.intern_layout(Layout::scalar(
506 Scalar { value: Int(I32, false), valid_range: 0..=0x10FFFF },
508 ty::Int(ity) => scalar(Int(Integer::from_attr(dl, attr::SignedInt(ity)), true)),
509 ty::Uint(ity) => scalar(Int(Integer::from_attr(dl, attr::UnsignedInt(ity)), false)),
510 ty::Float(fty) => scalar(match fty {
511 ast::FloatTy::F32 => F32,
512 ast::FloatTy::F64 => F64,
515 let mut ptr = scalar_unit(Pointer);
516 ptr.valid_range = 1..=*ptr.valid_range.end();
517 tcx.intern_layout(Layout::scalar(self, ptr))
521 ty::Never => tcx.intern_layout(Layout {
522 variants: Variants::Single { index: VariantIdx::new(0) },
523 fields: FieldsShape::Primitive,
524 abi: Abi::Uninhabited,
530 // Potentially-wide pointers.
531 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
532 let mut data_ptr = scalar_unit(Pointer);
533 if !ty.is_unsafe_ptr() {
534 data_ptr.valid_range = 1..=*data_ptr.valid_range.end();
537 let pointee = tcx.normalize_erasing_regions(param_env, pointee);
538 if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
539 return Ok(tcx.intern_layout(Layout::scalar(self, data_ptr)));
542 let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
543 let metadata = match unsized_part.kind {
545 return Ok(tcx.intern_layout(Layout::scalar(self, data_ptr)));
547 ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)),
549 let mut vtable = scalar_unit(Pointer);
550 vtable.valid_range = 1..=*vtable.valid_range.end();
553 _ => return Err(LayoutError::Unknown(unsized_part)),
556 // Effectively a (ptr, meta) tuple.
557 tcx.intern_layout(self.scalar_pair(data_ptr, metadata))
560 // Arrays and slices.
561 ty::Array(element, mut count) => {
562 if count.has_projections() {
563 count = tcx.normalize_erasing_regions(param_env, count);
564 if count.has_projections() {
565 return Err(LayoutError::Unknown(ty));
569 let count = count.try_eval_usize(tcx, param_env).ok_or(LayoutError::Unknown(ty))?;
570 let element = self.layout_of(element)?;
572 element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;
574 let abi = if count != 0 && ty.conservative_is_privately_uninhabited(tcx) {
577 Abi::Aggregate { sized: true }
580 let largest_niche = if count != 0 { element.largest_niche.clone() } else { None };
582 tcx.intern_layout(Layout {
583 variants: Variants::Single { index: VariantIdx::new(0) },
584 fields: FieldsShape::Array { stride: element.size, count },
587 align: element.align,
591 ty::Slice(element) => {
592 let element = self.layout_of(element)?;
593 tcx.intern_layout(Layout {
594 variants: Variants::Single { index: VariantIdx::new(0) },
595 fields: FieldsShape::Array { stride: element.size, count: 0 },
596 abi: Abi::Aggregate { sized: false },
598 align: element.align,
602 ty::Str => tcx.intern_layout(Layout {
603 variants: Variants::Single { index: VariantIdx::new(0) },
604 fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
605 abi: Abi::Aggregate { sized: false },
612 ty::FnDef(..) => univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?,
613 ty::Dynamic(..) | ty::Foreign(..) => {
614 let mut unit = self.univariant_uninterned(
617 &ReprOptions::default(),
618 StructKind::AlwaysSized,
621 Abi::Aggregate { ref mut sized } => *sized = false,
624 tcx.intern_layout(unit)
627 ty::Generator(def_id, substs, _) => self.generator_layout(ty, def_id, substs)?,
629 ty::Closure(_, ref substs) => {
630 let tys = substs.as_closure().upvar_tys();
632 &tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
633 &ReprOptions::default(),
634 StructKind::AlwaysSized,
640 if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };
644 .map(|k| self.layout_of(k.expect_ty()))
645 .collect::<Result<Vec<_>, _>>()?,
646 &ReprOptions::default(),
651 // SIMD vector types.
652 ty::Adt(def, ..) if def.repr.simd() => {
653 let element = self.layout_of(ty.simd_type(tcx))?;
654 let count = ty.simd_size(tcx);
656 let scalar = match element.abi {
657 Abi::Scalar(ref scalar) => scalar.clone(),
659 tcx.sess.fatal(&format!(
660 "monomorphising SIMD type `{}` with \
661 a non-machine element type `{}`",
667 element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;
668 let align = dl.vector_align(size);
669 let size = size.align_to(align.abi);
671 tcx.intern_layout(Layout {
672 variants: Variants::Single { index: VariantIdx::new(0) },
673 fields: FieldsShape::Array { stride: element.size, count },
674 abi: Abi::Vector { element: scalar, count },
675 largest_niche: element.largest_niche.clone(),
682 ty::Adt(def, substs) => {
683 // Cache the field layouts.
690 .map(|field| self.layout_of(field.ty(tcx, substs)))
691 .collect::<Result<Vec<_>, _>>()
693 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
696 if def.repr.pack.is_some() && def.repr.align.is_some() {
697 bug!("union cannot be packed and aligned");
701 if def.repr.pack.is_some() { dl.i8_align } else { dl.aggregate_align };
703 if let Some(repr_align) = def.repr.align {
704 align = align.max(AbiAndPrefAlign::new(repr_align));
707 let optimize = !def.repr.inhibit_union_abi_opt();
708 let mut size = Size::ZERO;
709 let mut abi = Abi::Aggregate { sized: true };
710 let index = VariantIdx::new(0);
711 for field in &variants[index] {
712 assert!(!field.is_unsized());
713 align = align.max(field.align);
715 // If all non-ZST fields have the same ABI, forward this ABI
716 if optimize && !field.is_zst() {
717 // Normalize scalar_unit to the maximal valid range
718 let field_abi = match &field.abi {
719 Abi::Scalar(x) => Abi::Scalar(scalar_unit(x.value)),
720 Abi::ScalarPair(x, y) => {
721 Abi::ScalarPair(scalar_unit(x.value), scalar_unit(y.value))
723 Abi::Vector { element: x, count } => {
724 Abi::Vector { element: scalar_unit(x.value), count: *count }
726 Abi::Uninhabited | Abi::Aggregate { .. } => {
727 Abi::Aggregate { sized: true }
731 if size == Size::ZERO {
732 // first non ZST: initialize 'abi'
734 } else if abi != field_abi {
735 // different fields have different ABI: reset to Aggregate
736 abi = Abi::Aggregate { sized: true };
740 size = cmp::max(size, field.size);
743 if let Some(pack) = def.repr.pack {
744 align = align.min(AbiAndPrefAlign::new(pack));
747 return Ok(tcx.intern_layout(Layout {
748 variants: Variants::Single { index },
749 fields: FieldsShape::Union(
750 NonZeroUsize::new(variants[index].len())
751 .ok_or(LayoutError::Unknown(ty))?,
756 size: size.align_to(align.abi),
760 // A variant is absent if it's uninhabited and only has ZST fields.
761 // Present uninhabited variants only require space for their fields,
762 // but *not* an encoding of the discriminant (e.g., a tag value).
763 // See issue #49298 for more details on the need to leave space
764 // for non-ZST uninhabited data (mostly partial initialization).
765 let absent = |fields: &[TyAndLayout<'_>]| {
766 let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
767 let is_zst = fields.iter().all(|f| f.is_zst());
768 uninhabited && is_zst
770 let (present_first, present_second) = {
771 let mut present_variants = variants
773 .filter_map(|(i, v)| if absent(v) { None } else { Some(i) });
774 (present_variants.next(), present_variants.next())
776 let present_first = match present_first {
777 Some(present_first) => present_first,
778 // Uninhabited because it has no variants, or only absent ones.
779 None if def.is_enum() => return tcx.layout_raw(param_env.and(tcx.types.never)),
780 // If it's a struct, still compute a layout so that we can still compute the
782 None => VariantIdx::new(0),
785 let is_struct = !def.is_enum() ||
786 // Only one variant is present.
787 (present_second.is_none() &&
788 // Representation optimizations are allowed.
789 !def.repr.inhibit_enum_layout_opt());
791 // Struct, or univariant enum equivalent to a struct.
792 // (Typechecking will reject discriminant-sizing attrs.)
794 let v = present_first;
795 let kind = if def.is_enum() || variants[v].is_empty() {
796 StructKind::AlwaysSized
798 let param_env = tcx.param_env(def.did);
799 let last_field = def.variants[v].fields.last().unwrap();
801 tcx.type_of(last_field.did).is_sized(tcx.at(DUMMY_SP), param_env);
803 StructKind::MaybeUnsized
805 StructKind::AlwaysSized
809 let mut st = self.univariant_uninterned(ty, &variants[v], &def.repr, kind)?;
810 st.variants = Variants::Single { index: v };
811 let (start, end) = self.tcx.layout_scalar_valid_range(def.did);
813 Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
814 // the asserts ensure that we are not using the
815 // `#[rustc_layout_scalar_valid_range(n)]`
816 // attribute to widen the range of anything as that would probably
817 // result in UB somewhere
818 // FIXME(eddyb) the asserts are probably not needed,
819 // as larger validity ranges would result in missed
820 // optimizations, *not* wrongly assuming the inner
821 // value is valid. e.g. unions enlarge validity ranges,
822 // because the values may be uninitialized.
823 if let Bound::Included(start) = start {
824 // FIXME(eddyb) this might be incorrect - it doesn't
825 // account for wrap-around (end < start) ranges.
826 assert!(*scalar.valid_range.start() <= start);
827 scalar.valid_range = start..=*scalar.valid_range.end();
829 if let Bound::Included(end) = end {
830 // FIXME(eddyb) this might be incorrect - it doesn't
831 // account for wrap-around (end < start) ranges.
832 assert!(*scalar.valid_range.end() >= end);
833 scalar.valid_range = *scalar.valid_range.start()..=end;
836 // Update `largest_niche` if we have introduced a larger niche.
837 let niche = if def.repr.hide_niche() {
840 Niche::from_scalar(dl, Size::ZERO, scalar.clone())
842 if let Some(niche) = niche {
843 match &st.largest_niche {
844 Some(largest_niche) => {
845 // Replace the existing niche even if they're equal,
846 // because this one is at a lower offset.
847 if largest_niche.available(dl) <= niche.available(dl) {
848 st.largest_niche = Some(niche);
851 None => st.largest_niche = Some(niche),
856 start == Bound::Unbounded && end == Bound::Unbounded,
857 "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
863 return Ok(tcx.intern_layout(st));
866 // At this point, we have handled all unions and
867 // structs. (We have also handled univariant enums
868 // that allow representation optimization.)
869 assert!(def.is_enum());
871 // The current code for niche-filling relies on variant indices
872 // instead of actual discriminants, so dataful enums with
873 // explicit discriminants (RFC #2363) would misbehave.
874 let no_explicit_discriminants = def
877 .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i.as_u32()));
879 let mut niche_filling_layout = None;
881 // Niche-filling enum optimization.
882 if !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants {
883 let mut dataful_variant = None;
884 let mut niche_variants = VariantIdx::MAX..=VariantIdx::new(0);
886 // Find one non-ZST variant.
887 'variants: for (v, fields) in variants.iter_enumerated() {
893 if dataful_variant.is_none() {
894 dataful_variant = Some(v);
897 dataful_variant = None;
902 niche_variants = *niche_variants.start().min(&v)..=v;
905 if niche_variants.start() > niche_variants.end() {
906 dataful_variant = None;
909 if let Some(i) = dataful_variant {
910 let count = (niche_variants.end().as_u32()
911 - niche_variants.start().as_u32()
914 // Find the field with the largest niche
915 let niche_candidate = variants[i]
918 .filter_map(|(j, &field)| Some((j, field.largest_niche.as_ref()?)))
919 .max_by_key(|(_, niche)| niche.available(dl));
921 if let Some((field_index, niche, (niche_start, niche_scalar))) =
922 niche_candidate.and_then(|(field_index, niche)| {
923 Some((field_index, niche, niche.reserve(self, count)?))
926 let mut align = dl.aggregate_align;
930 let mut st = self.univariant_uninterned(
934 StructKind::AlwaysSized,
936 st.variants = Variants::Single { index: j };
938 align = align.max(st.align);
942 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
944 let offset = st[i].fields.offset(field_index) + niche.offset;
945 let size = st[i].size;
947 let abi = if st.iter().all(|v| v.abi.is_uninhabited()) {
951 Abi::Scalar(_) => Abi::Scalar(niche_scalar.clone()),
952 Abi::ScalarPair(ref first, ref second) => {
953 // We need to use scalar_unit to reset the
954 // valid range to the maximal one for that
955 // primitive, because only the niche is
956 // guaranteed to be initialised, not the
958 if offset.bytes() == 0 {
960 niche_scalar.clone(),
961 scalar_unit(second.value),
965 scalar_unit(first.value),
966 niche_scalar.clone(),
970 _ => Abi::Aggregate { sized: true },
975 Niche::from_scalar(dl, offset, niche_scalar.clone());
977 niche_filling_layout = Some(Layout {
978 variants: Variants::Multiple {
980 tag_encoding: TagEncoding::Niche {
988 fields: FieldsShape::Arbitrary {
989 offsets: vec![offset],
990 memory_index: vec![0],
1001 let (mut min, mut max) = (i128::MAX, i128::MIN);
1002 let discr_type = def.repr.discr_type();
1003 let bits = Integer::from_attr(self, discr_type).size().bits();
1004 for (i, discr) in def.discriminants(tcx) {
1005 if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
1008 let mut x = discr.val as i128;
1009 if discr_type.is_signed() {
1010 // sign extend the raw representation to be an i128
1011 x = (x << (128 - bits)) >> (128 - bits);
1020 // We might have no inhabited variants, so pretend there's at least one.
1021 if (min, max) == (i128::MAX, i128::MIN) {
1025 assert!(min <= max, "discriminant range is {}...{}", min, max);
1026 let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max);
1028 let mut align = dl.aggregate_align;
1029 let mut size = Size::ZERO;
1031 // We're interested in the smallest alignment, so start large.
1032 let mut start_align = Align::from_bytes(256).unwrap();
1033 assert_eq!(Integer::for_align(dl, start_align), None);
1035 // repr(C) on an enum tells us to make a (tag, union) layout,
1036 // so we need to grow the prefix alignment to be at least
1037 // the alignment of the union. (This value is used both for
1038 // determining the alignment of the overall enum, and the
1039 // determining the alignment of the payload after the tag.)
1040 let mut prefix_align = min_ity.align(dl).abi;
1042 for fields in &variants {
1043 for field in fields {
1044 prefix_align = prefix_align.max(field.align.abi);
1049 // Create the set of structs that represent each variant.
1050 let mut layout_variants = variants
1052 .map(|(i, field_layouts)| {
1053 let mut st = self.univariant_uninterned(
1057 StructKind::Prefixed(min_ity.size(), prefix_align),
1059 st.variants = Variants::Single { index: i };
1060 // Find the first field we can't move later
1061 // to make room for a larger discriminant.
1063 st.fields.index_by_increasing_offset().map(|j| field_layouts[j])
1065 if !field.is_zst() || field.align.abi.bytes() != 1 {
1066 start_align = start_align.min(field.align.abi);
1070 size = cmp::max(size, st.size);
1071 align = align.max(st.align);
1074 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1076 // Align the maximum variant size to the largest alignment.
1077 size = size.align_to(align.abi);
1079 if size.bytes() >= dl.obj_size_bound() {
1080 return Err(LayoutError::SizeOverflow(ty));
1083 let typeck_ity = Integer::from_attr(dl, def.repr.discr_type());
1084 if typeck_ity < min_ity {
1085 // It is a bug if Layout decided on a greater discriminant size than typeck for
1086 // some reason at this point (based on values discriminant can take on). Mostly
1087 // because this discriminant will be loaded, and then stored into variable of
1088 // type calculated by typeck. Consider such case (a bug): typeck decided on
1089 // byte-sized discriminant, but layout thinks we need a 16-bit to store all
1090 // discriminant values. That would be a bug, because then, in codegen, in order
1091 // to store this 16-bit discriminant into 8-bit sized temporary some of the
1092 // space necessary to represent would have to be discarded (or layout is wrong
1093 // on thinking it needs 16 bits)
1095 "layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
1099 // However, it is fine to make discr type however large (as an optimisation)
1100 // after this point – we’ll just truncate the value we load in codegen.
1103 // Check to see if we should use a different type for the
1104 // discriminant. We can safely use a type with the same size
1105 // as the alignment of the first field of each variant.
1106 // We increase the size of the discriminant to avoid LLVM copying
1107 // padding when it doesn't need to. This normally causes unaligned
1108 // load/stores and excessive memcpy/memset operations. By using a
1109 // bigger integer size, LLVM can be sure about its contents and
1110 // won't be so conservative.
1112 // Use the initial field alignment
1113 let mut ity = if def.repr.c() || def.repr.int.is_some() {
1116 Integer::for_align(dl, start_align).unwrap_or(min_ity)
1119 // If the alignment is not larger than the chosen discriminant size,
1120 // don't use the alignment as the final size.
1124 // Patch up the variants' first few fields.
1125 let old_ity_size = min_ity.size();
1126 let new_ity_size = ity.size();
1127 for variant in &mut layout_variants {
1128 match variant.fields {
1129 FieldsShape::Arbitrary { ref mut offsets, .. } => {
1131 if *i <= old_ity_size {
1132 assert_eq!(*i, old_ity_size);
1136 // We might be making the struct larger.
1137 if variant.size <= old_ity_size {
1138 variant.size = new_ity_size;
1146 let tag_mask = !0u128 >> (128 - ity.size().bits());
1148 value: Int(ity, signed),
1149 valid_range: (min as u128 & tag_mask)..=(max as u128 & tag_mask),
1151 let mut abi = Abi::Aggregate { sized: true };
1152 if tag.value.size(dl) == size {
1153 abi = Abi::Scalar(tag.clone());
1155 // Try to use a ScalarPair for all tagged enums.
1156 let mut common_prim = None;
1157 for (field_layouts, layout_variant) in variants.iter().zip(&layout_variants) {
1158 let offsets = match layout_variant.fields {
1159 FieldsShape::Arbitrary { ref offsets, .. } => offsets,
1163 field_layouts.iter().zip(offsets).filter(|p| !p.0.is_zst());
1164 let (field, offset) = match (fields.next(), fields.next()) {
1165 (None, None) => continue,
1166 (Some(pair), None) => pair,
1172 let prim = match field.abi {
1173 Abi::Scalar(ref scalar) => scalar.value,
1179 if let Some(pair) = common_prim {
1180 // This is pretty conservative. We could go fancier
1181 // by conflating things like i32 and u32, or even
1182 // realising that (u8, u8) could just cohabit with
1184 if pair != (prim, offset) {
1189 common_prim = Some((prim, offset));
1192 if let Some((prim, offset)) = common_prim {
1193 let pair = self.scalar_pair(tag.clone(), scalar_unit(prim));
1194 let pair_offsets = match pair.fields {
1195 FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
1196 assert_eq!(memory_index, &[0, 1]);
1201 if pair_offsets[0] == Size::ZERO
1202 && pair_offsets[1] == *offset
1203 && align == pair.align
1204 && size == pair.size
1206 // We can use `ScalarPair` only when it matches our
1207 // already computed layout (including `#[repr(C)]`).
1213 if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
1214 abi = Abi::Uninhabited;
1217 let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag.clone());
1219 let tagged_layout = Layout {
1220 variants: Variants::Multiple {
1222 tag_encoding: TagEncoding::Direct,
1224 variants: layout_variants,
1226 fields: FieldsShape::Arbitrary {
1227 offsets: vec![Size::ZERO],
1228 memory_index: vec![0],
1236 let best_layout = match (tagged_layout, niche_filling_layout) {
1237 (tagged_layout, Some(niche_filling_layout)) => {
1238 // Pick the smaller layout; otherwise,
1239 // pick the layout with the larger niche; otherwise,
1240 // pick tagged as it has simpler codegen.
1241 cmp::min_by_key(tagged_layout, niche_filling_layout, |layout| {
1243 layout.largest_niche.as_ref().map_or(0, |n| n.available(dl));
1244 (layout.size, cmp::Reverse(niche_size))
1247 (tagged_layout, None) => tagged_layout,
1250 tcx.intern_layout(best_layout)
1253 // Types with no meaningful known layout.
1254 ty::Projection(_) | ty::Opaque(..) => {
1255 let normalized = tcx.normalize_erasing_regions(param_env, ty);
1256 if ty == normalized {
1257 return Err(LayoutError::Unknown(ty));
1259 tcx.layout_raw(param_env.and(normalized))?
1262 ty::Bound(..) | ty::Placeholder(..) | ty::GeneratorWitness(..) | ty::Infer(_) => {
1263 bug!("Layout::compute: unexpected type `{}`", ty)
1266 ty::Param(_) | ty::Error(_) => {
1267 return Err(LayoutError::Unknown(ty));
1273 /// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
1274 #[derive(Clone, Debug, PartialEq)]
// Per-local overlap classification (see the doc comment above): a local
// `Assigned` to exactly one variant may share storage with locals of other
// variants; an `Ineligible` local is promoted to the shared prefix, with
// `Some(idx)` later recording its slot in that prefix (see
// `generator_saved_local_eligibility`, which writes `Ineligible(Some(idx))`).
1275 enum SavedLocalEligibility {
// NOTE(review): the `Unassigned` variant used elsewhere in this file is
// elided from this excerpt of the enum body.
1277 Assigned(VariantIdx),
1278 // FIXME: Use newtype_index so we aren't wasting bytes
1279 Ineligible(Option<u32>),
1282 // When laying out generators, we divide our saved local fields into two
1283 // categories: overlap-eligible and overlap-ineligible.
1285 // Those fields which are ineligible for overlap go in a "prefix" at the
1286 // beginning of the layout, and always have space reserved for them.
1288 // Overlap-eligible fields are only assigned to one variant, so we lay
1289 // those fields out for each variant and put them right after the
1292 // Finally, in the layout details, we point to the fields from the
1293 // variants they are assigned to. It is possible for some fields to be
1294 // included in multiple variants. No field ever "moves around" in the
1295 // layout; its offset is always the same.
1297 // Also included in the layout are the upvars and the discriminant.
1298 // These are included as fields on the "outer" layout; they are not part
1300 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
1301 /// Compute the eligibility and assignment of each local.
1302 fn generator_saved_local_eligibility(
1304 info: &GeneratorLayout<'tcx>,
1305 ) -> (BitSet<GeneratorSavedLocal>, IndexVec<GeneratorSavedLocal, SavedLocalEligibility>) {
1306 use SavedLocalEligibility::*;
// Start with every saved local `Unassigned`; the passes below refine each
// entry to `Assigned(variant)` or `Ineligible`.
1308 let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> =
1309 IndexVec::from_elem_n(Unassigned, info.field_tys.len())
1311 // The saved locals not eligible for overlap. These will get
1312 // "promoted" to the prefix of our generator.
1313 let mut ineligible_locals = BitSet::new_empty(info.field_tys.len());
1315 // Figure out which of our saved locals are fields in only
1316 // one variant. The rest are deemed ineligible for overlap.
1317 for (variant_index, fields) in info.variant_fields.iter_enumerated() {
1318 for local in fields {
1319 match assignments[*local] {
1321 assignments[*local] = Assigned(variant_index);
1324 // We've already seen this local at another suspension
1325 // point, so it is no longer a candidate.
1327 "removing local {:?} in >1 variant ({:?}, {:?})",
1332 ineligible_locals.insert(*local);
1333 assignments[*local] = Ineligible(None);
1340 // Next, check every pair of eligible locals to see if they
// (conflict, per the storage-conflict bit-matrix computed by MIR analysis)
1342 for local_a in info.storage_conflicts.rows() {
1343 let conflicts_a = info.storage_conflicts.count(local_a);
1344 if ineligible_locals.contains(local_a) {
1348 for local_b in info.storage_conflicts.iter(local_a) {
1349 // local_a and local_b are storage live at the same time, therefore they
1350 // cannot overlap in the generator layout. The only way to guarantee
1351 // this is if they are in the same variant, or one is ineligible
1352 // (which means it is stored in every variant).
1353 if ineligible_locals.contains(local_b)
1354 || assignments[local_a] == assignments[local_b]
1359 // If they conflict, we will choose one to make ineligible.
1360 // This is not always optimal; it's just a greedy heuristic that
1361 // seems to produce good results most of the time.
// Evict whichever of the pair conflicts with more locals overall, so a
// single eviction resolves the most conflicts.
1362 let conflicts_b = info.storage_conflicts.count(local_b);
1363 let (remove, other) =
1364 if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
1365 ineligible_locals.insert(remove);
1366 assignments[remove] = Ineligible(None);
1367 trace!("removing local {:?} due to conflict with {:?}", remove, other);
1371 // Count the number of variants in use. If only one of them, then it is
1372 // impossible to overlap any locals in our layout. In this case it's
1373 // always better to make the remaining locals ineligible, so we can
1374 // lay them out with the other locals in the prefix and eliminate
1375 // unnecessary padding bytes.
1377 let mut used_variants = BitSet::new_empty(info.variant_fields.len());
1378 for assignment in &assignments {
1379 if let Assigned(idx) = assignment {
1380 used_variants.insert(*idx);
1383 if used_variants.count() < 2 {
1384 for assignment in assignments.iter_mut() {
1385 *assignment = Ineligible(None);
1387 ineligible_locals.insert_all();
1391 // Write down the order of our locals that will be promoted to the prefix.
// Prefix slots are handed out in bit-set iteration order, and each slot
// index is recorded back into the assignment as `Ineligible(Some(idx))`.
1393 for (idx, local) in ineligible_locals.iter().enumerate() {
1394 assignments[local] = Ineligible(Some(idx as u32));
1397 debug!("generator saved local assignments: {:?}", assignments);
1399 (ineligible_locals, assignments)
1402 /// Compute the full generator layout.
1403 fn generator_layout(
1406 def_id: hir::def_id::DefId,
1407 substs: SubstsRef<'tcx>,
1408 ) -> Result<&'tcx Layout, LayoutError<'tcx>> {
1409 use SavedLocalEligibility::*;
1412 let subst_field = |ty: Ty<'tcx>| ty.subst(tcx, substs);
1414 let info = tcx.generator_layout(def_id);
1415 let (ineligible_locals, assignments) = self.generator_saved_local_eligibility(&info);
1417 // Build a prefix layout, including "promoting" all ineligible
1418 // locals as part of the prefix. We compute the layout of all of
1419 // these fields at once to get optimal packing.
1420 let tag_index = substs.as_generator().prefix_tys().count();
1422 // `info.variant_fields` already accounts for the reserved variants, so no need to add them.
1423 let max_discr = (info.variant_fields.len() - 1) as u128;
1424 let discr_int = Integer::fit_unsigned(max_discr);
1425 let discr_int_ty = discr_int.to_ty(tcx, false);
1426 let tag = Scalar { value: Primitive::Int(discr_int, false), valid_range: 0..=max_discr };
1427 let tag_layout = self.tcx.intern_layout(Layout::scalar(self, tag.clone()));
1428 let tag_layout = TyAndLayout { ty: discr_int_ty, layout: tag_layout };
1430 let promoted_layouts = ineligible_locals
1432 .map(|local| subst_field(info.field_tys[local]))
1433 .map(|ty| tcx.mk_maybe_uninit(ty))
1434 .map(|ty| self.layout_of(ty));
1435 let prefix_layouts = substs
1438 .map(|ty| self.layout_of(ty))
1439 .chain(iter::once(Ok(tag_layout)))
1440 .chain(promoted_layouts)
1441 .collect::<Result<Vec<_>, _>>()?;
1442 let prefix = self.univariant_uninterned(
1445 &ReprOptions::default(),
1446 StructKind::AlwaysSized,
1449 let (prefix_size, prefix_align) = (prefix.size, prefix.align);
1451 // Split the prefix layout into the "outer" fields (upvars and
1452 // discriminant) and the "promoted" fields. Promoted fields will
1453 // get included in each variant that requested them in
1455 debug!("prefix = {:#?}", prefix);
1456 let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
1457 FieldsShape::Arbitrary { mut offsets, memory_index } => {
1458 let mut inverse_memory_index = invert_mapping(&memory_index);
1460 // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
1461 // "outer" and "promoted" fields respectively.
1462 let b_start = (tag_index + 1) as u32;
1463 let offsets_b = offsets.split_off(b_start as usize);
1464 let offsets_a = offsets;
1466 // Disentangle the "a" and "b" components of `inverse_memory_index`
1467 // by preserving the order but keeping only one disjoint "half" each.
1468 // FIXME(eddyb) build a better abstraction for permutations, if possible.
1469 let inverse_memory_index_b: Vec<_> =
1470 inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
1471 inverse_memory_index.retain(|&i| i < b_start);
1472 let inverse_memory_index_a = inverse_memory_index;
1474 // Since `inverse_memory_index_{a,b}` each only refer to their
1475 // respective fields, they can be safely inverted
1476 let memory_index_a = invert_mapping(&inverse_memory_index_a);
1477 let memory_index_b = invert_mapping(&inverse_memory_index_b);
1480 FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
1481 (outer_fields, offsets_b, memory_index_b)
1486 let mut size = prefix.size;
1487 let mut align = prefix.align;
1491 .map(|(index, variant_fields)| {
1492 // Only include overlap-eligible fields when we compute our variant layout.
1493 let variant_only_tys = variant_fields
1495 .filter(|local| match assignments[**local] {
1496 Unassigned => bug!(),
1497 Assigned(v) if v == index => true,
1498 Assigned(_) => bug!("assignment does not match variant"),
1499 Ineligible(_) => false,
1501 .map(|local| subst_field(info.field_tys[*local]));
1503 let mut variant = self.univariant_uninterned(
1506 .map(|ty| self.layout_of(ty))
1507 .collect::<Result<Vec<_>, _>>()?,
1508 &ReprOptions::default(),
1509 StructKind::Prefixed(prefix_size, prefix_align.abi),
1511 variant.variants = Variants::Single { index };
1513 let (offsets, memory_index) = match variant.fields {
1514 FieldsShape::Arbitrary { offsets, memory_index } => (offsets, memory_index),
1518 // Now, stitch the promoted and variant-only fields back together in
1519 // the order they are mentioned by our GeneratorLayout.
1520 // Because we only use some subset (that can differ between variants)
1521 // of the promoted fields, we can't just pick those elements of the
1522 // `promoted_memory_index` (as we'd end up with gaps).
1523 // So instead, we build an "inverse memory_index", as if all of the
1524 // promoted fields were being used, but leave the elements not in the
1525 // subset as `INVALID_FIELD_IDX`, which we can filter out later to
1526 // obtain a valid (bijective) mapping.
1527 const INVALID_FIELD_IDX: u32 = !0;
1528 let mut combined_inverse_memory_index =
1529 vec![INVALID_FIELD_IDX; promoted_memory_index.len() + memory_index.len()];
1530 let mut offsets_and_memory_index = offsets.into_iter().zip(memory_index);
1531 let combined_offsets = variant_fields
1535 let (offset, memory_index) = match assignments[*local] {
1536 Unassigned => bug!(),
1538 let (offset, memory_index) =
1539 offsets_and_memory_index.next().unwrap();
1540 (offset, promoted_memory_index.len() as u32 + memory_index)
1542 Ineligible(field_idx) => {
1543 let field_idx = field_idx.unwrap() as usize;
1544 (promoted_offsets[field_idx], promoted_memory_index[field_idx])
1547 combined_inverse_memory_index[memory_index as usize] = i as u32;
1552 // Remove the unused slots and invert the mapping to obtain the
1553 // combined `memory_index` (also see previous comment).
1554 combined_inverse_memory_index.retain(|&i| i != INVALID_FIELD_IDX);
1555 let combined_memory_index = invert_mapping(&combined_inverse_memory_index);
1557 variant.fields = FieldsShape::Arbitrary {
1558 offsets: combined_offsets,
1559 memory_index: combined_memory_index,
1562 size = size.max(variant.size);
1563 align = align.max(variant.align);
1566 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1568 size = size.align_to(align.abi);
1570 let abi = if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi.is_uninhabited())
1574 Abi::Aggregate { sized: true }
1577 let layout = tcx.intern_layout(Layout {
1578 variants: Variants::Multiple {
1580 tag_encoding: TagEncoding::Direct,
1581 tag_field: tag_index,
1584 fields: outer_fields,
1586 largest_niche: prefix.largest_niche,
1590 debug!("generator layout ({:?}): {:#?}", ty, layout);
1594 /// This is invoked by the `layout_raw` query to record the final
1595 /// layout of each type.
// Cheap gate around the recording work; the real logic lives in the
// `_outlined` sibling, presumably to keep this frequently-called check
// small — TODO confirm.
1597 fn record_layout_for_printing(&self, layout: TyAndLayout<'tcx>) {
1598 // If we are running with `-Zprint-type-sizes`, maybe record layouts
1599 // for dumping later.
1600 if self.tcx.sess.opts.debugging_opts.print_type_sizes {
1601 self.record_layout_for_printing_outlined(layout)
1605 fn record_layout_for_printing_outlined(&self, layout: TyAndLayout<'tcx>) {
1606 // Ignore layouts that are done with non-empty environments or
1607 // non-monomorphic layouts, as the user only wants to see the stuff
1608 // resulting from the final codegen session.
1609 if layout.ty.has_param_types_or_consts() || !self.param_env.caller_bounds().is_empty() {
1613 // (delay format until we actually need it)
1614 let record = |kind, packed, opt_discr_size, variants| {
1615 let type_desc = format!("{:?}", layout.ty);
1616 self.tcx.sess.code_stats.record_type_size(
1627 let adt_def = match layout.ty.kind {
1628 ty::Adt(ref adt_def, _) => {
1629 debug!("print-type-size t: `{:?}` process adt", layout.ty);
1633 ty::Closure(..) => {
1634 debug!("print-type-size t: `{:?}` record closure", layout.ty);
1635 record(DataTypeKind::Closure, false, None, vec![]);
1640 debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
1645 let adt_kind = adt_def.adt_kind();
1646 let adt_packed = adt_def.repr.pack.is_some();
1648 let build_variant_info = |n: Option<Ident>, flds: &[Symbol], layout: TyAndLayout<'tcx>| {
1649 let mut min_size = Size::ZERO;
1650 let field_info: Vec<_> = flds
1653 .map(|(i, &name)| match layout.field(self, i) {
1655 bug!("no layout found for field {}: `{:?}`", name, err);
1657 Ok(field_layout) => {
1658 let offset = layout.fields.offset(i);
1659 let field_end = offset + field_layout.size;
1660 if min_size < field_end {
1661 min_size = field_end;
1664 name: name.to_string(),
1665 offset: offset.bytes(),
1666 size: field_layout.size.bytes(),
1667 align: field_layout.align.abi.bytes(),
1674 name: n.map(|n| n.to_string()),
1675 kind: if layout.is_unsized() { SizeKind::Min } else { SizeKind::Exact },
1676 align: layout.align.abi.bytes(),
1677 size: if min_size.bytes() == 0 { layout.size.bytes() } else { min_size.bytes() },
1682 match layout.variants {
1683 Variants::Single { index } => {
1684 debug!("print-type-size `{:#?}` variant {}", layout, adt_def.variants[index].ident);
1685 if !adt_def.variants.is_empty() {
1686 let variant_def = &adt_def.variants[index];
1687 let fields: Vec<_> = variant_def.fields.iter().map(|f| f.ident.name).collect();
1692 vec![build_variant_info(Some(variant_def.ident), &fields, layout)],
1695 // (This case arises for *empty* enums; so give it
1697 record(adt_kind.into(), adt_packed, None, vec![]);
1701 Variants::Multiple { ref tag, ref tag_encoding, .. } => {
1703 "print-type-size `{:#?}` adt general variants def {}",
1705 adt_def.variants.len()
1707 let variant_infos: Vec<_> = adt_def
1710 .map(|(i, variant_def)| {
1711 let fields: Vec<_> =
1712 variant_def.fields.iter().map(|f| f.ident.name).collect();
1714 Some(variant_def.ident),
1716 layout.for_variant(self, i),
1723 match tag_encoding {
1724 TagEncoding::Direct => Some(tag.value.size(self)),
1734 /// Type size "skeleton", i.e., the only information determining a type's size.
1735 /// While this is conservative (aside from constant sizes, only pointers,
1736 /// newtypes thereof, and null-pointer-optimized enums are allowed), it is
1737 /// enough to statically check common use cases of transmute.
1738 #[derive(Copy, Clone, Debug)]
1739 pub enum SizeSkeleton<'tcx> {
1740 /// Any statically computable Layout.
1743 /// A potentially-fat pointer.
1745 /// If true, this pointer is never null.
1747 /// The type which determines the unsized metadata, if any,
1748 /// of this pointer. Either a type parameter or a projection
1749 /// depending on one, with regions erased.
1754 impl<'tcx> SizeSkeleton<'tcx> {
1758 param_env: ty::ParamEnv<'tcx>,
1759 ) -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
1760 debug_assert!(!ty.has_infer_types_or_consts());
1762 // First try computing a static layout.
1763 let err = match tcx.layout_of(param_env.and(ty)) {
1765 return Ok(SizeSkeleton::Known(layout.size));
1771 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
1772 let non_zero = !ty.is_unsafe_ptr();
1773 let tail = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
1775 ty::Param(_) | ty::Projection(_) => {
1776 debug_assert!(tail.has_param_types_or_consts());
1777 Ok(SizeSkeleton::Pointer { non_zero, tail: tcx.erase_regions(&tail) })
1780 "SizeSkeleton::compute({}): layout errored ({}), yet \
1781 tail `{}` is not a type parameter or a projection",
1789 ty::Adt(def, substs) => {
1790 // Only newtypes and enums w/ nullable pointer optimization.
1791 if def.is_union() || def.variants.is_empty() || def.variants.len() > 2 {
1795 // Get a zero-sized variant or a pointer newtype.
1796 let zero_or_ptr_variant = |i| {
1797 let i = VariantIdx::new(i);
1798 let fields = def.variants[i]
1801 .map(|field| SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env));
1803 for field in fields {
1806 SizeSkeleton::Known(size) => {
1807 if size.bytes() > 0 {
1811 SizeSkeleton::Pointer { .. } => {
1822 let v0 = zero_or_ptr_variant(0)?;
1824 if def.variants.len() == 1 {
1825 if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
1826 return Ok(SizeSkeleton::Pointer {
1828 || match tcx.layout_scalar_valid_range(def.did) {
1829 (Bound::Included(start), Bound::Unbounded) => start > 0,
1830 (Bound::Included(start), Bound::Included(end)) => {
1831 0 < start && start < end
1842 let v1 = zero_or_ptr_variant(1)?;
1843 // Nullable pointer enum optimization.
1845 (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None)
1846 | (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
1847 Ok(SizeSkeleton::Pointer { non_zero: false, tail })
1853 ty::Projection(_) | ty::Opaque(..) => {
1854 let normalized = tcx.normalize_erasing_regions(param_env, ty);
1855 if ty == normalized {
1858 SizeSkeleton::compute(normalized, tcx, param_env)
// Whether two size skeletons are guaranteed to describe types of the same
// size: two `Known` sizes must be equal; two `Pointer` skeletons appear to
// be compared by their unsized `tail` type (the arm body is elided from
// this excerpt — TODO confirm).
1866 pub fn same_size(self, other: SizeSkeleton<'_>) -> bool {
1867 match (self, other) {
1868 (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
1869 (SizeSkeleton::Pointer { tail: a, .. }, SizeSkeleton::Pointer { tail: b, .. }) => {
// Capability trait: a context that can hand out the global `TyCtxt`
// (and, via the `HasDataLayout` supertrait, the target data layout).
1877 pub trait HasTyCtxt<'tcx>: HasDataLayout {
1878 fn tcx(&self) -> TyCtxt<'tcx>;
// Capability trait: a context that can supply a `ty::ParamEnv`
// (implemented for `LayoutCx` below; used e.g. by `pointee_info_at`).
1881 pub trait HasParamEnv<'tcx> {
1882 fn param_env(&self) -> ty::ParamEnv<'tcx>;
// A `TyCtxt` knows the target data layout directly.
1885 impl<'tcx> HasDataLayout for TyCtxt<'tcx> {
1886 fn data_layout(&self) -> &TargetDataLayout {
// A `TyCtxt` is trivially its own type context.
1891 impl<'tcx> HasTyCtxt<'tcx> for TyCtxt<'tcx> {
1892 fn tcx(&self) -> TyCtxt<'tcx> {
// `LayoutCx` carries its own `param_env`; expose it regardless of the
// wrapped context type `C`.
1897 impl<'tcx, C> HasParamEnv<'tcx> for LayoutCx<'tcx, C> {
1898 fn param_env(&self) -> ty::ParamEnv<'tcx> {
// Forward data-layout access to the wrapped `tcx`-like context.
1903 impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
1904 fn data_layout(&self) -> &TargetDataLayout {
1905 self.tcx.data_layout()
// Forward `tcx` access to the wrapped context.
1909 impl<'tcx, T: HasTyCtxt<'tcx>> HasTyCtxt<'tcx> for LayoutCx<'tcx, T> {
1910 fn tcx(&self) -> TyCtxt<'tcx> {
/// Shorthand for the `rustc_target` type-plus-layout pair specialized to `Ty<'tcx>`.
1915 pub type TyAndLayout<'tcx> = ::rustc_target::abi::TyAndLayout<'tcx, Ty<'tcx>>;
1917 impl<'tcx> LayoutOf for LayoutCx<'tcx, TyCtxt<'tcx>> {
1919 type TyAndLayout = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
1921 /// Computes the layout of a type. Note that this implicitly
1922 /// executes in "reveal all" mode.
1923 fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout {
// Normalize under a reveal-all param-env first, so the `layout_raw`
// query only ever sees the normalized type.
1924 let param_env = self.param_env.with_reveal_all_normalized(self.tcx);
1925 let ty = self.tcx.normalize_erasing_regions(param_env, ty);
1926 let layout = self.tcx.layout_raw(param_env.and(ty))?;
1927 let layout = TyAndLayout { ty, layout };
1929 // N.B., this recording is normally disabled; when enabled, it
1930 // can however trigger recursive invocations of `layout_of`.
1931 // Therefore, we execute it *after* the main query has
1932 // completed, to avoid problems around recursive structures
1933 // and the like. (Admittedly, I wasn't able to reproduce a problem
1934 // here, but it seems like the right thing to do. -nmatsakis)
1935 self.record_layout_for_printing(layout);
// Same as the `TyCtxt`-based impl of `LayoutOf`, but for a span-carrying
// `TyCtxtAt` context.
1941 impl LayoutOf for LayoutCx<'tcx, ty::query::TyCtxtAt<'tcx>> {
1943 type TyAndLayout = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
1945 /// Computes the layout of a type. Note that this implicitly
1946 /// executes in "reveal all" mode.
1947 fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout {
1948 let param_env = self.param_env.with_reveal_all_normalized(*self.tcx);
1949 let ty = self.tcx.normalize_erasing_regions(param_env, ty);
1950 let layout = self.tcx.layout_raw(param_env.and(ty))?;
1951 let layout = TyAndLayout { ty, layout };
1953 // N.B., this recording is normally disabled; when enabled, it
1954 // can however trigger recursive invocations of `layout_of`.
1955 // Therefore, we execute it *after* the main query has
1956 // completed, to avoid problems around recursive structures
1957 // and the like. (Admittedly, I wasn't able to reproduce a problem
1958 // here, but it seems like the right thing to do. -nmatsakis)
// Re-wrap as a plain `TyCtxt`-based context, since the recording helper
// is defined on that flavor of `LayoutCx`.
1959 let cx = LayoutCx { tcx: *self.tcx, param_env: self.param_env };
1960 cx.record_layout_for_printing(layout);
1966 // Helper (inherent) `layout_of` methods to avoid pushing `LayoutCx` to users.
1968 /// Computes the layout of a type. Note that this implicitly
1969 /// executes in "reveal all" mode.
1973 param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
1974 ) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
1975 let cx = LayoutCx { tcx: self, param_env: param_env_and_ty.param_env };
1976 cx.layout_of(param_env_and_ty.value)
1980 impl ty::query::TyCtxtAt<'tcx> {
1981 /// Computes the layout of a type. Note that this implicitly
1982 /// executes in "reveal all" mode.
1986 param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
1987 ) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
1988 let cx = LayoutCx { tcx: self.at(self.span), param_env: param_env_and_ty.param_env };
1989 cx.layout_of(param_env_and_ty.value)
1993 impl<'tcx, C> TyAndLayoutMethods<'tcx, C> for Ty<'tcx>
1995 C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout: MaybeResult<TyAndLayout<'tcx>>>
1997 + HasParamEnv<'tcx>,
2000 this: TyAndLayout<'tcx>,
2002 variant_index: VariantIdx,
2003 ) -> TyAndLayout<'tcx> {
2004 let layout = match this.variants {
2005 Variants::Single { index }
2006 // If all variants but one are uninhabited, the variant layout is the enum layout.
2007 if index == variant_index &&
2008 // Don't confuse variants of uninhabited enums with the enum itself.
2009 // For more details see https://github.com/rust-lang/rust/issues/69763.
2010 this.fields != FieldsShape::Primitive =>
2015 Variants::Single { index } => {
2016 // Deny calling for_variant more than once for non-Single enums.
2017 if let Ok(original_layout) = cx.layout_of(this.ty).to_result() {
2018 assert_eq!(original_layout.variants, Variants::Single { index });
2021 let fields = match this.ty.kind {
2022 ty::Adt(def, _) if def.variants.is_empty() =>
2023 bug!("for_variant called on zero-variant enum"),
2024 ty::Adt(def, _) => def.variants[variant_index].fields.len(),
2028 tcx.intern_layout(Layout {
2029 variants: Variants::Single { index: variant_index },
2030 fields: match NonZeroUsize::new(fields) {
2031 Some(fields) => FieldsShape::Union(fields),
2032 None => FieldsShape::Arbitrary { offsets: vec![], memory_index: vec![] },
2034 abi: Abi::Uninhabited,
2035 largest_niche: None,
2036 align: tcx.data_layout.i8_align,
2041 Variants::Multiple { ref variants, .. } => &variants[variant_index],
2044 assert_eq!(layout.variants, Variants::Single { index: variant_index });
2046 TyAndLayout { ty: this.ty, layout }
2049 fn field(this: TyAndLayout<'tcx>, cx: &C, i: usize) -> C::TyAndLayout {
2051 let tag_layout = |tag: &Scalar| -> C::TyAndLayout {
2052 let layout = Layout::scalar(cx, tag.clone());
2053 MaybeResult::from(Ok(TyAndLayout {
2054 layout: tcx.intern_layout(layout),
2055 ty: tag.value.to_ty(tcx),
2059 cx.layout_of(match this.ty.kind {
2068 | ty::GeneratorWitness(..)
2070 | ty::Dynamic(..) => bug!("TyAndLayout::field_type({:?}): not applicable", this),
2072 // Potentially-fat pointers.
2073 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
2074 assert!(i < this.fields.count());
2076 // Reuse the fat `*T` type as its own thin pointer data field.
2077 // This provides information about, e.g., DST struct pointees
2078 // (which may have no non-DST form), and will work as long
2079 // as the `Abi` or `FieldsShape` is checked by users.
2081 let nil = tcx.mk_unit();
2082 let ptr_ty = if this.ty.is_unsafe_ptr() {
2085 tcx.mk_mut_ref(tcx.lifetimes.re_static, nil)
2087 return MaybeResult::from(cx.layout_of(ptr_ty).to_result().map(
2089 ptr_layout.ty = this.ty;
2095 match tcx.struct_tail_erasing_lifetimes(pointee, cx.param_env()).kind {
2096 ty::Slice(_) | ty::Str => tcx.types.usize,
2097 ty::Dynamic(_, _) => {
2098 tcx.mk_imm_ref(tcx.lifetimes.re_static, tcx.mk_array(tcx.types.usize, 3))
2099 /* FIXME: use actual fn pointers
2100 Warning: naively computing the number of entries in the
2101 vtable by counting the methods on the trait + methods on
2102 all parent traits does not work, because some methods can
2103 be not object safe and thus excluded from the vtable.
2104 Increase this counter if you tried to implement this but
2105 failed to do it without duplicating a lot of code from
2106 other places in the compiler: 2
2108 tcx.mk_array(tcx.types.usize, 3),
2109 tcx.mk_array(Option<fn()>),
2113 _ => bug!("TyAndLayout::field_type({:?}): not applicable", this),
2117 // Arrays and slices.
2118 ty::Array(element, _) | ty::Slice(element) => element,
2119 ty::Str => tcx.types.u8,
2121 // Tuples, generators and closures.
2122 ty::Closure(_, ref substs) => substs.as_closure().upvar_tys().nth(i).unwrap(),
2124 ty::Generator(def_id, ref substs, _) => match this.variants {
2125 Variants::Single { index } => substs
2127 .state_tys(def_id, tcx)
2128 .nth(index.as_usize())
2132 Variants::Multiple { ref tag, tag_field, .. } => {
2134 return tag_layout(tag);
2136 substs.as_generator().prefix_tys().nth(i).unwrap()
2140 ty::Tuple(tys) => tys[i].expect_ty(),
2142 // SIMD vector types.
2143 ty::Adt(def, ..) if def.repr.simd() => this.ty.simd_type(tcx),
2146 ty::Adt(def, substs) => {
2147 match this.variants {
2148 Variants::Single { index } => def.variants[index].fields[i].ty(tcx, substs),
2150 // Discriminant field for enums (where applicable).
2151 Variants::Multiple { ref tag, .. } => {
2153 return tag_layout(tag);
2160 | ty::Placeholder(..)
2164 | ty::Error(_) => bug!("TyAndLayout::field_type: unexpected type `{}`", this.ty),
2168 fn pointee_info_at(this: TyAndLayout<'tcx>, cx: &C, offset: Size) -> Option<PointeeInfo> {
2169 let addr_space_of_ty = |ty: Ty<'tcx>| {
2170 if ty.is_fn() { cx.data_layout().instruction_address_space } else { AddressSpace::DATA }
2173 let pointee_info = match this.ty.kind {
2174 ty::RawPtr(mt) if offset.bytes() == 0 => {
2175 cx.layout_of(mt.ty).to_result().ok().map(|layout| PointeeInfo {
2177 align: layout.align.abi,
2179 address_space: addr_space_of_ty(mt.ty),
2182 ty::FnPtr(fn_sig) if offset.bytes() == 0 => {
2183 cx.layout_of(cx.tcx().mk_fn_ptr(fn_sig)).to_result().ok().map(|layout| {
2186 align: layout.align.abi,
2188 address_space: cx.data_layout().instruction_address_space,
2192 ty::Ref(_, ty, mt) if offset.bytes() == 0 => {
2193 let address_space = addr_space_of_ty(ty);
2195 let is_freeze = ty.is_freeze(tcx.at(DUMMY_SP), cx.param_env());
2196 let kind = match mt {
2197 hir::Mutability::Not => {
2204 hir::Mutability::Mut => {
2205 // Previously we would only emit noalias annotations for LLVM >= 6 or in
2206 // panic=abort mode. That was deemed right, as prior versions had many bugs
2207 // in conjunction with unwinding, but later versions didn’t seem to have
2208 // said issues. See issue #31681.
2210 // Alas, later on we encountered a case where noalias would generate wrong
2211 // code altogether even with recent versions of LLVM in *safe* code with no
2212 // unwinding involved. See #54462.
2214 // For now, do not enable mutable_noalias by default at all, while the
2215 // issue is being figured out.
2216 if tcx.sess.opts.debugging_opts.mutable_noalias {
2217 PointerKind::UniqueBorrowed
2224 cx.layout_of(ty).to_result().ok().map(|layout| PointeeInfo {
2226 align: layout.align.abi,
2233 let mut data_variant = match this.variants {
2234 // Within the discriminant field, only the niche itself is
2235 // always initialized, so we only check for a pointer at its
2238 // If the niche is a pointer, it's either valid (according
2239 // to its type), or null (which the niche field's scalar
2240 // validity range encodes). This allows using
2241 // `dereferenceable_or_null` for e.g., `Option<&T>`, and
2242 // this will continue to work as long as we don't start
2243 // using more niches than just null (e.g., the first page of
2244 // the address space, or unaligned pointers).
2245 Variants::Multiple {
2246 tag_encoding: TagEncoding::Niche { dataful_variant, .. },
2249 } if this.fields.offset(tag_field) == offset => {
2250 Some(this.for_variant(cx, dataful_variant))
2255 if let Some(variant) = data_variant {
2256 // We're not interested in any unions.
2257 if let FieldsShape::Union(_) = variant.fields {
2258 data_variant = None;
2262 let mut result = None;
2264 if let Some(variant) = data_variant {
2265 let ptr_end = offset + Pointer.size(cx);
2266 for i in 0..variant.fields.count() {
2267 let field_start = variant.fields.offset(i);
2268 if field_start <= offset {
2269 let field = variant.field(cx, i);
2270 result = field.to_result().ok().and_then(|field| {
2271 if ptr_end <= field_start + field.size {
2272 // We found the right field, look inside it.
2274 field.pointee_info_at(cx, offset - field_start);
2280 if result.is_some() {
2287 // FIXME(eddyb) This should be for `ptr::Unique<T>`, not `Box<T>`.
2288 if let Some(ref mut pointee) = result {
2289 if let ty::Adt(def, _) = this.ty.kind {
2290 if def.is_box() && offset.bytes() == 0 {
2291 pointee.safe = Some(PointerKind::UniqueOwned);
2301 "pointee_info_at (offset={:?}, type kind: {:?}) => {:?}",
2302 offset, this.ty.kind, pointee_info
// Stable hashing for `LayoutError`, so layout failures can participate in
// incremental-compilation fingerprints.
2309 impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for LayoutError<'tcx> {
2310 fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
2311 use crate::ty::layout::LayoutError::*;
// Hash which variant this is first; the payload is folded in below.
2312 mem::discriminant(self).hash_stable(hcx, hasher);
// Both visible variants carry the offending `Ty`; hash it as well.
// NOTE(review): the enclosing `match` scaffolding and closing braces are
// elided in this listing — only the match arm is visible here.
2315 Unknown(t) | SizeOverflow(t) => t.hash_stable(hcx, hasher),
2320 impl<'tcx> ty::Instance<'tcx> {
2321 // NOTE(eddyb) this is private to avoid using it from outside of
2322 // `FnAbi::of_instance` - any other uses are either too high-level
2323 // for `Instance` (e.g. typeck would use `Ty::fn_sig` instead),
2324 // or should go through `FnAbi` instead, to avoid losing any
2325 // adjustments `FnAbi::of_instance` might be performing.
// Computes the `PolyFnSig` that ABI computation should use for this
// instance: closures gain their captured environment as an explicit first
// argument, and generators are rewritten to the `fn(Pin<&mut _>, Resume)
// -> GeneratorState<Yield, Return>` calling shape.
2326 fn fn_sig_for_fn_abi(&self, tcx: TyCtxt<'tcx>) -> ty::PolyFnSig<'tcx> {
2327 // FIXME(davidtwco,eddyb): A `ParamEnv` should be passed through to this function.
2328 let ty = self.ty(tcx, ty::ParamEnv::reveal_all());
2331 // HACK(davidtwco,eddyb): This is a workaround for polymorphization considering
2332 // parameters unused if they show up in the signature, but not in the `mir::Body`
2333 // (i.e. due to being inside a projection that got normalized, see
2334 // `src/test/ui/polymorphization/normalized_sig_types.rs`), and codegen not keeping
2335 // track of a polymorphization `ParamEnv` to allow normalizing later.
// Normalize the `FnDef` signature under the item's own `ParamEnv` before
// substituting, so projections in the signature are resolved eagerly.
2336 let mut sig = match ty.kind {
2337 ty::FnDef(def_id, substs) => tcx
2338 .normalize_erasing_regions(tcx.param_env(def_id), tcx.fn_sig(def_id))
2339 .subst(tcx, substs),
2340 _ => unreachable!(),
// NOTE(review): this listing elides the surrounding dispatch on `ty.kind`;
// the `ty::Closure`/`ty::Generator` arms below belong to an outer match
// whose scaffolding is not visible here.
2343 if let ty::InstanceDef::VtableShim(..) = self.def {
2344 // Modify `fn(self, ...)` to `fn(self: *mut Self, ...)`.
2345 sig = sig.map_bound(|mut sig| {
2346 let mut inputs_and_output = sig.inputs_and_output.to_vec();
2347 inputs_and_output[0] = tcx.mk_mut_ptr(inputs_and_output[0]);
2348 sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);
// Closures: prepend the environment type (by-value/by-ref capture struct)
// to the user-visible inputs.
2354 ty::Closure(def_id, substs) => {
2355 let sig = substs.as_closure().sig();
2357 let env_ty = tcx.closure_env_ty(def_id, substs).unwrap();
2358 sig.map_bound(|sig| {
2360 iter::once(env_ty.skip_binder()).chain(sig.inputs().iter().cloned()),
// Generators: the receiver is `Pin<&mut Self>` (built via the `Pin` lang
// item below) and the return type is `GeneratorState<Yield, Return>`.
2368 ty::Generator(_, substs, _) => {
2369 let sig = substs.as_generator().poly_sig();
// A fresh late-bound region for the `&mut self` environment reference.
2371 let env_region = ty::ReLateBound(ty::INNERMOST, ty::BrEnv);
2372 let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty);
2374 let pin_did = tcx.require_lang_item(PinTypeLangItem, None);
2375 let pin_adt_ref = tcx.adt_def(pin_did);
2376 let pin_substs = tcx.intern_substs(&[env_ty.into()]);
2377 let env_ty = tcx.mk_adt(pin_adt_ref, pin_substs);
2379 sig.map_bound(|sig| {
2380 let state_did = tcx.require_lang_item(GeneratorStateLangItem, None);
2381 let state_adt_ref = tcx.adt_def(state_did);
2383 tcx.intern_substs(&[sig.yield_ty.into(), sig.return_ty.into()]);
2384 let ret_ty = tcx.mk_adt(state_adt_ref, state_substs);
// Inputs are the pinned environment plus the resume argument; the
// resulting signature uses the plain Rust ABI.
2387 [env_ty, sig.resume_ty].iter(),
2390 hir::Unsafety::Normal,
2391 rustc_target::spec::abi::Abi::Rust,
2395 _ => bug!("unexpected type {:?} in Instance::fn_sig", ty),
// Extension trait providing the constructors for `call::FnAbi` used by
// codegen. `C` is the codegen context: it must be able to compute layouts
// and expose a `TyCtxt`/`ParamEnv` (bounds partially elided in this listing).
2400 pub trait FnAbiExt<'tcx, C>
2402 C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout = TyAndLayout<'tcx>>
2406 + HasParamEnv<'tcx>,
2408 /// Compute a `FnAbi` suitable for indirect calls, i.e. to `fn` pointers.
2410 /// NB: this doesn't handle virtual calls - those should use `FnAbi::of_instance`
2411 /// instead, where the instance is a `InstanceDef::Virtual`.
2412 fn of_fn_ptr(cx: &C, sig: ty::PolyFnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self;
2414 /// Compute a `FnAbi` suitable for declaring/defining an `fn` instance, and for
2415 /// direct calls to an `fn`.
2417 /// NB: that includes virtual calls, which are represented by "direct calls"
2418 /// to a `InstanceDef::Virtual` instance (of `<dyn Trait as Trait>::fn`).
2419 fn of_instance(cx: &C, instance: ty::Instance<'tcx>, extra_args: &[Ty<'tcx>]) -> Self;
// Parameter list of the shared constructor both entry points delegate to
// (the `fn new_internal(` line itself is elided in this listing).
// `mk_arg_type` builds the initial `ArgAbi` for each type; the `Option<usize>`
// is the argument index, with `None` denoting the return value.
2423 sig: ty::PolyFnSig<'tcx>,
2424 extra_args: &[Ty<'tcx>],
2425 caller_location: Option<Ty<'tcx>>,
2426 codegen_fn_attr_flags: CodegenFnAttrFlags,
2427 mk_arg_type: impl Fn(Ty<'tcx>, Option<usize>) -> ArgAbi<'tcx, Ty<'tcx>>,
// Target-specific fixups applied after the generic construction above.
2429 fn adjust_for_abi(&mut self, cx: &C, abi: SpecAbi);
// Parameters and decision ladder of `fn_can_unwind`, the helper that decides
// whether a function may unwind (it is called as
// `fn_can_unwind(panic_strategy, codegen_fn_attr_flags, conv)` when filling
// in `FnAbi::can_unwind` below). The `fn` signature line and the concrete
// `true`/`false` results of each branch are elided in this listing.
2433 panic_strategy: PanicStrategy,
2434 codegen_fn_attr_flags: CodegenFnAttrFlags,
2437 if panic_strategy != PanicStrategy::Unwind {
2438 // In panic=abort mode we assume nothing can unwind anywhere, so
2439 // optimize based on this!
2441 } else if codegen_fn_attr_flags.contains(CodegenFnAttrFlags::UNWIND) {
2442 // If a specific #[unwind] attribute is present, use that.
2444 } else if codegen_fn_attr_flags.contains(CodegenFnAttrFlags::RUSTC_ALLOCATOR_NOUNWIND) {
2445 // Special attribute for allocator functions, which can't unwind.
// With no attribute forcing a decision, fall back to the calling convention.
2448 if call_conv == Conv::Rust {
2449 // Any Rust method (or `extern "Rust" fn` or `extern
2450 // "rust-call" fn`) is explicitly allowed to unwind
2451 // (unless it has no-unwind attribute, handled above).
2454 // Anything else is either:
2456 // 1. A foreign item using a non-Rust ABI (like `extern "C" { fn foo(); }`), or
2458 // 2. A Rust item using a non-Rust ABI (like `extern "C" fn foo() { ... }`).
2460 // Foreign items (case 1) are assumed to not unwind; it is
2461 // UB otherwise. (At least for now; see also
2462 // rust-lang/rust#63909 and Rust RFC 2753.)
2464 // Items defined in Rust with non-Rust ABIs (case 2) are also
2465 // not supposed to unwind. Whether this should be enforced
2466 // (versus stating it is UB) and *how* it would be enforced
2467 // is currently under discussion; see rust-lang/rust#58794.
2469 // In either case, we mark item as explicitly nounwind.
2475 impl<'tcx, C> FnAbiExt<'tcx, C> for call::FnAbi<'tcx, Ty<'tcx>>
2477 C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout = TyAndLayout<'tcx>>
2481 + HasParamEnv<'tcx>,
// ABI for an indirect call through a `fn` pointer: no caller-location
// argument is threaded through (`None`), and each argument's `ArgAbi` is
// derived purely from its layout.
2483 fn of_fn_ptr(cx: &C, sig: ty::PolyFnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
2484 // Assume that fn pointers may always unwind
2485 let codegen_fn_attr_flags = CodegenFnAttrFlags::UNWIND;
2487 call::FnAbi::new_internal(cx, sig, extra_args, None, codegen_fn_attr_flags, |ty, _| {
2488 ArgAbi::new(cx.layout_of(ty))
// ABI for declaring/defining (or directly calling) a specific instance.
// The interesting work is the per-argument closure: for virtual calls it
// replaces the fat `dyn Trait` receiver with a thin data pointer.
2492 fn of_instance(cx: &C, instance: ty::Instance<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
2493 let sig = instance.fn_sig_for_fn_abi(cx.tcx());
// `#[track_caller]` functions take a hidden trailing `&Location` argument.
2495 let caller_location = if instance.def.requires_caller_location(cx.tcx()) {
2496 Some(cx.tcx().caller_location_ty())
// Codegen attributes (e.g. unwind-related flags) feed `can_unwind` below.
2501 let attrs = cx.tcx().codegen_fn_attrs(instance.def_id()).flags;
2503 call::FnAbi::new_internal(cx, sig, extra_args, caller_location, attrs, |ty, arg_idx| {
2504 let mut layout = cx.layout_of(ty);
2505 // Don't pass the vtable, it's not an argument of the virtual fn.
2506 // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
2507 // or `&/&mut dyn Trait` because this is special-cased elsewhere in codegen
2508 if let (ty::InstanceDef::Virtual(..), Some(0)) = (&instance.def, arg_idx) {
2509 let fat_pointer_ty = if layout.is_unsized() {
2510 // unsized `self` is passed as a pointer to `self`
2511 // FIXME (mikeyhew) change this to use &own if it is ever added to the language
2512 cx.tcx().mk_mut_ptr(layout.ty)
// Sized receivers must be fat pointers (ScalarPair); anything else is a bug.
2515 Abi::ScalarPair(..) => (),
2516 _ => bug!("receiver type has unsupported layout: {:?}", layout),
2519 // In the case of Rc<Self>, we need to explicitly pass a *mut RcBox<Self>
2520 // with a Scalar (not ScalarPair) ABI. This is a hack that is understood
2521 // elsewhere in the compiler as a method on a `dyn Trait`.
2522 // To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes until we
2523 // get a built-in pointer type
2524 let mut fat_pointer_layout = layout;
2525 'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr()
2526 && !fat_pointer_layout.ty.is_region_ptr()
// Descend into the first (and only) non-ZST field of each wrapper.
2528 for i in 0..fat_pointer_layout.fields.count() {
2529 let field_layout = fat_pointer_layout.field(cx, i);
2531 if !field_layout.is_zst() {
2532 fat_pointer_layout = field_layout;
2533 continue 'descend_newtypes;
2537 bug!("receiver has no non-zero-sized fields {:?}", fat_pointer_layout);
2540 fat_pointer_layout.ty
2543 // we now have a type like `*mut RcBox<dyn Trait>`
2544 // change its layout to that of `*mut ()`, a thin pointer, but keep the same type
2545 // this is understood as a special case elsewhere in the compiler
2546 let unit_pointer_ty = cx.tcx().mk_mut_ptr(cx.tcx().mk_unit());
2547 layout = cx.layout_of(unit_pointer_ty);
2548 layout.ty = fat_pointer_ty;
// Body of `new_internal`, the shared `FnAbi` constructor both `of_fn_ptr`
// and `of_instance` delegate to (the `fn new_internal(` line itself is
// elided in this listing). Phases: erase late-bound regions; map the Rust
// ABI string to a target calling convention (`Conv`); untuple "rust-call"
// arguments; compute target quirks for ZST passing; attach LLVM-relevant
// attributes to Rust scalar/pointer arguments; assemble the `FnAbi` and run
// `adjust_for_abi`.
2556 sig: ty::PolyFnSig<'tcx>,
2557 extra_args: &[Ty<'tcx>],
2558 caller_location: Option<Ty<'tcx>>,
2559 codegen_fn_attr_flags: CodegenFnAttrFlags,
2560 mk_arg_type: impl Fn(Ty<'tcx>, Option<usize>) -> ArgAbi<'tcx, Ty<'tcx>>,
2562 debug!("FnAbi::new_internal({:?}, {:?})", sig, extra_args);
// ABI computation never depends on region binders; erase them up front.
2564 let sig = cx.tcx().normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig);
2566 use rustc_target::spec::abi::Abi::*;
// Let the target remap the declared ABI first (e.g. for targets that
// alias certain conventions), then translate it to a `Conv`.
2567 let conv = match cx.tcx().sess.target.target.adjust_abi(sig.abi) {
2568 RustIntrinsic | PlatformIntrinsic | Rust | RustCall => Conv::Rust,
2570 // It's the ABI's job to select this, not ours.
2571 System => bug!("system abi should be selected elsewhere"),
2572 EfiApi => bug!("eficall abi should be selected elsewhere"),
2574 Stdcall => Conv::X86Stdcall,
2575 Fastcall => Conv::X86Fastcall,
2576 Vectorcall => Conv::X86VectorCall,
2577 Thiscall => Conv::X86ThisCall,
2579 Unadjusted => Conv::C,
2580 Win64 => Conv::X86_64Win64,
2581 SysV64 => Conv::X86_64SysV,
2582 Aapcs => Conv::ArmAapcs,
2583 PtxKernel => Conv::PtxKernel,
2584 Msp430Interrupt => Conv::Msp430Intr,
2585 X86Interrupt => Conv::X86Intr,
2586 AmdGpuKernel => Conv::AmdGpuKernel,
2587 AvrInterrupt => Conv::AvrInterrupt,
2588 AvrNonBlockingInterrupt => Conv::AvrNonBlockingInterrupt,
2590 // These API constants ought to be more specific...
// "rust-call" functions receive their arguments as a single trailing
// tuple; split that tuple back into individual `extra_args` here.
2594 let mut inputs = sig.inputs();
2595 let extra_args = if sig.abi == RustCall {
2596 assert!(!sig.c_variadic && extra_args.is_empty());
2598 if let Some(input) = sig.inputs().last() {
2599 if let ty::Tuple(tupled_arguments) = input.kind {
2600 inputs = &sig.inputs()[0..sig.inputs().len() - 1];
2601 tupled_arguments.iter().map(|k| k.expect_ty()).collect()
2604 "argument to function with \"rust-call\" ABI \
2610 "argument to function with \"rust-call\" ABI \
2615 assert!(sig.c_variadic || extra_args.is_empty());
// Target predicates for platforms whose C ABI does NOT ignore zero-sized
// arguments (consulted in `arg_of` below).
2619 let target = &cx.tcx().sess.target.target;
2620 let target_env_gnu_like = matches!(&target.target_env[..], "gnu" | "musl");
2622 target.target_os == "windows" && target.arch == "x86_64" && target.target_env == "gnu";
2623 let linux_s390x_gnu_like =
2624 target.target_os == "linux" && target.arch == "s390x" && target_env_gnu_like;
2625 let linux_sparc64_gnu_like =
2626 target.target_os == "linux" && target.arch == "sparc64" && target_env_gnu_like;
2627 let linux_powerpc_gnu_like =
2628 target.target_os == "linux" && target.arch == "powerpc" && target_env_gnu_like;
2629 let rust_abi = match sig.abi {
2630 RustIntrinsic | PlatformIntrinsic | Rust | RustCall => true,
2634 // Handle safe Rust thin and fat pointers.
// Closure attaching LLVM argument attributes (ZExt/NonNull/NoAlias/
// ReadOnly, dereferenceable size, alignment) to a scalar component of a
// Rust-ABI argument, based on its validity range and pointee info.
2635 let adjust_for_rust_scalar = |attrs: &mut ArgAttributes,
2637 layout: TyAndLayout<'tcx>,
2640 // Booleans are always an i1 that needs to be zero-extended.
2641 if scalar.is_bool() {
2642 attrs.set(ArgAttribute::ZExt);
2646 // Only pointer types handled below.
2647 if scalar.value != Pointer {
// A validity range excluding 0 lets us mark the pointer `nonnull`.
2651 if scalar.valid_range.start() < scalar.valid_range.end() {
2652 if *scalar.valid_range.start() > 0 {
2653 attrs.set(ArgAttribute::NonNull);
2657 if let Some(pointee) = layout.pointee_info_at(cx, offset) {
2658 if let Some(kind) = pointee.safe {
2659 attrs.pointee_align = Some(pointee.align);
2661 // `Box` (`UniqueBorrowed`) are not necessarily dereferenceable
2662 // for the entire duration of the function as they can be deallocated
2663 // at any time. Set their valid size to 0.
2664 attrs.pointee_size = match kind {
2665 PointerKind::UniqueOwned => Size::ZERO,
2669 // `Box` pointer parameters never alias because ownership is transferred
2670 // `&mut` pointer parameters never alias other parameters,
2671 // or mutable global data
2673 // `&T` where `T` contains no `UnsafeCell<U>` is immutable,
2674 // and can be marked as both `readonly` and `noalias`, as
2675 // LLVM's definition of `noalias` is based solely on memory
2676 // dependencies rather than pointer equality
2677 let no_alias = match kind {
2678 PointerKind::Shared => false,
2679 PointerKind::UniqueOwned => true,
2680 PointerKind::Frozen | PointerKind::UniqueBorrowed => !is_return,
2683 attrs.set(ArgAttribute::NoAlias);
2686 if kind == PointerKind::Frozen && !is_return {
2687 attrs.set(ArgAttribute::ReadOnly);
// Closure building the `ArgAbi` for one argument (or the return value,
// when `arg_idx` is `None`): start from `mk_arg_type`, then pick the
// `PassMode` (Ignore for ZSTs where the target allows, Pair for
// ScalarPair on the Rust ABI, Direct with attributes for scalars).
2693 let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| {
2694 let is_return = arg_idx.is_none();
2695 let mut arg = mk_arg_type(ty, arg_idx);
2696 if arg.layout.is_zst() {
2697 // For some forsaken reason, x86_64-pc-windows-gnu
2698 // doesn't ignore zero-sized struct arguments.
2699 // The same is true for {s390x,sparc64,powerpc}-unknown-linux-{gnu,musl}.
2703 && !linux_s390x_gnu_like
2704 && !linux_sparc64_gnu_like
2705 && !linux_powerpc_gnu_like)
2707 arg.mode = PassMode::Ignore;
2711 // FIXME(eddyb) other ABIs don't have logic for scalar pairs.
2712 if !is_return && rust_abi {
2713 if let Abi::ScalarPair(ref a, ref b) = arg.layout.abi {
2714 let mut a_attrs = ArgAttributes::new();
2715 let mut b_attrs = ArgAttributes::new();
2716 adjust_for_rust_scalar(&mut a_attrs, a, arg.layout, Size::ZERO, false);
2717 adjust_for_rust_scalar(
// The second scalar lives after the first, aligned to its own ABI
// alignment — that offset is what pointee info is queried at.
2721 a.value.size(cx).align_to(b.value.align(cx).abi),
2724 arg.mode = PassMode::Pair(a_attrs, b_attrs);
2729 if let Abi::Scalar(ref scalar) = arg.layout.abi {
2730 if let PassMode::Direct(ref mut attrs) = arg.mode {
2731 adjust_for_rust_scalar(attrs, scalar, arg.layout, Size::ZERO, is_return);
// Assemble the final `FnAbi`: return slot, declared inputs plus any
// extra (untupled / variadic) args plus the hidden caller-location
// argument, and the unwind decision from `fn_can_unwind`.
2738 let mut fn_abi = FnAbi {
2739 ret: arg_of(sig.output(), None),
2744 .chain(caller_location)
2746 .map(|(i, ty)| arg_of(ty, Some(i)))
2748 c_variadic: sig.c_variadic,
2749 fixed_count: inputs.len(),
2751 can_unwind: fn_can_unwind(cx.tcx().sess.panic_strategy(), codegen_fn_attr_flags, conv),
// Apply target-specific fixups (indirect passing, casts) before returning.
2753 fn_abi.adjust_for_abi(cx, sig.abi);
2757 fn adjust_for_abi(&mut self, cx: &C, abi: SpecAbi) {
2758 if abi == SpecAbi::Unadjusted {
2762 if abi == SpecAbi::Rust
2763 || abi == SpecAbi::RustCall
2764 || abi == SpecAbi::RustIntrinsic
2765 || abi == SpecAbi::PlatformIntrinsic
2767 let fixup = |arg: &mut ArgAbi<'tcx, Ty<'tcx>>| {
2768 if arg.is_ignore() {
2772 match arg.layout.abi {
2773 Abi::Aggregate { .. } => {}
2775 // This is a fun case! The gist of what this is doing is
2776 // that we want callers and callees to always agree on the
2777 // ABI of how they pass SIMD arguments. If we were to *not*
2778 // make these arguments indirect then they'd be immediates
2779 // in LLVM, which means that they'd used whatever the
2780 // appropriate ABI is for the callee and the caller. That
2781 // means, for example, if the caller doesn't have AVX
2782 // enabled but the callee does, then passing an AVX argument
2783 // across this boundary would cause corrupt data to show up.
2785 // This problem is fixed by unconditionally passing SIMD
2786 // arguments through memory between callers and callees
2787 // which should get them all to agree on ABI regardless of
2788 // target feature sets. Some more information about this
2789 // issue can be found in #44367.
2791 // Note that the platform intrinsic ABI is exempt here as
2792 // that's how we connect up to LLVM and it's unstable
2793 // anyway, we control all calls to it in libstd.
2795 if abi != SpecAbi::PlatformIntrinsic
2796 && cx.tcx().sess.target.target.options.simd_types_indirect =>
2798 arg.make_indirect();
2805 let size = arg.layout.size;
2806 if arg.layout.is_unsized() || size > Pointer.size(cx) {
2807 arg.make_indirect();
2809 // We want to pass small aggregates as immediates, but using
2810 // a LLVM aggregate type for this leads to bad optimizations,
2811 // so we pick an appropriately sized integer type instead.
2812 arg.cast_to(Reg { kind: RegKind::Integer, size });
2815 fixup(&mut self.ret);
2816 for arg in &mut self.args {
2819 if let PassMode::Indirect(ref mut attrs, _) = self.ret.mode {
2820 attrs.set(ArgAttribute::StructRet);
2825 if let Err(msg) = self.adjust_for_cabi(cx, abi) {
2826 cx.tcx().sess.fatal(&msg);