1 use crate::ich::StableHashingContext;
2 use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
3 use crate::mir::{GeneratorLayout, GeneratorSavedLocal};
4 use crate::ty::subst::Subst;
5 use crate::ty::{self, subst::SubstsRef, ReprOptions, Ty, TyCtxt, TypeFoldable};
8 use rustc_attr as attr;
9 use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
11 use rustc_hir::lang_items::LangItem;
12 use rustc_index::bit_set::BitSet;
13 use rustc_index::vec::{Idx, IndexVec};
14 use rustc_session::{config::OptLevel, DataTypeKind, FieldInfo, SizeKind, VariantInfo};
15 use rustc_span::symbol::{Ident, Symbol};
16 use rustc_span::DUMMY_SP;
17 use rustc_target::abi::call::{
18 ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, Conv, FnAbi, PassMode, Reg, RegKind,
20 use rustc_target::abi::*;
21 use rustc_target::spec::{abi::Abi as SpecAbi, HasTargetSpec, PanicStrategy};
27 use std::num::NonZeroUsize;
/// Extension methods on `rustc_target::abi::Integer` for converting between
/// ABI integer widths and `rustc_middle` integer types / attribute int types.
30 pub trait IntegerExt {
/// Converts this integer width to the corresponding Rust type:
/// `u8`..`u128` when `signed` is false, `i8`..`i128` when true.
31 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx>;
/// Maps an `attr::IntType` (from a `#[repr(..)]` attribute) to an ABI width;
/// `isize`/`usize` resolve via the target's data layout.
32 fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
/// Maps a `ty::IntTy` to its ABI width (pointer-sized for `Isize`).
33 fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer;
/// Maps a `ty::UintTy` to its ABI width (pointer-sized for `Usize`).
34 fn from_uint_ty<C: HasDataLayout>(cx: &C, uty: ty::UintTy) -> Integer;
44 impl IntegerExt for Integer {
// Exhaustive (width, signedness) -> primitive-type table; every arm reads a
// pre-interned type out of `tcx.types`, so no interning happens here.
46 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx> {
47 match (*self, signed) {
48 (I8, false) => tcx.types.u8,
49 (I16, false) => tcx.types.u16,
50 (I32, false) => tcx.types.u32,
51 (I64, false) => tcx.types.u64,
52 (I128, false) => tcx.types.u128,
53 (I8, true) => tcx.types.i8,
54 (I16, true) => tcx.types.i16,
55 (I32, true) => tcx.types.i32,
56 (I64, true) => tcx.types.i64,
57 (I128, true) => tcx.types.i128,
61 /// Gets the Integer type from an attr::IntType.
62 fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
63 let dl = cx.data_layout();
// Signed/unsigned pairs share a width, so each arm matches both.
66 attr::SignedInt(ast::IntTy::I8) | attr::UnsignedInt(ast::UintTy::U8) => I8,
67 attr::SignedInt(ast::IntTy::I16) | attr::UnsignedInt(ast::UintTy::U16) => I16,
68 attr::SignedInt(ast::IntTy::I32) | attr::UnsignedInt(ast::UintTy::U32) => I32,
69 attr::SignedInt(ast::IntTy::I64) | attr::UnsignedInt(ast::UintTy::U64) => I64,
70 attr::SignedInt(ast::IntTy::I128) | attr::UnsignedInt(ast::UintTy::U128) => I128,
// `isize`/`usize` width is target-dependent; ask the data layout.
71 attr::SignedInt(ast::IntTy::Isize) | attr::UnsignedInt(ast::UintTy::Usize) => {
72 dl.ptr_sized_integer()
77 fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer {
80 ty::IntTy::I16 => I16,
81 ty::IntTy::I32 => I32,
82 ty::IntTy::I64 => I64,
83 ty::IntTy::I128 => I128,
84 ty::IntTy::Isize => cx.data_layout().ptr_sized_integer(),
87 fn from_uint_ty<C: HasDataLayout>(cx: &C, ity: ty::UintTy) -> Integer {
90 ty::UintTy::U16 => I16,
91 ty::UintTy::U32 => I32,
92 ty::UintTy::U64 => I64,
93 ty::UintTy::U128 => I128,
94 ty::UintTy::Usize => cx.data_layout().ptr_sized_integer(),
98 /// Finds the appropriate Integer type and signedness for the given
99 /// signed discriminant range and `#[repr]` attribute.
100 /// N.B.: `u128` values above `i128::MAX` will be treated as signed, but
101 /// that shouldn't affect anything, other than maybe debuginfo.
// NOTE(review): the parameter list of `repr_discr` (tcx, ty, repr, min, max)
// is not visible in this view — confirm against the full source.
108 ) -> (Integer, bool) {
109 // Theoretically, negative values could be larger in unsigned representation
110 // than the unsigned representation of the signed minimum. However, if there
111 // are any negative values, the only valid unsigned representation is u128
112 // which can fit all i128 values, so the result remains unaffected.
113 let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
114 let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));
// An explicit `#[repr(int)]` hint wins outright; it is only validated
// (hint must be able to hold the whole discriminant range), never widened.
116 if let Some(ity) = repr.int {
117 let discr = Integer::from_attr(&tcx, ity);
118 let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
121 "Integer::repr_discr: `#[repr]` hint too small for \
122 discriminant range of enum `{}",
126 return (discr, ity.is_signed());
129 let at_least = if repr.c() {
130 // This is usually I32, however it can be different on some platforms,
131 // notably hexagon and arm-none/thumb-none
132 tcx.data_layout().c_enum_min_size
134 // repr(Rust) enums try to be as small as possible
138 // If there are no negative values, we can use the unsigned fit.
140 (cmp::max(unsigned_fit, at_least), false)
142 (cmp::max(signed_fit, at_least), true)
/// Extension methods on `rustc_target::abi::Primitive` for recovering a
/// `rustc_middle` type from an ABI primitive.
147 pub trait PrimitiveExt {
/// The Rust type this primitive naturally corresponds to.
148 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
/// An *integer* type of the same size as this primitive (see impl docs).
149 fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
152 impl PrimitiveExt for Primitive {
154 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
156 Int(i, signed) => i.to_ty(tcx, signed),
157 F32 => tcx.types.f32,
158 F64 => tcx.types.f64,
// There is no single "the" pointer type; `*mut ()` stands in for any
// thin pointer-sized/aligned value.
159 Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
163 /// Return an *integer* type matching this primitive.
164 /// Useful in particular when dealing with enum discriminants.
// NOTE(review): the trait declares `to_int_ty<'tcx>` but the generic
// parameter is missing here — confirm against the full source.
166 fn to_int_ty(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
168 Int(i, signed) => i.to_ty(tcx, signed),
// Pointers are mapped to `usize`, the pointer-sized unsigned integer.
169 Pointer => tcx.types.usize,
// Callers must never ask for an integer type of a float primitive.
170 F32 | F64 => bug!("floats do not have an int type"),
175 /// The first half of a fat pointer.
177 /// - For a trait object, this is the address of the box.
178 /// - For a slice, this is the base address.
179 pub const FAT_PTR_ADDR: usize = 0;
181 /// The second half of a fat pointer.
183 /// - For a trait object, this is the address of the vtable.
184 /// - For a slice, this is the length.
185 pub const FAT_PTR_EXTRA: usize = 1;
187 /// The maximum supported number of lanes in a SIMD vector.
189 /// This value is selected based on backend support:
190 /// * LLVM does not appear to have a vector width limit.
191 /// * Cranelift stores the base-2 log of the lane count in a 4 bit integer.
// `1 << 0xF` == 2^15: the largest lane count whose base-2 log (15) still
// fits in Cranelift's 4-bit field.
192 pub const MAX_SIMD_LANES: u64 = 1 << 0xF;
/// Errors produced while computing a type's layout. Kept `Copy` and
/// encodable so it can travel through the query system cheaply.
194 #[derive(Copy, Clone, Debug, TyEncodable, TyDecodable)]
195 pub enum LayoutError<'tcx> {
/// The type's size exceeds what the target can represent.
/// (A `Unknown(Ty)` variant also exists; its line is elided in this view.)
197 SizeOverflow(Ty<'tcx>),
// User-facing rendering of layout failures; these strings surface in
// compiler diagnostics, so keep them stable.
200 impl<'tcx> fmt::Display for LayoutError<'tcx> {
201 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
203 LayoutError::Unknown(ty) => write!(f, "the type `{}` has an unknown layout", ty),
204 LayoutError::SizeOverflow(ty) => {
205 write!(f, "values of the type `{}` are too big for the current architecture", ty)
// Provider for the `layout_of` query: computes the layout of `query.value`
// under `query.param_env`. (The `fn layout_of(tcx, ...)` signature line is
// elided in this view.)
213 query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
214 ) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
215 ty::tls::with_related_context(tcx, move |icx| {
216 let (param_env, ty) = query.into_parts();
// Guard against unboundedly recursive types: layout computation can
// recurse through fields, so a depth counter enforces the recursion limit.
218 if !tcx.recursion_limit().value_within_limit(icx.layout_depth) {
219 tcx.sess.fatal(&format!("overflow representing the type `{}`", ty));
222 // Update the ImplicitCtxt to increase the layout_depth
223 let icx = ty::tls::ImplicitCtxt { layout_depth: icx.layout_depth + 1, ..icx.clone() };
225 ty::tls::enter_context(&icx, |_| {
226 let param_env = param_env.with_reveal_all_normalized(tcx);
227 let unnormalized_ty = ty;
228 let ty = tcx.normalize_erasing_regions(param_env, ty);
// Re-dispatch through the query on the normalized type so both the
// normalized and unnormalized keys end up cached with the same layout.
229 if ty != unnormalized_ty {
230 // Ensure this layout is also cached for the normalized type.
231 return tcx.layout_of(param_env.and(ty));
234 let cx = LayoutCx { tcx, param_env };
236 let layout = cx.layout_of_uncached(ty)?;
237 let layout = TyAndLayout { ty, layout };
239 cx.record_layout_for_printing(layout);
241 // Type-level uninhabitedness should always imply ABI uninhabitedness.
// Sanity check only: `conservative_is_privately_uninhabited` never
// returns true for a type whose layout claims to be inhabited.
242 if tcx.conservative_is_privately_uninhabited(param_env.and(ty)) {
243 assert!(layout.abi.is_uninhabited());
/// Registers this module's query providers (currently just `layout_of`)
/// into the global provider table, preserving all other entries.
251 pub fn provide(providers: &mut ty::query::Providers) {
252 *providers = ty::query::Providers { layout_of, ..*providers };
/// Context for layout computation: pairs a compiler handle `C` with the
/// parameter environment used for normalization and sizedness checks.
/// (A `tcx: C` field also exists; its line is elided in this view.)
255 pub struct LayoutCx<'tcx, C> {
257 pub param_env: ty::ParamEnv<'tcx>,
// Describes how a univariant layout may (or may not) be extended: consumed
// by `univariant_uninterned` to decide field reordering and sizing. (The
// enum header and the first two variant declarations are elided in this view.)
260 #[derive(Copy, Clone, Debug)]
262 /// A tuple, closure, or univariant which cannot be coerced to unsized.
264 /// A univariant, the last field of which may be coerced to unsized.
266 /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
267 Prefixed(Size, Align),
270 // Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
271 // This is used to go between `memory_index` (source field order to memory order)
272 // and `inverse_memory_index` (memory order to source field order).
273 // See also `FieldsShape::Arbitrary::memory_index` for more details.
274 // FIXME(eddyb) build a better abstraction for permutations, if possible.
// O(n). Correct only if `map` is a permutation of `0..map.len()`; a
// non-bijective input would silently overwrite entries.
275 fn invert_mapping(map: &[u32]) -> Vec<u32> {
276 let mut inverse = vec![0; map.len()];
277 for i in 0..map.len() {
278 inverse[map[i] as usize] = i as u32;
283 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
// Builds the canonical `ScalarPair` layout for two scalars: `a` at offset 0,
// `b` at `a`'s size rounded up to `b`'s alignment, with the overall size
// padded to the pair's (aggregate-bumped) alignment.
284 fn scalar_pair(&self, a: Scalar, b: Scalar) -> Layout {
285 let dl = self.data_layout();
286 let b_align = b.value.align(dl);
287 let align = a.value.align(dl).max(b_align).max(dl.aggregate_align);
288 let b_offset = a.value.size(dl).align_to(b_align.abi);
289 let size = (b_offset + b.value.size(dl)).align_to(align.abi);
291 // HACK(nox): We iter on `b` and then `a` because `max_by_key`
292 // returns the last maximum.
// On a tie we prefer `a`'s niche (offset 0): niches at lower offsets
// are preferred, and `a` comes last in the chain.
293 let largest_niche = Niche::from_scalar(dl, b_offset, b.clone())
295 .chain(Niche::from_scalar(dl, Size::ZERO, a.clone()))
296 .max_by_key(|niche| niche.available(dl));
299 variants: Variants::Single { index: VariantIdx::new(0) },
300 fields: FieldsShape::Arbitrary {
301 offsets: vec![Size::ZERO, b_offset],
302 memory_index: vec![0, 1],
304 abi: Abi::ScalarPair(a, b),
// Computes the (not-yet-interned) layout of a single-variant aggregate:
// optionally reorders fields for size, assigns offsets/alignment, tracks the
// largest niche, and tries to upgrade the ABI to Scalar/ScalarPair.
// (Some parameter and brace lines are elided in this view.)
311 fn univariant_uninterned(
314 fields: &[TyAndLayout<'_>],
317 ) -> Result<Layout, LayoutError<'tcx>> {
318 let dl = self.data_layout();
319 let pack = repr.pack;
// `#[repr(packed)]` + `#[repr(align)]` on one struct is rejected earlier;
// reaching here with both is a compiler bug, reported via delay_span_bug.
320 if pack.is_some() && repr.align.is_some() {
321 self.tcx.sess.delay_span_bug(DUMMY_SP, "struct cannot be packed and aligned");
322 return Err(LayoutError::Unknown(ty));
325 let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };
// Start with the identity permutation; sorting below (if allowed) reorders
// fields in *memory* order while `fields` stays in source order.
327 let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();
329 let optimize = !repr.inhibit_struct_field_reordering_opt();
// A MaybeUnsized struct must keep its last (possibly-unsized) field last,
// so it is excluded from the reordering window.
332 if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
333 let optimizing = &mut inverse_memory_index[..end];
334 let field_align = |f: &TyAndLayout<'_>| {
335 if let Some(pack) = pack { f.align.abi.min(pack) } else { f.align.abi }
338 StructKind::AlwaysSized | StructKind::MaybeUnsized => {
339 optimizing.sort_by_key(|&x| {
340 // Place ZSTs first to avoid "interesting offsets",
341 // especially with only one or two non-ZST fields.
342 let f = &fields[x as usize];
343 (!f.is_zst(), cmp::Reverse(field_align(f)))
346 StructKind::Prefixed(..) => {
347 // Sort in ascending alignment so that the layout stay optimal
348 // regardless of the prefix
349 optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
354 // inverse_memory_index holds field indices by increasing memory offset.
355 // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
356 // We now write field offsets to the corresponding offset slot;
357 // field 5 with offset 0 puts 0 in offsets[5].
358 // At the bottom of this function, we invert `inverse_memory_index` to
359 // produce `memory_index` (see `invert_mapping`).
361 let mut sized = true;
362 let mut offsets = vec![Size::ZERO; fields.len()];
363 let mut offset = Size::ZERO;
364 let mut largest_niche = None;
365 let mut largest_niche_available = 0;
// For Prefixed layouts (e.g. an enum tag), reserve the prefix up front.
367 if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
369 if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
370 align = align.max(AbiAndPrefAlign::new(prefix_align));
371 offset = prefix_size.align_to(prefix_align);
// Main placement loop: walk fields in memory order, aligning and
// accumulating offsets while tracking the best niche found so far.
374 for &i in &inverse_memory_index {
375 let field = fields[i as usize];
377 self.tcx.sess.delay_span_bug(
380 "univariant: field #{} of `{}` comes after unsized field",
387 if field.is_unsized() {
391 // Invariant: offset < dl.obj_size_bound() <= 1<<61
392 let field_align = if let Some(pack) = pack {
393 field.align.min(AbiAndPrefAlign::new(pack))
397 offset = offset.align_to(field_align.abi);
398 align = align.max(field_align);
400 debug!("univariant offset: {:?} field: {:#?}", offset, field);
401 offsets[i as usize] = offset;
403 if !repr.hide_niche() {
404 if let Some(mut niche) = field.largest_niche.clone() {
405 let available = niche.available(dl);
406 if available > largest_niche_available {
407 largest_niche_available = available;
// Niche offsets are relative to the field; rebase to the struct.
408 niche.offset += offset;
409 largest_niche = Some(niche);
// `checked_add` guards against total size overflowing the target's limits.
414 offset = offset.checked_add(field.size, dl).ok_or(LayoutError::SizeOverflow(ty))?;
417 if let Some(repr_align) = repr.align {
418 align = align.max(AbiAndPrefAlign::new(repr_align));
421 debug!("univariant min_size: {:?}", offset);
422 let min_size = offset;
424 // As stated above, inverse_memory_index holds field indices by increasing offset.
425 // This makes it an already-sorted view of the offsets vec.
426 // To invert it, consider:
427 // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
428 // Field 5 would be the first element, so memory_index is i:
429 // Note: if we didn't optimize, it's already right.
432 if optimize { invert_mapping(&inverse_memory_index) } else { inverse_memory_index };
434 let size = min_size.align_to(align.abi);
435 let mut abi = Abi::Aggregate { sized };
437 // Unpack newtype ABIs and find scalar pairs.
438 if sized && size.bytes() > 0 {
439 // All other fields must be ZSTs.
440 let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());
442 match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
443 // We have exactly one non-ZST field.
444 (Some((i, field)), None, None) => {
445 // Field fills the struct and it has a scalar or scalar pair ABI.
446 if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size
449 // For plain scalars, or vectors of them, we can't unpack
450 // newtypes for `#[repr(C)]`, as that affects C ABIs.
451 Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
452 abi = field.abi.clone();
454 // But scalar pairs are Rust-specific and get
455 // treated as aggregates by C ABIs anyway.
456 Abi::ScalarPair(..) => {
457 abi = field.abi.clone();
464 // Two non-ZST fields, and they're both scalars.
466 Some((i, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(ref a), .. }, .. })),
467 Some((j, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(ref b), .. }, .. })),
470 // Order by the memory placement, not source order.
471 let ((i, a), (j, b)) =
472 if offsets[i] < offsets[j] { ((i, a), (j, b)) } else { ((j, b), (i, a)) };
473 let pair = self.scalar_pair(a.clone(), b.clone());
474 let pair_offsets = match pair.fields {
475 FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
476 assert_eq!(memory_index, &[0, 1]);
481 if offsets[i] == pair_offsets[0]
482 && offsets[j] == pair_offsets[1]
483 && align == pair.align
486 // We can use `ScalarPair` only when it matches our
487 // already computed layout (including `#[repr(C)]`).
// Any uninhabited field makes the whole aggregate uninhabited.
496 if sized && fields.iter().any(|f| f.abi.is_uninhabited()) {
497 abi = Abi::Uninhabited;
501 variants: Variants::Single { index: VariantIdx::new(0) },
502 fields: FieldsShape::Arbitrary { offsets, memory_index },
510 fn layout_of_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx Layout, LayoutError<'tcx>> {
512 let param_env = self.param_env;
513 let dl = self.data_layout();
514 let scalar_unit = |value: Primitive| {
515 let bits = value.size(dl).bits();
516 assert!(bits <= 128);
517 Scalar { value, valid_range: WrappingRange { start: 0, end: (!0 >> (128 - bits)) } }
519 let scalar = |value: Primitive| tcx.intern_layout(Layout::scalar(self, scalar_unit(value)));
521 let univariant = |fields: &[TyAndLayout<'_>], repr: &ReprOptions, kind| {
522 Ok(tcx.intern_layout(self.univariant_uninterned(ty, fields, repr, kind)?))
524 debug_assert!(!ty.has_infer_types_or_consts());
526 Ok(match *ty.kind() {
528 ty::Bool => tcx.intern_layout(Layout::scalar(
530 Scalar { value: Int(I8, false), valid_range: WrappingRange { start: 0, end: 1 } },
532 ty::Char => tcx.intern_layout(Layout::scalar(
535 value: Int(I32, false),
536 valid_range: WrappingRange { start: 0, end: 0x10FFFF },
539 ty::Int(ity) => scalar(Int(Integer::from_int_ty(dl, ity), true)),
540 ty::Uint(ity) => scalar(Int(Integer::from_uint_ty(dl, ity), false)),
541 ty::Float(fty) => scalar(match fty {
542 ty::FloatTy::F32 => F32,
543 ty::FloatTy::F64 => F64,
546 let mut ptr = scalar_unit(Pointer);
547 ptr.valid_range = ptr.valid_range.with_start(1);
548 tcx.intern_layout(Layout::scalar(self, ptr))
552 ty::Never => tcx.intern_layout(Layout {
553 variants: Variants::Single { index: VariantIdx::new(0) },
554 fields: FieldsShape::Primitive,
555 abi: Abi::Uninhabited,
561 // Potentially-wide pointers.
562 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
563 let mut data_ptr = scalar_unit(Pointer);
564 if !ty.is_unsafe_ptr() {
565 data_ptr.valid_range = data_ptr.valid_range.with_start(1);
568 let pointee = tcx.normalize_erasing_regions(param_env, pointee);
569 if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
570 return Ok(tcx.intern_layout(Layout::scalar(self, data_ptr)));
573 let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
574 let metadata = match unsized_part.kind() {
576 return Ok(tcx.intern_layout(Layout::scalar(self, data_ptr)));
578 ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)),
580 let mut vtable = scalar_unit(Pointer);
581 vtable.valid_range = vtable.valid_range.with_start(1);
584 _ => return Err(LayoutError::Unknown(unsized_part)),
587 // Effectively a (ptr, meta) tuple.
588 tcx.intern_layout(self.scalar_pair(data_ptr, metadata))
591 // Arrays and slices.
592 ty::Array(element, mut count) => {
593 if count.has_projections() {
594 count = tcx.normalize_erasing_regions(param_env, count);
595 if count.has_projections() {
596 return Err(LayoutError::Unknown(ty));
600 let count = count.try_eval_usize(tcx, param_env).ok_or(LayoutError::Unknown(ty))?;
601 let element = self.layout_of(element)?;
603 element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;
606 if count != 0 && tcx.conservative_is_privately_uninhabited(param_env.and(ty)) {
609 Abi::Aggregate { sized: true }
612 let largest_niche = if count != 0 { element.largest_niche.clone() } else { None };
614 tcx.intern_layout(Layout {
615 variants: Variants::Single { index: VariantIdx::new(0) },
616 fields: FieldsShape::Array { stride: element.size, count },
619 align: element.align,
623 ty::Slice(element) => {
624 let element = self.layout_of(element)?;
625 tcx.intern_layout(Layout {
626 variants: Variants::Single { index: VariantIdx::new(0) },
627 fields: FieldsShape::Array { stride: element.size, count: 0 },
628 abi: Abi::Aggregate { sized: false },
630 align: element.align,
634 ty::Str => tcx.intern_layout(Layout {
635 variants: Variants::Single { index: VariantIdx::new(0) },
636 fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
637 abi: Abi::Aggregate { sized: false },
644 ty::FnDef(..) => univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?,
645 ty::Dynamic(..) | ty::Foreign(..) => {
646 let mut unit = self.univariant_uninterned(
649 &ReprOptions::default(),
650 StructKind::AlwaysSized,
653 Abi::Aggregate { ref mut sized } => *sized = false,
656 tcx.intern_layout(unit)
659 ty::Generator(def_id, substs, _) => self.generator_layout(ty, def_id, substs)?,
661 ty::Closure(_, ref substs) => {
662 let tys = substs.as_closure().upvar_tys();
664 &tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
665 &ReprOptions::default(),
666 StructKind::AlwaysSized,
672 if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };
676 .map(|k| self.layout_of(k.expect_ty()))
677 .collect::<Result<Vec<_>, _>>()?,
678 &ReprOptions::default(),
683 // SIMD vector types.
684 ty::Adt(def, substs) if def.repr.simd() => {
685 if !def.is_struct() {
686 // Should have yielded E0517 by now.
687 tcx.sess.delay_span_bug(
689 "#[repr(simd)] was applied to an ADT that is not a struct",
691 return Err(LayoutError::Unknown(ty));
694 // Supported SIMD vectors are homogeneous ADTs with at least one field:
696 // * #[repr(simd)] struct S(T, T, T, T);
697 // * #[repr(simd)] struct S { x: T, y: T, z: T, w: T }
698 // * #[repr(simd)] struct S([T; 4])
700 // where T is a primitive scalar (integer/float/pointer).
702 // SIMD vectors with zero fields are not supported.
703 // (should be caught by typeck)
704 if def.non_enum_variant().fields.is_empty() {
705 tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
708 // Type of the first ADT field:
709 let f0_ty = def.non_enum_variant().fields[0].ty(tcx, substs);
711 // Heterogeneous SIMD vectors are not supported:
712 // (should be caught by typeck)
713 for fi in &def.non_enum_variant().fields {
714 if fi.ty(tcx, substs) != f0_ty {
715 tcx.sess.fatal(&format!("monomorphising heterogeneous SIMD type `{}`", ty));
719 // The element type and number of elements of the SIMD vector
720 // are obtained from:
722 // * the element type and length of the single array field, if
723 // the first field is of array type, or
725 // * the homogenous field type and the number of fields.
726 let (e_ty, e_len, is_array) = if let ty::Array(e_ty, _) = f0_ty.kind() {
727 // First ADT field is an array:
729 // SIMD vectors with multiple array fields are not supported:
730 // (should be caught by typeck)
731 if def.non_enum_variant().fields.len() != 1 {
732 tcx.sess.fatal(&format!(
733 "monomorphising SIMD type `{}` with more than one array field",
738 // Extract the number of elements from the layout of the array field:
739 let len = if let Ok(TyAndLayout {
740 layout: Layout { fields: FieldsShape::Array { count, .. }, .. },
742 }) = self.layout_of(f0_ty)
746 return Err(LayoutError::Unknown(ty));
751 // First ADT field is not an array:
752 (f0_ty, def.non_enum_variant().fields.len() as _, false)
755 // SIMD vectors of zero length are not supported.
756 // Additionally, lengths are capped at 2^16 as a fixed maximum backends must
759 // Can't be caught in typeck if the array length is generic.
761 tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
762 } else if e_len > MAX_SIMD_LANES {
763 tcx.sess.fatal(&format!(
764 "monomorphising SIMD type `{}` of length greater than {}",
769 // Compute the ABI of the element type:
770 let e_ly = self.layout_of(e_ty)?;
771 let e_abi = if let Abi::Scalar(ref scalar) = e_ly.abi {
774 // This error isn't caught in typeck, e.g., if
775 // the element type of the vector is generic.
776 tcx.sess.fatal(&format!(
777 "monomorphising SIMD type `{}` with a non-primitive-scalar \
778 (integer/float/pointer) element type `{}`",
783 // Compute the size and alignment of the vector:
784 let size = e_ly.size.checked_mul(e_len, dl).ok_or(LayoutError::SizeOverflow(ty))?;
785 let align = dl.vector_align(size);
786 let size = size.align_to(align.abi);
788 // Compute the placement of the vector fields:
789 let fields = if is_array {
790 FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] }
792 FieldsShape::Array { stride: e_ly.size, count: e_len }
795 tcx.intern_layout(Layout {
796 variants: Variants::Single { index: VariantIdx::new(0) },
798 abi: Abi::Vector { element: e_abi, count: e_len },
799 largest_niche: e_ly.largest_niche.clone(),
806 ty::Adt(def, substs) => {
807 // Cache the field layouts.
814 .map(|field| self.layout_of(field.ty(tcx, substs)))
815 .collect::<Result<Vec<_>, _>>()
817 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
820 if def.repr.pack.is_some() && def.repr.align.is_some() {
821 self.tcx.sess.delay_span_bug(
822 tcx.def_span(def.did),
823 "union cannot be packed and aligned",
825 return Err(LayoutError::Unknown(ty));
829 if def.repr.pack.is_some() { dl.i8_align } else { dl.aggregate_align };
831 if let Some(repr_align) = def.repr.align {
832 align = align.max(AbiAndPrefAlign::new(repr_align));
835 let optimize = !def.repr.inhibit_union_abi_opt();
836 let mut size = Size::ZERO;
837 let mut abi = Abi::Aggregate { sized: true };
838 let index = VariantIdx::new(0);
839 for field in &variants[index] {
840 assert!(!field.is_unsized());
841 align = align.max(field.align);
843 // If all non-ZST fields have the same ABI, forward this ABI
844 if optimize && !field.is_zst() {
845 // Normalize scalar_unit to the maximal valid range
846 let field_abi = match &field.abi {
847 Abi::Scalar(x) => Abi::Scalar(scalar_unit(x.value)),
848 Abi::ScalarPair(x, y) => {
849 Abi::ScalarPair(scalar_unit(x.value), scalar_unit(y.value))
851 Abi::Vector { element: x, count } => {
852 Abi::Vector { element: scalar_unit(x.value), count: *count }
854 Abi::Uninhabited | Abi::Aggregate { .. } => {
855 Abi::Aggregate { sized: true }
859 if size == Size::ZERO {
860 // first non ZST: initialize 'abi'
862 } else if abi != field_abi {
863 // different fields have different ABI: reset to Aggregate
864 abi = Abi::Aggregate { sized: true };
868 size = cmp::max(size, field.size);
871 if let Some(pack) = def.repr.pack {
872 align = align.min(AbiAndPrefAlign::new(pack));
875 return Ok(tcx.intern_layout(Layout {
876 variants: Variants::Single { index },
877 fields: FieldsShape::Union(
878 NonZeroUsize::new(variants[index].len())
879 .ok_or(LayoutError::Unknown(ty))?,
884 size: size.align_to(align.abi),
888 // A variant is absent if it's uninhabited and only has ZST fields.
889 // Present uninhabited variants only require space for their fields,
890 // but *not* an encoding of the discriminant (e.g., a tag value).
891 // See issue #49298 for more details on the need to leave space
892 // for non-ZST uninhabited data (mostly partial initialization).
893 let absent = |fields: &[TyAndLayout<'_>]| {
894 let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
895 let is_zst = fields.iter().all(|f| f.is_zst());
896 uninhabited && is_zst
898 let (present_first, present_second) = {
899 let mut present_variants = variants
901 .filter_map(|(i, v)| if absent(v) { None } else { Some(i) });
902 (present_variants.next(), present_variants.next())
904 let present_first = match present_first {
905 Some(present_first) => present_first,
906 // Uninhabited because it has no variants, or only absent ones.
907 None if def.is_enum() => {
908 return Ok(tcx.layout_of(param_env.and(tcx.types.never))?.layout);
910 // If it's a struct, still compute a layout so that we can still compute the
912 None => VariantIdx::new(0),
915 let is_struct = !def.is_enum() ||
916 // Only one variant is present.
917 (present_second.is_none() &&
918 // Representation optimizations are allowed.
919 !def.repr.inhibit_enum_layout_opt());
921 // Struct, or univariant enum equivalent to a struct.
922 // (Typechecking will reject discriminant-sizing attrs.)
924 let v = present_first;
925 let kind = if def.is_enum() || variants[v].is_empty() {
926 StructKind::AlwaysSized
928 let param_env = tcx.param_env(def.did);
929 let last_field = def.variants[v].fields.last().unwrap();
931 tcx.type_of(last_field.did).is_sized(tcx.at(DUMMY_SP), param_env);
933 StructKind::MaybeUnsized
935 StructKind::AlwaysSized
939 let mut st = self.univariant_uninterned(ty, &variants[v], &def.repr, kind)?;
940 st.variants = Variants::Single { index: v };
941 let (start, end) = self.tcx.layout_scalar_valid_range(def.did);
943 Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
944 // the asserts ensure that we are not using the
945 // `#[rustc_layout_scalar_valid_range(n)]`
946 // attribute to widen the range of anything as that would probably
947 // result in UB somewhere
948 // FIXME(eddyb) the asserts are probably not needed,
949 // as larger validity ranges would result in missed
950 // optimizations, *not* wrongly assuming the inner
951 // value is valid. e.g. unions enlarge validity ranges,
952 // because the values may be uninitialized.
953 if let Bound::Included(start) = start {
954 // FIXME(eddyb) this might be incorrect - it doesn't
955 // account for wrap-around (end < start) ranges.
956 assert!(scalar.valid_range.start <= start);
957 scalar.valid_range.start = start;
959 if let Bound::Included(end) = end {
960 // FIXME(eddyb) this might be incorrect - it doesn't
961 // account for wrap-around (end < start) ranges.
962 assert!(scalar.valid_range.end >= end);
963 scalar.valid_range.end = end;
966 // Update `largest_niche` if we have introduced a larger niche.
967 let niche = if def.repr.hide_niche() {
970 Niche::from_scalar(dl, Size::ZERO, scalar.clone())
972 if let Some(niche) = niche {
973 match &st.largest_niche {
974 Some(largest_niche) => {
975 // Replace the existing niche even if they're equal,
976 // because this one is at a lower offset.
977 if largest_niche.available(dl) <= niche.available(dl) {
978 st.largest_niche = Some(niche);
981 None => st.largest_niche = Some(niche),
986 start == Bound::Unbounded && end == Bound::Unbounded,
987 "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
993 return Ok(tcx.intern_layout(st));
996 // At this point, we have handled all unions and
997 // structs. (We have also handled univariant enums
998 // that allow representation optimization.)
999 assert!(def.is_enum());
1001 // The current code for niche-filling relies on variant indices
1002 // instead of actual discriminants, so dataful enums with
1003 // explicit discriminants (RFC #2363) would misbehave.
1004 let no_explicit_discriminants = def
1007 .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i.as_u32()));
1009 let mut niche_filling_layout = None;
1011 // Niche-filling enum optimization.
1012 if !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants {
1013 let mut dataful_variant = None;
1014 let mut niche_variants = VariantIdx::MAX..=VariantIdx::new(0);
1016 // Find one non-ZST variant.
1017 'variants: for (v, fields) in variants.iter_enumerated() {
1023 if dataful_variant.is_none() {
1024 dataful_variant = Some(v);
1027 dataful_variant = None;
1032 niche_variants = *niche_variants.start().min(&v)..=v;
1035 if niche_variants.start() > niche_variants.end() {
1036 dataful_variant = None;
1039 if let Some(i) = dataful_variant {
1040 let count = (niche_variants.end().as_u32()
1041 - niche_variants.start().as_u32()
1044 // Find the field with the largest niche
1045 let niche_candidate = variants[i]
1048 .filter_map(|(j, &field)| Some((j, field.largest_niche.as_ref()?)))
1049 .max_by_key(|(_, niche)| niche.available(dl));
1051 if let Some((field_index, niche, (niche_start, niche_scalar))) =
1052 niche_candidate.and_then(|(field_index, niche)| {
1053 Some((field_index, niche, niche.reserve(self, count)?))
1056 let mut align = dl.aggregate_align;
1060 let mut st = self.univariant_uninterned(
1064 StructKind::AlwaysSized,
1066 st.variants = Variants::Single { index: j };
1068 align = align.max(st.align);
1072 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1074 let offset = st[i].fields.offset(field_index) + niche.offset;
1075 let size = st[i].size;
1077 let abi = if st.iter().all(|v| v.abi.is_uninhabited()) {
1081 Abi::Scalar(_) => Abi::Scalar(niche_scalar.clone()),
1082 Abi::ScalarPair(ref first, ref second) => {
1083 // We need to use scalar_unit to reset the
1084 // valid range to the maximal one for that
1085 // primitive, because only the niche is
1086 // guaranteed to be initialised, not the
1088 if offset.bytes() == 0 {
1090 niche_scalar.clone(),
1091 scalar_unit(second.value),
1095 scalar_unit(first.value),
1096 niche_scalar.clone(),
1100 _ => Abi::Aggregate { sized: true },
1105 Niche::from_scalar(dl, offset, niche_scalar.clone());
1107 niche_filling_layout = Some(Layout {
1108 variants: Variants::Multiple {
1110 tag_encoding: TagEncoding::Niche {
1118 fields: FieldsShape::Arbitrary {
1119 offsets: vec![offset],
1120 memory_index: vec![0],
1131 let (mut min, mut max) = (i128::MAX, i128::MIN);
1132 let discr_type = def.repr.discr_type();
1133 let bits = Integer::from_attr(self, discr_type).size().bits();
1134 for (i, discr) in def.discriminants(tcx) {
1135 if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
1138 let mut x = discr.val as i128;
1139 if discr_type.is_signed() {
1140 // sign extend the raw representation to be an i128
1141 x = (x << (128 - bits)) >> (128 - bits);
1150 // We might have no inhabited variants, so pretend there's at least one.
1151 if (min, max) == (i128::MAX, i128::MIN) {
1155 assert!(min <= max, "discriminant range is {}...{}", min, max);
1156 let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max);
1158 let mut align = dl.aggregate_align;
1159 let mut size = Size::ZERO;
1161 // We're interested in the smallest alignment, so start large.
1162 let mut start_align = Align::from_bytes(256).unwrap();
1163 assert_eq!(Integer::for_align(dl, start_align), None);
1165 // repr(C) on an enum tells us to make a (tag, union) layout,
1166 // so we need to grow the prefix alignment to be at least
1167 // the alignment of the union. (This value is used both for
1168 // determining the alignment of the overall enum, and the
1169 // determining the alignment of the payload after the tag.)
1170 let mut prefix_align = min_ity.align(dl).abi;
1172 for fields in &variants {
1173 for field in fields {
1174 prefix_align = prefix_align.max(field.align.abi);
1179 // Create the set of structs that represent each variant.
1180 let mut layout_variants = variants
1182 .map(|(i, field_layouts)| {
1183 let mut st = self.univariant_uninterned(
1187 StructKind::Prefixed(min_ity.size(), prefix_align),
1189 st.variants = Variants::Single { index: i };
1190 // Find the first field we can't move later
1191 // to make room for a larger discriminant.
1193 st.fields.index_by_increasing_offset().map(|j| field_layouts[j])
1195 if !field.is_zst() || field.align.abi.bytes() != 1 {
1196 start_align = start_align.min(field.align.abi);
1200 size = cmp::max(size, st.size);
1201 align = align.max(st.align);
1204 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1206 // Align the maximum variant size to the largest alignment.
1207 size = size.align_to(align.abi);
1209 if size.bytes() >= dl.obj_size_bound() {
1210 return Err(LayoutError::SizeOverflow(ty));
1213 let typeck_ity = Integer::from_attr(dl, def.repr.discr_type());
1214 if typeck_ity < min_ity {
1215 // It is a bug if Layout decided on a greater discriminant size than typeck for
1216 // some reason at this point (based on values discriminant can take on). Mostly
1217 // because this discriminant will be loaded, and then stored into variable of
1218 // type calculated by typeck. Consider such case (a bug): typeck decided on
1219 // byte-sized discriminant, but layout thinks we need a 16-bit to store all
1220 // discriminant values. That would be a bug, because then, in codegen, in order
1221 // to store this 16-bit discriminant into 8-bit sized temporary some of the
1222 // space necessary to represent would have to be discarded (or layout is wrong
1223 // on thinking it needs 16 bits)
1225 "layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
1229 // However, it is fine to make discr type however large (as an optimisation)
1230 // after this point – we’ll just truncate the value we load in codegen.
1233 // Check to see if we should use a different type for the
1234 // discriminant. We can safely use a type with the same size
1235 // as the alignment of the first field of each variant.
1236 // We increase the size of the discriminant to avoid LLVM copying
1237 // padding when it doesn't need to. This normally causes unaligned
1238 // load/stores and excessive memcpy/memset operations. By using a
1239 // bigger integer size, LLVM can be sure about its contents and
1240 // won't be so conservative.
1242 // Use the initial field alignment
1243 let mut ity = if def.repr.c() || def.repr.int.is_some() {
1246 Integer::for_align(dl, start_align).unwrap_or(min_ity)
1249 // If the alignment is not larger than the chosen discriminant size,
1250 // don't use the alignment as the final size.
1254 // Patch up the variants' first few fields.
1255 let old_ity_size = min_ity.size();
1256 let new_ity_size = ity.size();
1257 for variant in &mut layout_variants {
1258 match variant.fields {
1259 FieldsShape::Arbitrary { ref mut offsets, .. } => {
1261 if *i <= old_ity_size {
1262 assert_eq!(*i, old_ity_size);
1266 // We might be making the struct larger.
1267 if variant.size <= old_ity_size {
1268 variant.size = new_ity_size;
1276 let tag_mask = !0u128 >> (128 - ity.size().bits());
1278 value: Int(ity, signed),
1279 valid_range: WrappingRange {
1280 start: (min as u128 & tag_mask),
1281 end: (max as u128 & tag_mask),
1284 let mut abi = Abi::Aggregate { sized: true };
1285 if tag.value.size(dl) == size {
1286 abi = Abi::Scalar(tag.clone());
1288 // Try to use a ScalarPair for all tagged enums.
1289 let mut common_prim = None;
1290 for (field_layouts, layout_variant) in iter::zip(&variants, &layout_variants) {
1291 let offsets = match layout_variant.fields {
1292 FieldsShape::Arbitrary { ref offsets, .. } => offsets,
1296 iter::zip(field_layouts, offsets).filter(|p| !p.0.is_zst());
1297 let (field, offset) = match (fields.next(), fields.next()) {
1298 (None, None) => continue,
1299 (Some(pair), None) => pair,
1305 let prim = match field.abi {
1306 Abi::Scalar(ref scalar) => scalar.value,
1312 if let Some(pair) = common_prim {
1313 // This is pretty conservative. We could go fancier
1314 // by conflating things like i32 and u32, or even
1315 // realising that (u8, u8) could just cohabit with
1317 if pair != (prim, offset) {
1322 common_prim = Some((prim, offset));
1325 if let Some((prim, offset)) = common_prim {
1326 let pair = self.scalar_pair(tag.clone(), scalar_unit(prim));
1327 let pair_offsets = match pair.fields {
1328 FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
1329 assert_eq!(memory_index, &[0, 1]);
1334 if pair_offsets[0] == Size::ZERO
1335 && pair_offsets[1] == *offset
1336 && align == pair.align
1337 && size == pair.size
1339 // We can use `ScalarPair` only when it matches our
1340 // already computed layout (including `#[repr(C)]`).
1346 if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
1347 abi = Abi::Uninhabited;
1350 let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag.clone());
1352 let tagged_layout = Layout {
1353 variants: Variants::Multiple {
1355 tag_encoding: TagEncoding::Direct,
1357 variants: layout_variants,
1359 fields: FieldsShape::Arbitrary {
1360 offsets: vec![Size::ZERO],
1361 memory_index: vec![0],
1369 let best_layout = match (tagged_layout, niche_filling_layout) {
1370 (tagged_layout, Some(niche_filling_layout)) => {
1371 // Pick the smaller layout; otherwise,
1372 // pick the layout with the larger niche; otherwise,
1373 // pick tagged as it has simpler codegen.
1374 cmp::min_by_key(tagged_layout, niche_filling_layout, |layout| {
1376 layout.largest_niche.as_ref().map_or(0, |n| n.available(dl));
1377 (layout.size, cmp::Reverse(niche_size))
1380 (tagged_layout, None) => tagged_layout,
1383 tcx.intern_layout(best_layout)
1386 // Types with no meaningful known layout.
1387 ty::Projection(_) | ty::Opaque(..) => {
1388 // NOTE(eddyb) `layout_of` query should've normalized these away,
1389 // if that was possible, so there's no reason to try again here.
1390 return Err(LayoutError::Unknown(ty));
1393 ty::Placeholder(..) | ty::GeneratorWitness(..) | ty::Infer(_) => {
1394 bug!("Layout::compute: unexpected type `{}`", ty)
1397 ty::Bound(..) | ty::Param(_) | ty::Error(_) => {
1398 return Err(LayoutError::Unknown(ty));
1404 /// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
1405 #[derive(Clone, Debug, PartialEq)]
1406 enum SavedLocalEligibility {
// NOTE(review): an `Unassigned` variant is constructed and matched by the
// eligibility code below (e.g. `IndexVec::from_elem_n(Unassigned, ...)`),
// but its declaration line is not visible in this excerpt — confirm
// against the full file.
// The local is used by exactly one variant, so it can be laid out in that
// variant's own suffix (overlapping with other variants' locals).
1408 Assigned(VariantIdx),
1409 // FIXME: Use newtype_index so we aren't wasting bytes
// The local must live in the generator's shared prefix. `Some(idx)` is the
// local's slot among the promoted prefix fields (assigned at the end of
// `generator_saved_local_eligibility`); `None` means the slot has not been
// chosen yet.
1410 Ineligible(Option<u32>),
1413 // When laying out generators, we divide our saved local fields into two
1414 // categories: overlap-eligible and overlap-ineligible.
1416 // Those fields which are ineligible for overlap go in a "prefix" at the
1417 // beginning of the layout, and always have space reserved for them.
1419 // Overlap-eligible fields are only assigned to one variant, so we lay
1420 // those fields out for each variant and put them right after the
1423 // Finally, in the layout details, we point to the fields from the
1424 // variants they are assigned to. It is possible for some fields to be
1425 // included in multiple variants. No field ever "moves around" in the
1426 // layout; its offset is always the same.
1428 // Also included in the layout are the upvars and the discriminant.
1429 // These are included as fields on the "outer" layout; they are not part
1431 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
1432 /// Compute the eligibility and assignment of each local.
/// Returns the set of locals that must live in the generator's shared
/// prefix (ineligible for overlap) together with the per-local
/// `SavedLocalEligibility` assignment table.
1433 fn generator_saved_local_eligibility(
1435 info: &GeneratorLayout<'tcx>,
1436 ) -> (BitSet<GeneratorSavedLocal>, IndexVec<GeneratorSavedLocal, SavedLocalEligibility>) {
1437 use SavedLocalEligibility::*;
// Start with every saved local unassigned; the passes below refine this.
1439 let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> =
1440 IndexVec::from_elem_n(Unassigned, info.field_tys.len());
1442 // The saved locals not eligible for overlap. These will get
1443 // "promoted" to the prefix of our generator.
1444 let mut ineligible_locals = BitSet::new_empty(info.field_tys.len());
1446 // Figure out which of our saved locals are fields in only
1447 // one variant. The rest are deemed ineligible for overlap.
1448 for (variant_index, fields) in info.variant_fields.iter_enumerated() {
1449 for local in fields {
1450 match assignments[*local] {
1452 assignments[*local] = Assigned(variant_index);
1455 // We've already seen this local at another suspension
1456 // point, so it is no longer a candidate.
1458 "removing local {:?} in >1 variant ({:?}, {:?})",
1463 ineligible_locals.insert(*local);
1464 assignments[*local] = Ineligible(None);
1471 // Next, check every pair of eligible locals to see if they
// conflict (are storage-live at the same time), per `storage_conflicts`.
1473 for local_a in info.storage_conflicts.rows() {
1474 let conflicts_a = info.storage_conflicts.count(local_a);
1475 if ineligible_locals.contains(local_a) {
1479 for local_b in info.storage_conflicts.iter(local_a) {
1480 // local_a and local_b are storage live at the same time, therefore they
1481 // cannot overlap in the generator layout. The only way to guarantee
1482 // this is if they are in the same variant, or one is ineligible
1483 // (which means it is stored in every variant).
1484 if ineligible_locals.contains(local_b)
1485 || assignments[local_a] == assignments[local_b]
1490 // If they conflict, we will choose one to make ineligible.
1491 // This is not always optimal; it's just a greedy heuristic that
1492 // seems to produce good results most of the time.
1493 let conflicts_b = info.storage_conflicts.count(local_b);
// Evict whichever local participates in more conflicts.
1494 let (remove, other) =
1495 if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
1496 ineligible_locals.insert(remove);
1497 assignments[remove] = Ineligible(None);
1498 trace!("removing local {:?} due to conflict with {:?}", remove, other);
1502 // Count the number of variants in use. If only one of them, then it is
1503 // impossible to overlap any locals in our layout. In this case it's
1504 // always better to make the remaining locals ineligible, so we can
1505 // lay them out with the other locals in the prefix and eliminate
1506 // unnecessary padding bytes.
1508 let mut used_variants = BitSet::new_empty(info.variant_fields.len());
1509 for assignment in &assignments {
1510 if let Assigned(idx) = assignment {
1511 used_variants.insert(*idx);
1514 if used_variants.count() < 2 {
1515 for assignment in assignments.iter_mut() {
1516 *assignment = Ineligible(None);
1518 ineligible_locals.insert_all();
1522 // Write down the order of our locals that will be promoted to the prefix.
// The iteration order of the bitset fixes each promoted local's slot index.
1524 for (idx, local) in ineligible_locals.iter().enumerate() {
1525 assignments[local] = Ineligible(Some(idx as u32));
1528 debug!("generator saved local assignments: {:?}", assignments);
1530 (ineligible_locals, assignments)
1533 /// Compute the full generator layout.
/// Produces an interned tagged-union `Layout`: a common prefix holding the
/// upvars, the discriminant ("tag"), and all promoted (overlap-ineligible)
/// saved locals, followed — per variant — by that variant's own
/// overlap-eligible saved locals. Returns `LayoutError::Unknown` when the
/// generator's MIR layout info is unavailable.
1534 fn generator_layout(
1537 def_id: hir::def_id::DefId,
1538 substs: SubstsRef<'tcx>,
1539 ) -> Result<&'tcx Layout, LayoutError<'tcx>> {
1540 use SavedLocalEligibility::*;
1542 let subst_field = |ty: Ty<'tcx>| ty.subst(tcx, substs);
1544 let info = match tcx.generator_layout(def_id) {
1545 None => return Err(LayoutError::Unknown(ty)),
1548 let (ineligible_locals, assignments) = self.generator_saved_local_eligibility(&info);
1550 // Build a prefix layout, including "promoting" all ineligible
1551 // locals as part of the prefix. We compute the layout of all of
1552 // these fields at once to get optimal packing.
// The tag is placed immediately after the upvars, so its field index
// equals the number of prefix types (also used as `tag_field` below).
1553 let tag_index = substs.as_generator().prefix_tys().count();
1555 // `info.variant_fields` already accounts for the reserved variants, so no need to add them.
1556 let max_discr = (info.variant_fields.len() - 1) as u128;
1557 let discr_int = Integer::fit_unsigned(max_discr);
1558 let discr_int_ty = discr_int.to_ty(tcx, false);
1560 value: Primitive::Int(discr_int, false),
1561 valid_range: WrappingRange { start: 0, end: max_discr },
1563 let tag_layout = self.tcx.intern_layout(Layout::scalar(self, tag.clone()));
1564 let tag_layout = TyAndLayout { ty: discr_int_ty, layout: tag_layout };
// Promoted locals are wrapped in `MaybeUninit<T>` (via `mk_maybe_uninit`)
// — presumably because they are not initialized in every generator state;
// confirm against the full file.
1566 let promoted_layouts = ineligible_locals
1568 .map(|local| subst_field(info.field_tys[local]))
1569 .map(|ty| tcx.mk_maybe_uninit(ty))
1570 .map(|ty| self.layout_of(ty));
1571 let prefix_layouts = substs
1574 .map(|ty| self.layout_of(ty))
1575 .chain(iter::once(Ok(tag_layout)))
1576 .chain(promoted_layouts)
1577 .collect::<Result<Vec<_>, _>>()?;
1578 let prefix = self.univariant_uninterned(
1581 &ReprOptions::default(),
1582 StructKind::AlwaysSized,
1585 let (prefix_size, prefix_align) = (prefix.size, prefix.align);
1587 // Split the prefix layout into the "outer" fields (upvars and
1588 // discriminant) and the "promoted" fields. Promoted fields will
1589 // get included in each variant that requested them in
1591 debug!("prefix = {:#?}", prefix);
1592 let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
1593 FieldsShape::Arbitrary { mut offsets, memory_index } => {
1594 let mut inverse_memory_index = invert_mapping(&memory_index);
1596 // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
1597 // "outer" and "promoted" fields respectively.
1598 let b_start = (tag_index + 1) as u32;
1599 let offsets_b = offsets.split_off(b_start as usize);
1600 let offsets_a = offsets;
1602 // Disentangle the "a" and "b" components of `inverse_memory_index`
1603 // by preserving the order but keeping only one disjoint "half" each.
1604 // FIXME(eddyb) build a better abstraction for permutations, if possible.
1605 let inverse_memory_index_b: Vec<_> =
1606 inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
1607 inverse_memory_index.retain(|&i| i < b_start);
1608 let inverse_memory_index_a = inverse_memory_index;
1610 // Since `inverse_memory_index_{a,b}` each only refer to their
1611 // respective fields, they can be safely inverted
1612 let memory_index_a = invert_mapping(&inverse_memory_index_a);
1613 let memory_index_b = invert_mapping(&inverse_memory_index_b);
1616 FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
1617 (outer_fields, offsets_b, memory_index_b)
1622 let mut size = prefix.size;
1623 let mut align = prefix.align;
// Lay out each variant's overlap-eligible fields after the shared prefix.
1627 .map(|(index, variant_fields)| {
1628 // Only include overlap-eligible fields when we compute our variant layout.
1629 let variant_only_tys = variant_fields
1631 .filter(|local| match assignments[**local] {
1632 Unassigned => bug!(),
1633 Assigned(v) if v == index => true,
1634 Assigned(_) => bug!("assignment does not match variant"),
1635 Ineligible(_) => false,
1637 .map(|local| subst_field(info.field_tys[*local]));
1639 let mut variant = self.univariant_uninterned(
1642 .map(|ty| self.layout_of(ty))
1643 .collect::<Result<Vec<_>, _>>()?,
1644 &ReprOptions::default(),
1645 StructKind::Prefixed(prefix_size, prefix_align.abi),
1647 variant.variants = Variants::Single { index };
1649 let (offsets, memory_index) = match variant.fields {
1650 FieldsShape::Arbitrary { offsets, memory_index } => (offsets, memory_index),
1654 // Now, stitch the promoted and variant-only fields back together in
1655 // the order they are mentioned by our GeneratorLayout.
1656 // Because we only use some subset (that can differ between variants)
1657 // of the promoted fields, we can't just pick those elements of the
1658 // `promoted_memory_index` (as we'd end up with gaps).
1659 // So instead, we build an "inverse memory_index", as if all of the
1660 // promoted fields were being used, but leave the elements not in the
1661 // subset as `INVALID_FIELD_IDX`, which we can filter out later to
1662 // obtain a valid (bijective) mapping.
1663 const INVALID_FIELD_IDX: u32 = !0;
1664 let mut combined_inverse_memory_index =
1665 vec![INVALID_FIELD_IDX; promoted_memory_index.len() + memory_index.len()];
1666 let mut offsets_and_memory_index = iter::zip(offsets, memory_index);
1667 let combined_offsets = variant_fields
1671 let (offset, memory_index) = match assignments[*local] {
1672 Unassigned => bug!(),
1674 let (offset, memory_index) =
1675 offsets_and_memory_index.next().unwrap();
1676 (offset, promoted_memory_index.len() as u32 + memory_index)
1678 Ineligible(field_idx) => {
1679 let field_idx = field_idx.unwrap() as usize;
1680 (promoted_offsets[field_idx], promoted_memory_index[field_idx])
1683 combined_inverse_memory_index[memory_index as usize] = i as u32;
1688 // Remove the unused slots and invert the mapping to obtain the
1689 // combined `memory_index` (also see previous comment).
1690 combined_inverse_memory_index.retain(|&i| i != INVALID_FIELD_IDX);
1691 let combined_memory_index = invert_mapping(&combined_inverse_memory_index);
1693 variant.fields = FieldsShape::Arbitrary {
1694 offsets: combined_offsets,
1695 memory_index: combined_memory_index,
1698 size = size.max(variant.size);
1699 align = align.max(variant.align);
1702 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
// Pad the total size up to the overall alignment.
1704 size = size.align_to(align.abi);
1706 let abi = if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi.is_uninhabited())
1710 Abi::Aggregate { sized: true }
1713 let layout = tcx.intern_layout(Layout {
1714 variants: Variants::Multiple {
1716 tag_encoding: TagEncoding::Direct,
1717 tag_field: tag_index,
1720 fields: outer_fields,
1722 largest_niche: prefix.largest_niche,
1726 debug!("generator layout ({:?}): {:#?}", ty, layout);
1730 /// This is invoked by the `layout_of` query to record the final
1731 /// layout of each type.
1733 fn record_layout_for_printing(&self, layout: TyAndLayout<'tcx>) {
1734 // If we are running with `-Zprint-type-sizes`, maybe record layouts
1735 // for dumping later.
1736 if self.tcx.sess.opts.debugging_opts.print_type_sizes {
// Heavy recording work lives in the "_outlined" sibling — presumably to
// keep this hot-path check small; confirm against the full file.
1737 self.record_layout_for_printing_outlined(layout)
/// Records size/align/field details of `layout` into `sess.code_stats`
/// for later `-Zprint-type-sizes` output. Only nominal types (ADTs and
/// closures) are recorded; generic or bounded layouts are skipped.
1741 fn record_layout_for_printing_outlined(&self, layout: TyAndLayout<'tcx>) {
1742 // Ignore layouts that are done with non-empty environments or
1743 // non-monomorphic layouts, as the user only wants to see the stuff
1744 // resulting from the final codegen session.
1745 if layout.ty.definitely_has_param_types_or_consts(self.tcx)
1746 || !self.param_env.caller_bounds().is_empty()
1751 // (delay format until we actually need it)
// Small helper closure so each `match` arm below records uniformly.
1752 let record = |kind, packed, opt_discr_size, variants| {
1753 let type_desc = format!("{:?}", layout.ty);
1754 self.tcx.sess.code_stats.record_type_size(
1765 let adt_def = match *layout.ty.kind() {
1766 ty::Adt(ref adt_def, _) => {
1767 debug!("print-type-size t: `{:?}` process adt", layout.ty);
1771 ty::Closure(..) => {
1772 debug!("print-type-size t: `{:?}` record closure", layout.ty);
1773 record(DataTypeKind::Closure, false, None, vec![]);
1778 debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
1783 let adt_kind = adt_def.adt_kind();
1784 let adt_packed = adt_def.repr.pack.is_some();
// Builds a `VariantInfo` with per-field name/offset/size/align, tracking
// `min_size` as the furthest field end seen.
1786 let build_variant_info = |n: Option<Ident>, flds: &[Symbol], layout: TyAndLayout<'tcx>| {
1787 let mut min_size = Size::ZERO;
1788 let field_info: Vec<_> = flds
1791 .map(|(i, &name)| match layout.field(self, i) {
1793 bug!("no layout found for field {}: `{:?}`", name, err);
1795 Ok(field_layout) => {
1796 let offset = layout.fields.offset(i);
1797 let field_end = offset + field_layout.size;
1798 if min_size < field_end {
1799 min_size = field_end;
1802 name: name.to_string(),
1803 offset: offset.bytes(),
1804 size: field_layout.size.bytes(),
1805 align: field_layout.align.abi.bytes(),
1812 name: n.map(|n| n.to_string()),
1813 kind: if layout.is_unsized() { SizeKind::Min } else { SizeKind::Exact },
1814 align: layout.align.abi.bytes(),
1815 size: if min_size.bytes() == 0 { layout.size.bytes() } else { min_size.bytes() },
1820 match layout.variants {
1821 Variants::Single { index } => {
1822 debug!("print-type-size `{:#?}` variant {}", layout, adt_def.variants[index].ident);
1823 if !adt_def.variants.is_empty() {
1824 let variant_def = &adt_def.variants[index];
1825 let fields: Vec<_> = variant_def.fields.iter().map(|f| f.ident.name).collect();
1830 vec![build_variant_info(Some(variant_def.ident), &fields, layout)],
1833 // (This case arises for *empty* enums; so give it
1835 record(adt_kind.into(), adt_packed, None, vec![]);
1839 Variants::Multiple { ref tag, ref tag_encoding, .. } => {
1841 "print-type-size `{:#?}` adt general variants def {}",
1843 adt_def.variants.len()
1845 let variant_infos: Vec<_> = adt_def
1848 .map(|(i, variant_def)| {
1849 let fields: Vec<_> =
1850 variant_def.fields.iter().map(|f| f.ident.name).collect();
1852 Some(variant_def.ident),
1854 layout.for_variant(self, i),
// Only a directly-encoded tag occupies dedicated space worth reporting.
1861 match tag_encoding {
1862 TagEncoding::Direct => Some(tag.value.size(self)),
1872 /// Type size "skeleton", i.e., the only information determining a type's size.
1873 /// While this is conservative, (aside from constant sizes, only pointers,
1874 /// newtypes thereof and null pointer optimized enums are allowed), it is
1875 /// enough to statically check common use cases of transmute.
1876 #[derive(Copy, Clone, Debug)]
1877 pub enum SizeSkeleton<'tcx> {
1878 /// Any statically computable Layout.
// NOTE(review): the variant declaration lines themselves (`Known(Size)`
// and `Pointer { non_zero, tail }`) are not visible in this excerpt; the
// impl below constructs both — confirm against the full file.
1881 /// A potentially-fat pointer.
1883 /// If true, this pointer is never null.
1885 /// The type which determines the unsized metadata, if any,
1886 /// of this pointer. Either a type parameter or a projection
1887 /// depending on one, with regions erased.
1892 impl<'tcx> SizeSkeleton<'tcx> {
// Tries to compute a size skeleton for a type: first via a full
// `layout_of`; if that errors (e.g. for generic types), special-cases
// pointers/references and newtype / nullable-pointer-optimized ADTs.
1896 param_env: ty::ParamEnv<'tcx>,
1897 ) -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
1898 debug_assert!(!ty.has_infer_types_or_consts());
1900 // First try computing a static layout.
1901 let err = match tcx.layout_of(param_env.and(ty)) {
1903 return Ok(SizeSkeleton::Known(layout.size));
// References and raw pointers: size is determined by the unsized tail.
1909 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
1910 let non_zero = !ty.is_unsafe_ptr();
1911 let tail = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
1913 ty::Param(_) | ty::Projection(_) => {
1914 debug_assert!(tail.definitely_has_param_types_or_consts(tcx));
1915 Ok(SizeSkeleton::Pointer { non_zero, tail: tcx.erase_regions(tail) })
1918 "SizeSkeleton::compute({}): layout errored ({}), yet \
1919 tail `{}` is not a type parameter or a projection",
1927 ty::Adt(def, substs) => {
1928 // Only newtypes and enums w/ nullable pointer optimization.
1929 if def.is_union() || def.variants.is_empty() || def.variants.len() > 2 {
1933 // Get a zero-sized variant or a pointer newtype.
1934 let zero_or_ptr_variant = |i| {
1935 let i = VariantIdx::new(i);
1936 let fields = def.variants[i]
1939 .map(|field| SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env));
1941 for field in fields {
1944 SizeSkeleton::Known(size) => {
// Any field with a nonzero known size disqualifies the variant.
1945 if size.bytes() > 0 {
1949 SizeSkeleton::Pointer { .. } => {
1960 let v0 = zero_or_ptr_variant(0)?;
// Newtype (single-variant struct/enum around a pointer).
1962 if def.variants.len() == 1 {
1963 if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
1964 return Ok(SizeSkeleton::Pointer {
// `non_zero` also holds when a `layout_scalar_valid_range` attribute
// excludes zero from the valid range.
1966 || match tcx.layout_scalar_valid_range(def.did) {
1967 (Bound::Included(start), Bound::Unbounded) => start > 0,
1968 (Bound::Included(start), Bound::Included(end)) => {
1969 0 < start && start < end
1980 let v1 = zero_or_ptr_variant(1)?;
1981 // Nullable pointer enum optimization.
1983 (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None)
1984 | (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
1985 Ok(SizeSkeleton::Pointer { non_zero: false, tail })
// Projections/opaque types: normalize and retry, unless normalization
// made no progress.
1991 ty::Projection(_) | ty::Opaque(..) => {
1992 let normalized = tcx.normalize_erasing_regions(param_env, ty);
1993 if ty == normalized {
1996 SizeSkeleton::compute(normalized, tcx, param_env)
// Whether two skeletons are guaranteed to have the same size: known sizes
// must match exactly; pointer skeletons are compared via their `tail` types.
2004 pub fn same_size(self, other: SizeSkeleton<'_>) -> bool {
2005 match (self, other) {
2006 (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
2007 (SizeSkeleton::Pointer { tail: a, .. }, SizeSkeleton::Pointer { tail: b, .. }) => {
/// Access to a `TyCtxt`, on top of the target data layout (`HasDataLayout`).
2015 pub trait HasTyCtxt<'tcx>: HasDataLayout {
2016 fn tcx(&self) -> TyCtxt<'tcx>;
/// Access to the `ty::ParamEnv` a layout context computes layouts under.
2019 pub trait HasParamEnv<'tcx> {
2020 fn param_env(&self) -> ty::ParamEnv<'tcx>;
// `TyCtxt` can serve the target data layout directly.
2023 impl<'tcx> HasDataLayout for TyCtxt<'tcx> {
2025 fn data_layout(&self) -> &TargetDataLayout {
// A `TyCtxt` trivially provides itself.
2030 impl<'tcx> HasTyCtxt<'tcx> for TyCtxt<'tcx> {
2032 fn tcx(&self) -> TyCtxt<'tcx> {
// `LayoutCx` exposes the `ParamEnv` it was constructed with.
2037 impl<'tcx, C> HasParamEnv<'tcx> for LayoutCx<'tcx, C> {
2038 fn param_env(&self) -> ty::ParamEnv<'tcx> {
// `LayoutCx` forwards data-layout access to its inner context.
2043 impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
2044 fn data_layout(&self) -> &TargetDataLayout {
2045 self.tcx.data_layout()
// `LayoutCx` forwards `TyCtxt` access to its inner context.
2049 impl<'tcx, T: HasTyCtxt<'tcx>> HasTyCtxt<'tcx> for LayoutCx<'tcx, T> {
2050 fn tcx(&self) -> TyCtxt<'tcx> {
/// `rustc_target`'s generic `TyAndLayout` specialized to rustc's `Ty<'tcx>`.
2055 pub type TyAndLayout<'tcx> = rustc_target::abi::TyAndLayout<'tcx, Ty<'tcx>>;
2057 impl LayoutOf<'tcx> for LayoutCx<'tcx, TyCtxt<'tcx>> {
2059 type TyAndLayout = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
2061 /// Computes the layout of a type. Note that this implicitly
2062 /// executes in "reveal all" mode, and will normalize the input type.
// Forwards to the memoizing `layout_of` query under this context's
// `param_env`.
2064 fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout {
2065 self.tcx.layout_of(self.param_env.and(ty))
// Same as the `TyCtxt`-based impl, but for a context carrying a
// `TyCtxtAt` (a `TyCtxt` plus a span for diagnostics).
2069 impl LayoutOf<'tcx> for LayoutCx<'tcx, ty::query::TyCtxtAt<'tcx>> {
2071 type TyAndLayout = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
2073 /// Computes the layout of a type. Note that this implicitly
2074 /// executes in "reveal all" mode, and will normalize the input type.
2076 fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout {
2077 self.tcx.layout_of(self.param_env.and(ty))
2081 impl<'tcx, C> TyAbiInterface<'tcx, C> for Ty<'tcx>
2083 C: LayoutOf<'tcx, Ty = Ty<'tcx>> + HasTyCtxt<'tcx> + HasParamEnv<'tcx>,
// Projects the layout of a single variant out of an enum/generator layout.
2085 fn ty_and_layout_for_variant(
2086 this: TyAndLayout<'tcx>,
2088 variant_index: VariantIdx,
2089 ) -> TyAndLayout<'tcx> {
2090 let layout = match this.variants {
2091 Variants::Single { index }
2092 // If all variants but one are uninhabited, the variant layout is the enum layout.
2093 if index == variant_index &&
2094 // Don't confuse variants of uninhabited enums with the enum itself.
2095 // For more details see https://github.com/rust-lang/rust/issues/69763.
2096 this.fields != FieldsShape::Primitive =>
2101 Variants::Single { index } => {
2102 // Deny calling for_variant more than once for non-Single enums.
2103 if let Ok(original_layout) = cx.layout_of(this.ty).to_result() {
2104 assert_eq!(original_layout.variants, Variants::Single { index });
// Requested variant differs from the single (inhabited) one: synthesize
// an uninhabited layout for it on the fly.
2107 let fields = match this.ty.kind() {
2108 ty::Adt(def, _) if def.variants.is_empty() =>
2109 bug!("for_variant called on zero-variant enum"),
2110 ty::Adt(def, _) => def.variants[variant_index].fields.len(),
2114 tcx.intern_layout(Layout {
2115 variants: Variants::Single { index: variant_index },
2116 fields: match NonZeroUsize::new(fields) {
2117 Some(fields) => FieldsShape::Union(fields),
2118 None => FieldsShape::Arbitrary { offsets: vec![], memory_index: vec![] },
2120 abi: Abi::Uninhabited,
2121 largest_niche: None,
2122 align: tcx.data_layout.i8_align,
// Multi-variant layouts store per-variant layouts directly; index in.
2127 Variants::Multiple { ref variants, .. } => &variants[variant_index],
2130 assert_eq!(layout.variants, Variants::Single { index: variant_index });
2132 TyAndLayout { ty: this.ty, layout }
// Computes the type (and, when needed, the full layout) of field `i` of
// `this`, dispatching on the kind of `this.ty`.
2135 fn ty_and_layout_field(this: TyAndLayout<'tcx>, cx: &C, i: usize) -> C::TyAndLayout {
// Some cases can only return a type (to be laid out by the caller);
// others must return a ready-made layout (e.g. the enum tag).
2136 enum TyMaybeWithLayout<'tcx, C: LayoutOf<'tcx>> {
2138 TyAndLayout(C::TyAndLayout),
2141 fn ty_and_layout_kind<
2142 C: LayoutOf<'tcx, Ty = Ty<'tcx>> + HasTyCtxt<'tcx> + HasParamEnv<'tcx>,
2144 this: TyAndLayout<'tcx>,
2148 ) -> TyMaybeWithLayout<'tcx, C> {
// Helper: a scalar layout for an enum/generator tag, interned fresh.
2150 let tag_layout = |tag: &Scalar| -> C::TyAndLayout {
2151 let layout = Layout::scalar(cx, tag.clone());
2152 MaybeResult::from(Ok(TyAndLayout {
2153 layout: tcx.intern_layout(layout),
2154 ty: tag.value.to_ty(tcx),
2167 | ty::GeneratorWitness(..)
2169 | ty::Dynamic(..) => bug!("TyAndLayout::field_type({:?}): not applicable", this),
2171 // Potentially-fat pointers.
2172 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
2173 assert!(i < this.fields.count());
2175 // Reuse the fat `*T` type as its own thin pointer data field.
2176 // This provides information about, e.g., DST struct pointees
2177 // (which may have no non-DST form), and will work as long
2178 // as the `Abi` or `FieldsShape` is checked by users.
2180 let nil = tcx.mk_unit();
2181 let ptr_ty = if ty.is_unsafe_ptr() {
2184 tcx.mk_mut_ref(tcx.lifetimes.re_static, nil)
2186 return TyMaybeWithLayout::TyAndLayout(MaybeResult::from(
2187 cx.layout_of(ptr_ty).to_result().map(|mut ptr_layout| {
// Metadata field (index 1): depends on the pointee's unsized tail.
2194 match tcx.struct_tail_erasing_lifetimes(pointee, cx.param_env()).kind() {
2195 ty::Slice(_) | ty::Str => TyMaybeWithLayout::Ty(tcx.types.usize),
2196 ty::Dynamic(_, _) => {
// Vtable pointer, modeled as a reference to a 3-usize array.
2197 TyMaybeWithLayout::Ty(tcx.mk_imm_ref(
2198 tcx.lifetimes.re_static,
2199 tcx.mk_array(tcx.types.usize, 3),
2201 /* FIXME: use actual fn pointers
2202 Warning: naively computing the number of entries in the
2203 vtable by counting the methods on the trait + methods on
2204 all parent traits does not work, because some methods can
2205 be not object safe and thus excluded from the vtable.
2206 Increase this counter if you tried to implement this but
2207 failed to do it without duplicating a lot of code from
2208 other places in the compiler: 2
2210 tcx.mk_array(tcx.types.usize, 3),
2211 tcx.mk_array(Option<fn()>),
2215 _ => bug!("TyAndLayout::field_type({:?}): not applicable", this),
2219 // Arrays and slices.
2220 ty::Array(element, _) | ty::Slice(element) => TyMaybeWithLayout::Ty(element),
2221 ty::Str => TyMaybeWithLayout::Ty(tcx.types.u8),
2223 // Tuples, generators and closures.
2224 ty::Closure(_, ref substs) => {
// Closures delegate to their tupled upvar types.
2225 ty_and_layout_kind(this, cx, i, substs.as_closure().tupled_upvars_ty())
2228 ty::Generator(def_id, ref substs, _) => match this.variants {
2229 Variants::Single { index } => TyMaybeWithLayout::Ty(
2232 .state_tys(def_id, tcx)
2233 .nth(index.as_usize())
2238 Variants::Multiple { ref tag, tag_field, .. } => {
// The tag field gets the synthesized tag layout; others are upvars.
2240 return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
2242 TyMaybeWithLayout::Ty(substs.as_generator().prefix_tys().nth(i).unwrap())
2246 ty::Tuple(tys) => TyMaybeWithLayout::Ty(tys[i].expect_ty()),
2249 ty::Adt(def, substs) => {
2250 match this.variants {
2251 Variants::Single { index } => {
2252 TyMaybeWithLayout::Ty(def.variants[index].fields[i].ty(tcx, substs))
2255 // Discriminant field for enums (where applicable).
2256 Variants::Multiple { ref tag, .. } => {
2258 return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
2265 | ty::Placeholder(..)
2269 | ty::Error(_) => bug!("TyAndLayout::field_type: unexpected type `{}`", this.ty),
// Lay out the resulting type unless a ready-made layout was returned.
2273 cx.layout_of(match ty_and_layout_kind(this, cx, i, this.ty) {
2274 TyMaybeWithLayout::Ty(result) => result,
2275 TyMaybeWithLayout::TyAndLayout(result) => return result,
// NOTE(review): this listing is a sparse extract — interior lines of this
// function are elided, so the comments below describe only what is visible.
//
// Looks for a pointer located at byte `offset` inside the layout `this` and,
// if one is found, reports its size/alignment/address space plus (when it can
// be proven from the type) a `PointerKind` that codegen can turn into
// attributes such as `noalias`/`readonly`/`dereferenceable`.
2279 fn ty_and_layout_pointee_info_at(
2280 this: TyAndLayout<'tcx>,
2283 ) -> Option<PointeeInfo> {
// Function pointers live in the target's instruction address space;
// everything else is ordinary data.
2284 let addr_space_of_ty = |ty: Ty<'tcx>| {
2285 if ty.is_fn() { cx.data_layout().instruction_address_space } else { AddressSpace::DATA }
// First, direct hits at offset 0 where the type itself is pointer-like.
2288 let pointee_info = match *this.ty.kind() {
// Raw pointers: we can report size/align of the pointee, but no `safe`
// kind — `*const`/`*mut` carry no aliasing or dereferenceability guarantees.
2289 ty::RawPtr(mt) if offset.bytes() == 0 => {
2290 cx.layout_of(mt.ty).to_result().ok().map(|layout| PointeeInfo {
2292 align: layout.align.abi,
2294 address_space: addr_space_of_ty(mt.ty),
2297 ty::FnPtr(fn_sig) if offset.bytes() == 0 => {
2298 cx.layout_of(cx.tcx().mk_fn_ptr(fn_sig)).to_result().ok().map(|layout| {
2301 align: layout.align.abi,
2303 address_space: cx.data_layout().instruction_address_space,
// References: mutability together with the Freeze/Unpin properties of the
// pointee determines the `PointerKind` (the exact kind selection is partly
// elided in this extract).
2307 ty::Ref(_, ty, mt) if offset.bytes() == 0 => {
2308 let address_space = addr_space_of_ty(ty);
2310 let kind = if tcx.sess.opts.optimize == OptLevel::No {
2311 // Use conservative pointer kind if not optimizing. This saves us the
2312 // Freeze/Unpin queries, and can save time in the codegen backend (noalias
2313 // attributes in LLVM have compile-time cost even in unoptimized builds).
2317 hir::Mutability::Not => {
// `&T` where `T: Freeze` cannot be mutated through any alias while the
// reference is live; the elided branch presumably selects
// `PointerKind::Frozen` here — TODO confirm against the full source.
2318 if ty.is_freeze(tcx.at(DUMMY_SP), cx.param_env()) {
2324 hir::Mutability::Mut => {
2325 // References to self-referential structures should not be considered
2326 // noalias, as another pointer to the structure can be obtained, that
2327 // is not based-on the original reference. We consider all !Unpin
2328 // types to be potentially self-referential here.
2329 if ty.is_unpin(tcx.at(DUMMY_SP), cx.param_env()) {
2330 PointerKind::UniqueBorrowed
2338 cx.layout_of(ty).to_result().ok().map(|layout| PointeeInfo {
2340 align: layout.align.abi,
// No pointer at the top level: pick the variant whose fields we may descend
// into (single-variant layouts and niche-encoded enums at the niche offset).
2347 let mut data_variant = match this.variants {
2348 // Within the discriminant field, only the niche itself is
2349 // always initialized, so we only check for a pointer at its
2352 // If the niche is a pointer, it's either valid (according
2353 // to its type), or null (which the niche field's scalar
2354 // validity range encodes). This allows using
2355 // `dereferenceable_or_null` for e.g., `Option<&T>`, and
2356 // this will continue to work as long as we don't start
2357 // using more niches than just null (e.g., the first page of
2358 // the address space, or unaligned pointers).
2359 Variants::Multiple {
2360 tag_encoding: TagEncoding::Niche { dataful_variant, .. },
2363 } if this.fields.offset(tag_field) == offset => {
2364 Some(this.for_variant(cx, dataful_variant))
2369 if let Some(variant) = data_variant {
2370 // We're not interested in any unions.
2371 if let FieldsShape::Union(_) = variant.fields {
2372 data_variant = None;
2376 let mut result = None;
2378 if let Some(variant) = data_variant {
// Scan the variant's fields for the one that fully covers the pointer-sized
// span [offset, offset + Pointer.size), then recurse at the adjusted offset.
2379 let ptr_end = offset + Pointer.size(cx);
2380 for i in 0..variant.fields.count() {
2381 let field_start = variant.fields.offset(i);
2382 if field_start <= offset {
2383 let field = variant.field(cx, i);
2384 result = field.to_result().ok().and_then(|field| {
2385 if ptr_end <= field_start + field.size {
2386 // We found the right field, look inside it.
2388 field.pointee_info_at(cx, offset - field_start);
2394 if result.is_some() {
2401 // FIXME(eddyb) This should be for `ptr::Unique<T>`, not `Box<T>`.
2402 if let Some(ref mut pointee) = result {
2403 if let ty::Adt(def, _) = this.ty.kind() {
2404 if def.is_box() && offset.bytes() == 0 {
// The data pointer at the start of a `Box` is an owning, unique pointer.
2405 pointee.safe = Some(PointerKind::UniqueOwned);
2415 "pointee_info_at (offset={:?}, type kind: {:?}) => {:?}",
// Stable hashing for `LayoutError`: hash the variant discriminant first, then
// any type payload the variant carries, so equal errors hash equally across
// compilation sessions.
2425 impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for LayoutError<'tcx> {
2427 fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
2428 use crate::ty::layout::LayoutError::*;
// The discriminant distinguishes `Unknown` from `SizeOverflow`.
2429 mem::discriminant(self).hash_stable(hcx, hasher);
// Both visible variants carry a `Ty` payload, hashed identically.
2432 Unknown(t) | SizeOverflow(t) => t.hash_stable(hcx, hasher),
// NOTE(review): sparse extract — some interior lines of this impl are elided;
// comments below cover only what is visible.
2437 impl<'tcx> ty::Instance<'tcx> {
2438 // NOTE(eddyb) this is private to avoid using it from outside of
2439 // `FnAbi::of_instance` - any other uses are either too high-level
2440 // for `Instance` (e.g. typeck would use `Ty::fn_sig` instead),
2441 // or should go through `FnAbi` instead, to avoid losing any
2442 // adjustments `FnAbi::of_instance` might be performing.
//
// Produces the polymorphic signature used for ABI computation, lowering
// closures and generators to the explicit-environment signatures their
// compiled bodies actually have.
2443 fn fn_sig_for_fn_abi(&self, tcx: TyCtxt<'tcx>) -> ty::PolyFnSig<'tcx> {
2444 // FIXME(davidtwco,eddyb): A `ParamEnv` should be passed through to this function.
2445 let ty = self.ty(tcx, ty::ParamEnv::reveal_all());
2448 // HACK(davidtwco,eddyb): This is a workaround for polymorphization considering
2449 // parameters unused if they show up in the signature, but not in the `mir::Body`
2450 // (i.e. due to being inside a projection that got normalized, see
2451 // `src/test/ui/polymorphization/normalized_sig_types.rs`), and codegen not keeping
2452 // track of a polymorphization `ParamEnv` to allow normalizing later.
2453 let mut sig = match *ty.kind() {
2454 ty::FnDef(def_id, substs) => tcx
2455 .normalize_erasing_regions(tcx.param_env(def_id), tcx.fn_sig(def_id))
2456 .subst(tcx, substs),
2457 _ => unreachable!(),
2460 if let ty::InstanceDef::VtableShim(..) = self.def {
2461 // Modify `fn(self, ...)` to `fn(self: *mut Self, ...)`.
2462 sig = sig.map_bound(|mut sig| {
2463 let mut inputs_and_output = sig.inputs_and_output.to_vec();
2464 inputs_and_output[0] = tcx.mk_mut_ptr(inputs_and_output[0]);
2465 sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);
// Closures: prepend the captured-environment parameter (bound with a fresh
// `BrEnv` late-bound region) to the closure's declared inputs.
2471 ty::Closure(def_id, substs) => {
2472 let sig = substs.as_closure().sig();
2474 let bound_vars = tcx.mk_bound_variable_kinds(
2477 .chain(iter::once(ty::BoundVariableKind::Region(ty::BrEnv))),
2479 let br = ty::BoundRegion {
2480 var: ty::BoundVar::from_usize(bound_vars.len() - 1),
2481 kind: ty::BoundRegionKind::BrEnv,
2483 let env_region = ty::ReLateBound(ty::INNERMOST, br);
2484 let env_ty = tcx.closure_env_ty(def_id, substs, env_region).unwrap();
2486 let sig = sig.skip_binder();
2487 ty::Binder::bind_with_vars(
2489 iter::once(env_ty).chain(sig.inputs().iter().cloned()),
// Generators: the resume signature is
// `fn(Pin<&mut Generator>, ResumeTy) -> GeneratorState<Yield, Return>`,
// assembled below from the `Pin` and `GeneratorState` lang items.
2498 ty::Generator(_, substs, _) => {
2499 let sig = substs.as_generator().poly_sig();
2501 let bound_vars = tcx.mk_bound_variable_kinds(
2504 .chain(iter::once(ty::BoundVariableKind::Region(ty::BrEnv))),
2506 let br = ty::BoundRegion {
2507 var: ty::BoundVar::from_usize(bound_vars.len() - 1),
2508 kind: ty::BoundRegionKind::BrEnv,
2510 let env_region = ty::ReLateBound(ty::INNERMOST, br);
2511 let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty);
// Wrap `&mut Generator` in `Pin<..>` via the `Pin` lang item.
2513 let pin_did = tcx.require_lang_item(LangItem::Pin, None);
2514 let pin_adt_ref = tcx.adt_def(pin_did);
2515 let pin_substs = tcx.intern_substs(&[env_ty.into()]);
2516 let env_ty = tcx.mk_adt(pin_adt_ref, pin_substs);
2518 let sig = sig.skip_binder();
// Return type is `GeneratorState<yield_ty, return_ty>`.
2519 let state_did = tcx.require_lang_item(LangItem::GeneratorState, None);
2520 let state_adt_ref = tcx.adt_def(state_did);
2521 let state_substs = tcx.intern_substs(&[sig.yield_ty.into(), sig.return_ty.into()]);
2522 let ret_ty = tcx.mk_adt(state_adt_ref, state_substs);
2523 ty::Binder::bind_with_vars(
2525 [env_ty, sig.resume_ty].iter(),
2528 hir::Unsafety::Normal,
2529 rustc_target::spec::abi::Abi::Rust,
2534 _ => bug!("unexpected type {:?} in Instance::fn_sig", ty),
// Extension trait computing `FnAbi`s (call-lowering descriptions) for Rust
// signatures and instances. NOTE(review): sparse extract — some bound lines
// and the `fn new_internal(` declaration line are elided here.
2539 pub trait FnAbiExt<'tcx, C>
2541 C: LayoutOf<'tcx, Ty = Ty<'tcx>, TyAndLayout = TyAndLayout<'tcx>>
2545 + HasParamEnv<'tcx>,
2547 /// Compute a `FnAbi` suitable for indirect calls, i.e. to `fn` pointers.
2549 /// NB: this doesn't handle virtual calls - those should use `FnAbi::of_instance`
2550 /// instead, where the instance is an `InstanceDef::Virtual`.
2551 fn of_fn_ptr(cx: &C, sig: ty::PolyFnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self;
2553 /// Compute a `FnAbi` suitable for declaring/defining an `fn` instance, and for
2554 /// direct calls to an `fn`.
2556 /// NB: that includes virtual calls, which are represented by "direct calls"
2557 /// to an `InstanceDef::Virtual` instance (of `<dyn Trait as Trait>::fn`).
2558 fn of_instance(cx: &C, instance: ty::Instance<'tcx>, extra_args: &[Ty<'tcx>]) -> Self;
// Shared worker behind `of_fn_ptr`/`of_instance` (its `fn new_internal(`
// declaration line is elided in this extract; these are its parameters).
2562 sig: ty::PolyFnSig<'tcx>,
2563 extra_args: &[Ty<'tcx>],
2564 caller_location: Option<Ty<'tcx>>,
2565 codegen_fn_attr_flags: CodegenFnAttrFlags,
2566 make_self_ptr_thin: bool,
// Post-processes the computed `FnAbi` for the given spec-level ABI.
2568 fn adjust_for_abi(&mut self, cx: &C, abi: SpecAbi);
2571 /// Calculates whether a function's ABI can unwind or not.
2573 /// This takes two primary parameters:
2575 /// * `codegen_fn_attr_flags` - these are flags calculated as part of the
2576 ///   codegen attrs for a defined function. For function pointers this set of
2577 ///   flags is the empty set. This is only applicable for Rust-defined
2578 ///   functions, and generally isn't needed except for small optimizations where
2579 ///   we try to say a function which otherwise might look like it could unwind
2580 ///   doesn't actually unwind (such as for intrinsics and such).
2582 /// * `abi` - this is the ABI that the function is defined with. This is the
2583 ///   primary factor for determining whether a function can unwind or not.
2585 /// Note that in this case unwinding is not necessarily panicking in Rust. Rust
2586 /// panics are implemented with unwinds on most platforms (when
2587 /// `-Cpanic=unwind`), but this also accounts for `-Cpanic=abort` build modes.
2588 /// Notably unwinding is disallowed for more non-Rust ABIs unless it's
2589 /// specifically in the name (e.g. `"C-unwind"`). Unwinding within each ABI is
2590 /// defined for each ABI individually, but it always corresponds to some form of
2591 /// stack-based unwinding (the exact mechanism of which varies
2592 /// platform-by-platform).
2594 /// Rust functions are classified whether or not they can unwind based on the
2595 /// active "panic strategy". In other words Rust functions are considered to
2596 /// unwind in `-Cpanic=unwind` mode and cannot unwind in `-Cpanic=abort` mode.
2597 /// Note that Rust supports intermingling panic=abort and panic=unwind code, but
2598 /// only if the final panic mode is panic=abort. In this scenario any code
2599 /// previously compiled assuming that a function can unwind is still correct, it
2600 /// just never happens to actually unwind at runtime.
2602 /// This function's answer to whether or not a function can unwind is quite
2603 /// impactful throughout the compiler. This affects things like:
2605 /// * Calling a function which can't unwind means codegen simply ignores any
2606 ///   associated unwinding cleanup.
2607 /// * Calling a function which can unwind from a function which can't unwind
2608 ///   causes the `abort_unwinding_calls` MIR pass to insert a landing pad that
2609 ///   aborts the process.
2610 /// * This affects whether functions have the LLVM `nounwind` attribute, which
2611 ///   affects various optimizations and codegen.
2613 /// FIXME: this is actually buggy with respect to Rust functions. Rust functions
2614 /// compiled with `-Cpanic=unwind` and referenced from another crate compiled
2615 /// with `-Cpanic=abort` will look like they can't unwind when in fact they
2616 /// might (from a foreign exception or similar).
2618 pub fn fn_can_unwind(
2620 codegen_fn_attr_flags: CodegenFnAttrFlags,
2623 // Special attribute for functions which can't unwind.
2624 if codegen_fn_attr_flags.contains(CodegenFnAttrFlags::NEVER_UNWIND) {
2628 // Otherwise if this isn't special then unwinding is generally determined by
2629 // the ABI of the function itself. ABIs like `C` have variants which also
2630 // specifically allow unwinding (`C-unwind`), but not all platform-specific
2631 // ABIs have such an option. Otherwise the only other thing here is Rust
2632 // itself, and those ABIs are determined by the panic strategy configured
2633 // for this compilation.
2635 // Unfortunately at this time there's also another caveat. Rust [RFC
2636 // 2945][rfc] has been accepted and is in the process of being implemented
2637 // and stabilized. In this interim state we need to deal with historical
2638 // rustc behavior as well as plan for future rustc behavior.
2640 // Historically functions declared with `extern "C"` were marked at the
2641 // codegen layer as `nounwind`. This happened regardless of `panic=unwind`
2642 // or not. This is UB for functions in `panic=unwind` mode that then
2643 // actually panic and unwind. Note that this behavior is true for both
2644 // externally declared functions as well as Rust-defined function.
2646 // To fix this UB rustc would like to change in the future to catch unwinds
2647 // from function calls that may unwind within a Rust-defined `extern "C"`
2648 // function and forcibly abort the process, thereby respecting the
2649 // `nounwind` attribute emitted for `extern "C"`. This behavior change isn't
2650 // ready to roll out, so determining whether or not the `C` family of ABIs
2651 // unwinds is conditional not only on their definition but also whether the
2652 // `#![feature(c_unwind)]` feature gate is active.
2654 // Note that this means that unlike historical compilers rustc now, by
2655 // default, unconditionally thinks that the `C` ABI may unwind. This will
2656 // prevent some optimization opportunities, however, so we try to scope this
2657 // change and only assume that `C` unwinds with `panic=unwind` (as opposed
2658 // to `panic=abort`).
2660 // Eventually the check against `c_unwind` here will ideally get removed and
2661 // this'll be a little cleaner as it'll be a straightforward check of the
2664 // [rfc]: https://github.com/rust-lang/rfcs/blob/master/text/2945-c-unwind-abi.md
// The `C` family: unwinds if the `-unwind` variant was requested, or (pre
// RFC 2945 stabilization) when the `c_unwind` gate is off and we're in
// `panic=unwind` mode.
2667 C { unwind } | Stdcall { unwind } | System { unwind } | Thiscall { unwind } => {
2669 || (!tcx.features().c_unwind && tcx.sess.panic_strategy() == PanicStrategy::Unwind)
// Various platform-specific / special-purpose ABIs (list partially elided
// in this extract) never unwind.
2683 | AvrNonBlockingInterrupt
2684 | CCmseNonSecureCall
2688 | Unadjusted => false,
// Rust ABIs: determined purely by the configured panic strategy.
2689 Rust | RustCall => tcx.sess.panic_strategy() == PanicStrategy::Unwind,
// Maps a spec-level ABI to the low-level calling convention (`Conv`) used by
// codegen, after letting the target adjust the ABI first. `System`/`EfiApi`
// must have been resolved to a concrete ABI by `adjust_abi` before this point.
2694 pub fn conv_from_spec_abi(tcx: TyCtxt<'_>, abi: SpecAbi) -> Conv {
2695 use rustc_target::spec::abi::Abi::*;
2696 match tcx.sess.target.adjust_abi(abi) {
// All Rust-internal ABIs share a single calling convention.
2697 RustIntrinsic | PlatformIntrinsic | Rust | RustCall => Conv::Rust,
2699 // It's the ABI's job to select this, not ours.
2700 System { .. } => bug!("system abi should be selected elsewhere"),
2701 EfiApi => bug!("eficall abi should be selected elsewhere"),
2703 Stdcall { .. } => Conv::X86Stdcall,
2704 Fastcall => Conv::X86Fastcall,
2705 Vectorcall => Conv::X86VectorCall,
2706 Thiscall { .. } => Conv::X86ThisCall,
2707 C { .. } => Conv::C,
// `Unadjusted` lowers like `C`; the "no adjustment" behavior is handled
// separately in `adjust_for_abi` (which bails out early for it).
2708 Unadjusted => Conv::C,
2709 Win64 => Conv::X86_64Win64,
2710 SysV64 => Conv::X86_64SysV,
2711 Aapcs => Conv::ArmAapcs,
2712 CCmseNonSecureCall => Conv::CCmseNonSecureCall,
2713 PtxKernel => Conv::PtxKernel,
2714 Msp430Interrupt => Conv::Msp430Intr,
2715 X86Interrupt => Conv::X86Intr,
2716 AmdGpuKernel => Conv::AmdGpuKernel,
2717 AvrInterrupt => Conv::AvrInterrupt,
2718 AvrNonBlockingInterrupt => Conv::AvrNonBlockingInterrupt,
2721 // These API constants ought to be more specific...
// NOTE(review): sparse extract — interior lines of this impl are elided;
// comments below describe only what is visible.
2726 impl<'tcx, C> FnAbiExt<'tcx, C> for call::FnAbi<'tcx, Ty<'tcx>>
2728 C: LayoutOf<'tcx, Ty = Ty<'tcx>, TyAndLayout = TyAndLayout<'tcx>>
2732 + HasParamEnv<'tcx>,
// Function-pointer calls: no instance, no caller-location, no codegen attrs,
// and no thin-self-pointer adjustment.
2734 fn of_fn_ptr(cx: &C, sig: ty::PolyFnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
2735 call::FnAbi::new_internal(cx, sig, extra_args, None, CodegenFnAttrFlags::empty(), false)
// Instance calls: derive the signature from the instance, thread through
// `#[track_caller]` location and codegen attr flags, and request the
// thin-self-pointer adjustment for virtual (`dyn`) calls.
2738 fn of_instance(cx: &C, instance: ty::Instance<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
2739 let sig = instance.fn_sig_for_fn_abi(cx.tcx());
2741 let caller_location = if instance.def.requires_caller_location(cx.tcx()) {
2742 Some(cx.tcx().caller_location_ty())
2747 let attrs = cx.tcx().codegen_fn_attrs(instance.def_id()).flags;
2749 call::FnAbi::new_internal(
// Virtual instances get `force_thin_self_ptr = true` (see `arg_of` below).
2755 matches!(instance.def, ty::InstanceDef::Virtual(..)),
// Shared worker: computes the full `FnAbi` for a monomorphic signature.
// (The `fn new_internal(` declaration line is elided in this extract.)
2761 sig: ty::PolyFnSig<'tcx>,
2762 extra_args: &[Ty<'tcx>],
2763 caller_location: Option<Ty<'tcx>>,
2764 codegen_fn_attr_flags: CodegenFnAttrFlags,
2765 force_thin_self_ptr: bool,
2767 debug!("FnAbi::new_internal({:?}, {:?})", sig, extra_args);
2769 let sig = cx.tcx().normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), sig);
2771 let conv = conv_from_spec_abi(cx.tcx(), sig.abi);
// "rust-call" functions receive their trailing arguments as one tuple;
// untuple it here so each element becomes a separate ABI argument.
2773 let mut inputs = sig.inputs();
2774 let extra_args = if sig.abi == RustCall {
2775 assert!(!sig.c_variadic && extra_args.is_empty());
2777 if let Some(input) = sig.inputs().last() {
2778 if let ty::Tuple(tupled_arguments) = input.kind() {
2779 inputs = &sig.inputs()[0..sig.inputs().len() - 1];
2780 tupled_arguments.iter().map(|k| k.expect_ty()).collect()
2783 "argument to function with \"rust-call\" ABI \
2789 "argument to function with \"rust-call\" ABI \
2794 assert!(sig.c_variadic || extra_args.is_empty());
// Target quirks used below when deciding whether ZST arguments can be
// ignored (some gnu-like targets do not ignore them).
2798 let target = &cx.tcx().sess.target;
2799 let target_env_gnu_like = matches!(&target.env[..], "gnu" | "musl");
2800 let win_x64_gnu = target.os == "windows" && target.arch == "x86_64" && target.env == "gnu";
2801 let linux_s390x_gnu_like =
2802 target.os == "linux" && target.arch == "s390x" && target_env_gnu_like;
2803 let linux_sparc64_gnu_like =
2804 target.os == "linux" && target.arch == "sparc64" && target_env_gnu_like;
2805 let linux_powerpc_gnu_like =
2806 target.os == "linux" && target.arch == "powerpc" && target_env_gnu_like;
2808 let rust_abi = matches!(sig.abi, RustIntrinsic | PlatformIntrinsic | Rust | RustCall);
2810 // Handle safe Rust thin and fat pointers.
// Per-scalar attribute adjustment: zero-extend bools, and attach
// NonNull/align/size/NoAlias/ReadOnly attributes to safe pointers.
2811 let adjust_for_rust_scalar = |attrs: &mut ArgAttributes,
2813 layout: TyAndLayout<'tcx>,
2816 // Booleans are always an i1 that needs to be zero-extended.
2817 if scalar.is_bool() {
2818 attrs.ext(ArgExtension::Zext);
2822 // Only pointer types handled below.
2823 if scalar.value != Pointer {
// A scalar whose valid range excludes zero can never be a null pointer.
2827 if !scalar.valid_range.contains_zero() {
2828 attrs.set(ArgAttribute::NonNull);
2831 if let Some(pointee) = layout.pointee_info_at(cx, offset) {
2832 if let Some(kind) = pointee.safe {
2833 attrs.pointee_align = Some(pointee.align);
2835 // `Box` (`UniqueOwned`) are not necessarily dereferenceable
2836 // for the entire duration of the function as they can be deallocated
2837 // at any time. Set their valid size to 0.
2838 attrs.pointee_size = match kind {
2839 PointerKind::UniqueOwned => Size::ZERO,
2843 // `Box` pointer parameters never alias because ownership is transferred
2844 // `&mut` pointer parameters never alias other parameters,
2845 // or mutable global data
2847 // `&T` where `T` contains no `UnsafeCell<U>` is immutable,
2848 // and can be marked as both `readonly` and `noalias`, as
2849 // LLVM's definition of `noalias` is based solely on memory
2850 // dependencies rather than pointer equality
2852 // Due to miscompiles in LLVM < 12, we apply a separate NoAliasMutRef attribute
2853 // for UniqueBorrowed arguments, so that the codegen backend can decide
2854 // whether or not to actually emit the attribute.
2855 let no_alias = match kind {
2856 PointerKind::Shared | PointerKind::UniqueBorrowed => false,
2857 PointerKind::UniqueOwned => true,
2858 PointerKind::Frozen => !is_return,
2861 attrs.set(ArgAttribute::NoAlias);
2864 if kind == PointerKind::Frozen && !is_return {
2865 attrs.set(ArgAttribute::ReadOnly);
2868 if kind == PointerKind::UniqueBorrowed && !is_return {
2869 attrs.set(ArgAttribute::NoAliasMutRef);
// Builds the `ArgAbi` for one argument (`arg_idx == Some(i)`) or the return
// value (`arg_idx == None`).
2875 let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| {
2876 let is_return = arg_idx.is_none();
2878 let layout = cx.layout_of(ty);
2879 let layout = if force_thin_self_ptr && arg_idx == Some(0) {
2880 // Don't pass the vtable, it's not an argument of the virtual fn.
2881 // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
2882 // or `&/&mut dyn Trait` because this is special-cased elsewhere in codegen
2883 make_thin_self_ptr(cx, layout)
2888 let mut arg = ArgAbi::new(cx, layout, |layout, scalar, offset| {
2889 let mut attrs = ArgAttributes::new();
2890 adjust_for_rust_scalar(&mut attrs, scalar, *layout, offset, is_return);
2894 if arg.layout.is_zst() {
2895 // For some forsaken reason, x86_64-pc-windows-gnu
2896 // doesn't ignore zero-sized struct arguments.
2897 // The same is true for {s390x,sparc64,powerpc}-unknown-linux-{gnu,musl}.
2901 && !linux_s390x_gnu_like
2902 && !linux_sparc64_gnu_like
2903 && !linux_powerpc_gnu_like)
2905 arg.mode = PassMode::Ignore;
// Assemble the `FnAbi`: return slot, declared inputs, extra (untupled /
// variadic) args, and the optional `#[track_caller]` location argument.
2912 let mut fn_abi = FnAbi {
2913 ret: arg_of(sig.output(), None),
2918 .chain(caller_location)
2920 .map(|(i, ty)| arg_of(ty, Some(i)))
2922 c_variadic: sig.c_variadic,
2923 fixed_count: inputs.len(),
2925 can_unwind: fn_can_unwind(cx.tcx(), codegen_fn_attr_flags, sig.abi),
2927 fn_abi.adjust_for_abi(cx, sig.abi);
2928 debug!("FnAbi::new_internal = {:?}", fn_abi);
// ABI-specific fixups: Rust ABIs get the in-tree pass-mode adjustment below;
// other ABIs are delegated to the per-target C ABI code (`adjust_for_cabi`).
2932 fn adjust_for_abi(&mut self, cx: &C, abi: SpecAbi) {
// `Unadjusted` means exactly that: leave the computed modes untouched.
2933 if abi == SpecAbi::Unadjusted {
2937 if abi == SpecAbi::Rust
2938 || abi == SpecAbi::RustCall
2939 || abi == SpecAbi::RustIntrinsic
2940 || abi == SpecAbi::PlatformIntrinsic
2942 let fixup = |arg: &mut ArgAbi<'tcx, Ty<'tcx>>| {
2943 if arg.is_ignore() {
2947 match arg.layout.abi {
2948 Abi::Aggregate { .. } => {}
2950 // This is a fun case! The gist of what this is doing is
2951 // that we want callers and callees to always agree on the
2952 // ABI of how they pass SIMD arguments. If we were to *not*
2953 // make these arguments indirect then they'd be immediates
2954 // in LLVM, which means that they'd used whatever the
2955 // appropriate ABI is for the callee and the caller. That
2956 // means, for example, if the caller doesn't have AVX
2957 // enabled but the callee does, then passing an AVX argument
2958 // across this boundary would cause corrupt data to show up.
2960 // This problem is fixed by unconditionally passing SIMD
2961 // arguments through memory between callers and callees
2962 // which should get them all to agree on ABI regardless of
2963 // target feature sets. Some more information about this
2964 // issue can be found in #44367.
2966 // Note that the platform intrinsic ABI is exempt here as
2967 // that's how we connect up to LLVM and it's unstable
2968 // anyway, we control all calls to it in libstd.
2970 if abi != SpecAbi::PlatformIntrinsic
2971 && cx.tcx().sess.target.simd_types_indirect =>
2973 arg.make_indirect();
2980 // Pass and return structures up to 2 pointers in size by value, matching `ScalarPair`.
2981 // LLVM will usually pass these in 2 registers, which is more efficient than by-ref.
2982 let max_by_val_size = Pointer.size(cx) * 2;
2983 let size = arg.layout.size;
2985 if arg.layout.is_unsized() || size > max_by_val_size {
2986 arg.make_indirect();
2988 // We want to pass small aggregates as immediates, but using
2989 // a LLVM aggregate type for this leads to bad optimizations,
2990 // so we pick an appropriately sized integer type instead.
2991 arg.cast_to(Reg { kind: RegKind::Integer, size });
// Apply the fixup to the return slot and to every argument.
2994 fixup(&mut self.ret);
2995 for arg in &mut self.args {
// Non-Rust ABIs: per-target C ABI lowering; a failure there is fatal.
3001 if let Err(msg) = self.adjust_for_cabi(cx, abi) {
3002 cx.tcx().sess.fatal(&msg);
3007 fn make_thin_self_ptr<'tcx, C>(cx: &C, mut layout: TyAndLayout<'tcx>) -> TyAndLayout<'tcx>
3009 C: LayoutOf<'tcx, Ty = Ty<'tcx>, TyAndLayout = TyAndLayout<'tcx>>
3011 + HasParamEnv<'tcx>,
3013 let fat_pointer_ty = if layout.is_unsized() {
3014 // unsized `self` is passed as a pointer to `self`
3015 // FIXME (mikeyhew) change this to use &own if it is ever added to the language
3016 cx.tcx().mk_mut_ptr(layout.ty)
3019 Abi::ScalarPair(..) => (),
3020 _ => bug!("receiver type has unsupported layout: {:?}", layout),
3023 // In the case of Rc<Self>, we need to explicitly pass a *mut RcBox<Self>
3024 // with a Scalar (not ScalarPair) ABI. This is a hack that is understood
3025 // elsewhere in the compiler as a method on a `dyn Trait`.
3026 // To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes until we
3027 // get a built-in pointer type
3028 let mut fat_pointer_layout = layout;
3029 'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr()
3030 && !fat_pointer_layout.ty.is_region_ptr()
3032 for i in 0..fat_pointer_layout.fields.count() {
3033 let field_layout = fat_pointer_layout.field(cx, i);
3035 if !field_layout.is_zst() {
3036 fat_pointer_layout = field_layout;
3037 continue 'descend_newtypes;
3041 bug!("receiver has no non-zero-sized fields {:?}", fat_pointer_layout);
3044 fat_pointer_layout.ty
3047 // we now have a type like `*mut RcBox<dyn Trait>`
3048 // change its layout to that of `*mut ()`, a thin pointer, but keep the same type
3049 // this is understood as a special case elsewhere in the compiler
3050 let unit_pointer_ty = cx.tcx().mk_mut_ptr(cx.tcx().mk_unit());
3051 layout = cx.layout_of(unit_pointer_ty);
3052 layout.ty = fat_pointer_ty;