1 use crate::ich::StableHashingContext;
2 use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
3 use crate::mir::{GeneratorLayout, GeneratorSavedLocal};
4 use crate::ty::subst::Subst;
5 use crate::ty::{self, subst::SubstsRef, ReprOptions, Ty, TyCtxt, TypeFoldable};
8 use rustc_attr as attr;
9 use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
11 use rustc_hir::lang_items::LangItem;
12 use rustc_index::bit_set::BitSet;
13 use rustc_index::vec::{Idx, IndexVec};
14 use rustc_session::{config::OptLevel, DataTypeKind, FieldInfo, SizeKind, VariantInfo};
15 use rustc_span::symbol::{Ident, Symbol};
16 use rustc_span::DUMMY_SP;
17 use rustc_target::abi::call::{
18 ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, Conv, FnAbi, PassMode, Reg, RegKind,
20 use rustc_target::abi::*;
21 use rustc_target::spec::{abi::Abi as SpecAbi, HasTargetSpec, PanicStrategy};
27 use std::num::NonZeroUsize;
// Extension trait layering type-system conversions onto `rustc_target`'s
// `Integer` (an abstract integer width: I8..I128).
// NOTE(review): this view of the file is truncated — the `repr_discr`
// signature and the trait's closing brace are not visible here.
30 pub trait IntegerExt {
// Lower this integer width to the concrete Rust primitive type
// (`u8`..`u128` when `signed` is false, `i8`..`i128` when true).
31 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx>;
// Map an `attr::IntType` (from a `#[repr(..)]` attribute) to an `Integer`;
// needs the data layout to resolve `isize`/`usize` to the pointer width.
32 fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
// Map a type-system signed integer type to its `Integer` width.
33 fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer;
// Map a type-system unsigned integer type to its `Integer` width.
34 fn from_uint_ty<C: HasDataLayout>(cx: &C, uty: ty::UintTy) -> Integer;
44 impl IntegerExt for Integer {
46 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx> {
47 match (*self, signed) {
48 (I8, false) => tcx.types.u8,
49 (I16, false) => tcx.types.u16,
50 (I32, false) => tcx.types.u32,
51 (I64, false) => tcx.types.u64,
52 (I128, false) => tcx.types.u128,
53 (I8, true) => tcx.types.i8,
54 (I16, true) => tcx.types.i16,
55 (I32, true) => tcx.types.i32,
56 (I64, true) => tcx.types.i64,
57 (I128, true) => tcx.types.i128,
61 /// Gets the Integer type from an attr::IntType.
62 fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
63 let dl = cx.data_layout();
66 attr::SignedInt(ast::IntTy::I8) | attr::UnsignedInt(ast::UintTy::U8) => I8,
67 attr::SignedInt(ast::IntTy::I16) | attr::UnsignedInt(ast::UintTy::U16) => I16,
68 attr::SignedInt(ast::IntTy::I32) | attr::UnsignedInt(ast::UintTy::U32) => I32,
69 attr::SignedInt(ast::IntTy::I64) | attr::UnsignedInt(ast::UintTy::U64) => I64,
70 attr::SignedInt(ast::IntTy::I128) | attr::UnsignedInt(ast::UintTy::U128) => I128,
71 attr::SignedInt(ast::IntTy::Isize) | attr::UnsignedInt(ast::UintTy::Usize) => {
72 dl.ptr_sized_integer()
77 fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer {
80 ty::IntTy::I16 => I16,
81 ty::IntTy::I32 => I32,
82 ty::IntTy::I64 => I64,
83 ty::IntTy::I128 => I128,
84 ty::IntTy::Isize => cx.data_layout().ptr_sized_integer(),
87 fn from_uint_ty<C: HasDataLayout>(cx: &C, ity: ty::UintTy) -> Integer {
90 ty::UintTy::U16 => I16,
91 ty::UintTy::U32 => I32,
92 ty::UintTy::U64 => I64,
93 ty::UintTy::U128 => I128,
94 ty::UintTy::Usize => cx.data_layout().ptr_sized_integer(),
98 /// Finds the appropriate Integer type and signedness for the given
99 /// signed discriminant range and `#[repr]` attribute.
100 /// N.B.: `u128` values above `i128::MAX` will be treated as signed, but
101 /// that shouldn't affect anything, other than maybe debuginfo.
108 ) -> (Integer, bool) {
109 // Theoretically, negative values could be larger in unsigned representation
110 // than the unsigned representation of the signed minimum. However, if there
111 // are any negative values, the only valid unsigned representation is u128
112 // which can fit all i128 values, so the result remains unaffected.
113 let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
114 let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));
116 if let Some(ity) = repr.int {
117 let discr = Integer::from_attr(&tcx, ity);
118 let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
121 "Integer::repr_discr: `#[repr]` hint too small for \
122 discriminant range of enum `{}",
126 return (discr, ity.is_signed());
129 let at_least = if repr.c() {
130 // This is usually I32, however it can be different on some platforms,
131 // notably hexagon and arm-none/thumb-none
132 tcx.data_layout().c_enum_min_size
134 // repr(Rust) enums try to be as small as possible
138 // If there are no negative values, we can use the unsigned fit.
140 (cmp::max(unsigned_fit, at_least), false)
142 (cmp::max(signed_fit, at_least), true)
// Extension trait layering type-system conversions onto `rustc_target`'s
// `Primitive` (Int/F32/F64/Pointer).
// NOTE(review): the trait's closing brace is elided from this view.
147 pub trait PrimitiveExt {
// Lower this primitive to the corresponding Rust `Ty`
// (pointers become `*mut ()` per the impl below).
148 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
// Lower this primitive to an *integer* `Ty` (pointers map to `usize`;
// floats are a bug here) — used for enum discriminants per the impl below.
149 fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
152 impl PrimitiveExt for Primitive {
154 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
156 Int(i, signed) => i.to_ty(tcx, signed),
157 F32 => tcx.types.f32,
158 F64 => tcx.types.f64,
159 Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
163 /// Return an *integer* type matching this primitive.
164 /// Useful in particular when dealing with enum discriminants.
166 fn to_int_ty(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
168 Int(i, signed) => i.to_ty(tcx, signed),
169 Pointer => tcx.types.usize,
170 F32 | F64 => bug!("floats do not have an int type"),
175 /// The first half of a fat pointer.
177 /// - For a trait object, this is the address of the box.
178 /// - For a slice, this is the base address.
179 pub const FAT_PTR_ADDR: usize = 0;
181 /// The second half of a fat pointer.
183 /// - For a trait object, this is the address of the vtable.
184 /// - For a slice, this is the length.
185 pub const FAT_PTR_EXTRA: usize = 1;
187 /// The maximum supported number of lanes in a SIMD vector.
189 /// This value is selected based on backend support:
190 /// * LLVM does not appear to have a vector width limit.
191 /// * Cranelift stores the base-2 log of the lane count in a 4 bit integer.
192 pub const MAX_SIMD_LANES: u64 = 1 << 0xF;
194 #[derive(Copy, Clone, Debug, TyEncodable, TyDecodable)]
195 pub enum LayoutError<'tcx> {
197 SizeOverflow(Ty<'tcx>),
200 impl<'tcx> fmt::Display for LayoutError<'tcx> {
201 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
203 LayoutError::Unknown(ty) => write!(f, "the type `{}` has an unknown layout", ty),
204 LayoutError::SizeOverflow(ty) => {
205 write!(f, "values of the type `{}` are too big for the current architecture", ty)
213 query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
214 ) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
215 ty::tls::with_related_context(tcx, move |icx| {
216 let (param_env, ty) = query.into_parts();
218 if !tcx.recursion_limit().value_within_limit(icx.layout_depth) {
219 tcx.sess.fatal(&format!("overflow representing the type `{}`", ty));
222 // Update the ImplicitCtxt to increase the layout_depth
223 let icx = ty::tls::ImplicitCtxt { layout_depth: icx.layout_depth + 1, ..icx.clone() };
225 ty::tls::enter_context(&icx, |_| {
226 let param_env = param_env.with_reveal_all_normalized(tcx);
227 let unnormalized_ty = ty;
228 let ty = tcx.normalize_erasing_regions(param_env, ty);
229 if ty != unnormalized_ty {
230 // Ensure this layout is also cached for the normalized type.
231 return tcx.layout_of(param_env.and(ty));
234 let cx = LayoutCx { tcx, param_env };
236 let layout = cx.layout_of_uncached(ty)?;
237 let layout = TyAndLayout { ty, layout };
239 cx.record_layout_for_printing(layout);
241 // Type-level uninhabitedness should always imply ABI uninhabitedness.
242 if tcx.conservative_is_privately_uninhabited(param_env.and(ty)) {
243 assert!(layout.abi.is_uninhabited());
251 pub fn provide(providers: &mut ty::query::Providers) {
252 *providers = ty::query::Providers { layout_of, ..*providers };
255 pub struct LayoutCx<'tcx, C> {
257 pub param_env: ty::ParamEnv<'tcx>,
260 #[derive(Copy, Clone, Debug)]
262 /// A tuple, closure, or univariant which cannot be coerced to unsized.
264 /// A univariant, the last field of which may be coerced to unsized.
266 /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
267 Prefixed(Size, Align),
270 // Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
271 // This is used to go between `memory_index` (source field order to memory order)
272 // and `inverse_memory_index` (memory order to source field order).
273 // See also `FieldsShape::Arbitrary::memory_index` for more details.
274 // FIXME(eddyb) build a better abstraction for permutations, if possible.
// Invert a bijective index mapping: for each source index `i`,
// record `i` at position `map[i]` of the result, so that
// `invert_mapping(map)[map[i]] == i`. Used to convert between
// `memory_index` and `inverse_memory_index` (see comments above).
// NOTE(review): the closing lines (end of loop, `inverse` return) are
// elided from this view of the file.
275 fn invert_mapping(map: &[u32]) -> Vec<u32> {
// Result has the same length as the input; every slot is overwritten
// below because `map` is a bijection onto 0..map.len().
276 let mut inverse = vec![0; map.len()];
277 for i in 0..map.len() {
278 inverse[map[i] as usize] = i as u32;
283 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
284 fn scalar_pair(&self, a: Scalar, b: Scalar) -> Layout {
285 let dl = self.data_layout();
286 let b_align = b.value.align(dl);
287 let align = a.value.align(dl).max(b_align).max(dl.aggregate_align);
288 let b_offset = a.value.size(dl).align_to(b_align.abi);
289 let size = (b_offset + b.value.size(dl)).align_to(align.abi);
291 // HACK(nox): We iter on `b` and then `a` because `max_by_key`
292 // returns the last maximum.
293 let largest_niche = Niche::from_scalar(dl, b_offset, b.clone())
295 .chain(Niche::from_scalar(dl, Size::ZERO, a.clone()))
296 .max_by_key(|niche| niche.available(dl));
299 variants: Variants::Single { index: VariantIdx::new(0) },
300 fields: FieldsShape::Arbitrary {
301 offsets: vec![Size::ZERO, b_offset],
302 memory_index: vec![0, 1],
304 abi: Abi::ScalarPair(a, b),
311 fn univariant_uninterned(
314 fields: &[TyAndLayout<'_>],
317 ) -> Result<Layout, LayoutError<'tcx>> {
318 let dl = self.data_layout();
319 let pack = repr.pack;
320 if pack.is_some() && repr.align.is_some() {
321 self.tcx.sess.delay_span_bug(DUMMY_SP, "struct cannot be packed and aligned");
322 return Err(LayoutError::Unknown(ty));
325 let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };
327 let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();
329 let optimize = !repr.inhibit_struct_field_reordering_opt();
332 if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
333 let optimizing = &mut inverse_memory_index[..end];
334 let field_align = |f: &TyAndLayout<'_>| {
335 if let Some(pack) = pack { f.align.abi.min(pack) } else { f.align.abi }
338 StructKind::AlwaysSized | StructKind::MaybeUnsized => {
339 optimizing.sort_by_key(|&x| {
340 // Place ZSTs first to avoid "interesting offsets",
341 // especially with only one or two non-ZST fields.
342 let f = &fields[x as usize];
343 (!f.is_zst(), cmp::Reverse(field_align(f)))
346 StructKind::Prefixed(..) => {
347 // Sort in ascending alignment so that the layout stays optimal
348 // regardless of the prefix
349 optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
354 // inverse_memory_index holds field indices by increasing memory offset.
355 // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
356 // We now write field offsets to the corresponding offset slot;
357 // field 5 with offset 0 puts 0 in offsets[5].
358 // At the bottom of this function, we invert `inverse_memory_index` to
359 // produce `memory_index` (see `invert_mapping`).
361 let mut sized = true;
362 let mut offsets = vec![Size::ZERO; fields.len()];
363 let mut offset = Size::ZERO;
364 let mut largest_niche = None;
365 let mut largest_niche_available = 0;
367 if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
369 if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
370 align = align.max(AbiAndPrefAlign::new(prefix_align));
371 offset = prefix_size.align_to(prefix_align);
374 for &i in &inverse_memory_index {
375 let field = fields[i as usize];
377 self.tcx.sess.delay_span_bug(
380 "univariant: field #{} of `{}` comes after unsized field",
387 if field.is_unsized() {
391 // Invariant: offset < dl.obj_size_bound() <= 1<<61
392 let field_align = if let Some(pack) = pack {
393 field.align.min(AbiAndPrefAlign::new(pack))
397 offset = offset.align_to(field_align.abi);
398 align = align.max(field_align);
400 debug!("univariant offset: {:?} field: {:#?}", offset, field);
401 offsets[i as usize] = offset;
403 if !repr.hide_niche() {
404 if let Some(mut niche) = field.largest_niche.clone() {
405 let available = niche.available(dl);
406 if available > largest_niche_available {
407 largest_niche_available = available;
408 niche.offset += offset;
409 largest_niche = Some(niche);
414 offset = offset.checked_add(field.size, dl).ok_or(LayoutError::SizeOverflow(ty))?;
417 if let Some(repr_align) = repr.align {
418 align = align.max(AbiAndPrefAlign::new(repr_align));
421 debug!("univariant min_size: {:?}", offset);
422 let min_size = offset;
424 // As stated above, inverse_memory_index holds field indices by increasing offset.
425 // This makes it an already-sorted view of the offsets vec.
426 // To invert it, consider:
427 // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
428 // Field 5 would be the first element, so memory_index is i:
429 // Note: if we didn't optimize, it's already right.
432 if optimize { invert_mapping(&inverse_memory_index) } else { inverse_memory_index };
434 let size = min_size.align_to(align.abi);
435 let mut abi = Abi::Aggregate { sized };
437 // Unpack newtype ABIs and find scalar pairs.
438 if sized && size.bytes() > 0 {
439 // All other fields must be ZSTs.
440 let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());
442 match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
443 // We have exactly one non-ZST field.
444 (Some((i, field)), None, None) => {
445 // Field fills the struct and it has a scalar or scalar pair ABI.
446 if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size
449 // For plain scalars, or vectors of them, we can't unpack
450 // newtypes for `#[repr(C)]`, as that affects C ABIs.
451 Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
452 abi = field.abi.clone();
454 // But scalar pairs are Rust-specific and get
455 // treated as aggregates by C ABIs anyway.
456 Abi::ScalarPair(..) => {
457 abi = field.abi.clone();
464 // Two non-ZST fields, and they're both scalars.
466 Some((i, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(ref a), .. }, .. })),
467 Some((j, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(ref b), .. }, .. })),
470 // Order by the memory placement, not source order.
471 let ((i, a), (j, b)) =
472 if offsets[i] < offsets[j] { ((i, a), (j, b)) } else { ((j, b), (i, a)) };
473 let pair = self.scalar_pair(a.clone(), b.clone());
474 let pair_offsets = match pair.fields {
475 FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
476 assert_eq!(memory_index, &[0, 1]);
481 if offsets[i] == pair_offsets[0]
482 && offsets[j] == pair_offsets[1]
483 && align == pair.align
486 // We can use `ScalarPair` only when it matches our
487 // already computed layout (including `#[repr(C)]`).
496 if sized && fields.iter().any(|f| f.abi.is_uninhabited()) {
497 abi = Abi::Uninhabited;
501 variants: Variants::Single { index: VariantIdx::new(0) },
502 fields: FieldsShape::Arbitrary { offsets, memory_index },
510 fn layout_of_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx Layout, LayoutError<'tcx>> {
512 let param_env = self.param_env;
513 let dl = self.data_layout();
514 let scalar_unit = |value: Primitive| {
515 let bits = value.size(dl).bits();
516 assert!(bits <= 128);
517 Scalar { value, valid_range: WrappingRange { start: 0, end: (!0 >> (128 - bits)) } }
519 let scalar = |value: Primitive| tcx.intern_layout(Layout::scalar(self, scalar_unit(value)));
521 let univariant = |fields: &[TyAndLayout<'_>], repr: &ReprOptions, kind| {
522 Ok(tcx.intern_layout(self.univariant_uninterned(ty, fields, repr, kind)?))
524 debug_assert!(!ty.has_infer_types_or_consts());
526 Ok(match *ty.kind() {
528 ty::Bool => tcx.intern_layout(Layout::scalar(
530 Scalar { value: Int(I8, false), valid_range: WrappingRange { start: 0, end: 1 } },
532 ty::Char => tcx.intern_layout(Layout::scalar(
535 value: Int(I32, false),
536 valid_range: WrappingRange { start: 0, end: 0x10FFFF },
539 ty::Int(ity) => scalar(Int(Integer::from_int_ty(dl, ity), true)),
540 ty::Uint(ity) => scalar(Int(Integer::from_uint_ty(dl, ity), false)),
541 ty::Float(fty) => scalar(match fty {
542 ty::FloatTy::F32 => F32,
543 ty::FloatTy::F64 => F64,
546 let mut ptr = scalar_unit(Pointer);
547 ptr.valid_range = ptr.valid_range.with_start(1);
548 tcx.intern_layout(Layout::scalar(self, ptr))
552 ty::Never => tcx.intern_layout(Layout {
553 variants: Variants::Single { index: VariantIdx::new(0) },
554 fields: FieldsShape::Primitive,
555 abi: Abi::Uninhabited,
561 // Potentially-wide pointers.
562 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
563 let mut data_ptr = scalar_unit(Pointer);
564 if !ty.is_unsafe_ptr() {
565 data_ptr.valid_range = data_ptr.valid_range.with_start(1);
568 let pointee = tcx.normalize_erasing_regions(param_env, pointee);
569 if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
570 return Ok(tcx.intern_layout(Layout::scalar(self, data_ptr)));
573 let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
574 let metadata = match unsized_part.kind() {
576 return Ok(tcx.intern_layout(Layout::scalar(self, data_ptr)));
578 ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)),
580 let mut vtable = scalar_unit(Pointer);
581 vtable.valid_range = vtable.valid_range.with_start(1);
584 _ => return Err(LayoutError::Unknown(unsized_part)),
587 // Effectively a (ptr, meta) tuple.
588 tcx.intern_layout(self.scalar_pair(data_ptr, metadata))
591 // Arrays and slices.
592 ty::Array(element, mut count) => {
593 if count.has_projections() {
594 count = tcx.normalize_erasing_regions(param_env, count);
595 if count.has_projections() {
596 return Err(LayoutError::Unknown(ty));
600 let count = count.try_eval_usize(tcx, param_env).ok_or(LayoutError::Unknown(ty))?;
601 let element = self.layout_of(element)?;
603 element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;
606 if count != 0 && tcx.conservative_is_privately_uninhabited(param_env.and(ty)) {
609 Abi::Aggregate { sized: true }
612 let largest_niche = if count != 0 { element.largest_niche.clone() } else { None };
614 tcx.intern_layout(Layout {
615 variants: Variants::Single { index: VariantIdx::new(0) },
616 fields: FieldsShape::Array { stride: element.size, count },
619 align: element.align,
623 ty::Slice(element) => {
624 let element = self.layout_of(element)?;
625 tcx.intern_layout(Layout {
626 variants: Variants::Single { index: VariantIdx::new(0) },
627 fields: FieldsShape::Array { stride: element.size, count: 0 },
628 abi: Abi::Aggregate { sized: false },
630 align: element.align,
634 ty::Str => tcx.intern_layout(Layout {
635 variants: Variants::Single { index: VariantIdx::new(0) },
636 fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
637 abi: Abi::Aggregate { sized: false },
644 ty::FnDef(..) => univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?,
645 ty::Dynamic(..) | ty::Foreign(..) => {
646 let mut unit = self.univariant_uninterned(
649 &ReprOptions::default(),
650 StructKind::AlwaysSized,
653 Abi::Aggregate { ref mut sized } => *sized = false,
656 tcx.intern_layout(unit)
659 ty::Generator(def_id, substs, _) => self.generator_layout(ty, def_id, substs)?,
661 ty::Closure(_, ref substs) => {
662 let tys = substs.as_closure().upvar_tys();
664 &tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
665 &ReprOptions::default(),
666 StructKind::AlwaysSized,
672 if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };
676 .map(|k| self.layout_of(k.expect_ty()))
677 .collect::<Result<Vec<_>, _>>()?,
678 &ReprOptions::default(),
683 // SIMD vector types.
684 ty::Adt(def, substs) if def.repr.simd() => {
685 if !def.is_struct() {
686 // Should have yielded E0517 by now.
687 tcx.sess.delay_span_bug(
689 "#[repr(simd)] was applied to an ADT that is not a struct",
691 return Err(LayoutError::Unknown(ty));
694 // Supported SIMD vectors are homogeneous ADTs with at least one field:
696 // * #[repr(simd)] struct S(T, T, T, T);
697 // * #[repr(simd)] struct S { x: T, y: T, z: T, w: T }
698 // * #[repr(simd)] struct S([T; 4])
700 // where T is a primitive scalar (integer/float/pointer).
702 // SIMD vectors with zero fields are not supported.
703 // (should be caught by typeck)
704 if def.non_enum_variant().fields.is_empty() {
705 tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
708 // Type of the first ADT field:
709 let f0_ty = def.non_enum_variant().fields[0].ty(tcx, substs);
711 // Heterogeneous SIMD vectors are not supported:
712 // (should be caught by typeck)
713 for fi in &def.non_enum_variant().fields {
714 if fi.ty(tcx, substs) != f0_ty {
715 tcx.sess.fatal(&format!("monomorphising heterogeneous SIMD type `{}`", ty));
719 // The element type and number of elements of the SIMD vector
720 // are obtained from:
722 // * the element type and length of the single array field, if
723 // the first field is of array type, or
725 // * the homogeneous field type and the number of fields.
726 let (e_ty, e_len, is_array) = if let ty::Array(e_ty, _) = f0_ty.kind() {
727 // First ADT field is an array:
729 // SIMD vectors with multiple array fields are not supported:
730 // (should be caught by typeck)
731 if def.non_enum_variant().fields.len() != 1 {
732 tcx.sess.fatal(&format!(
733 "monomorphising SIMD type `{}` with more than one array field",
738 // Extract the number of elements from the layout of the array field:
739 let len = if let Ok(TyAndLayout {
740 layout: Layout { fields: FieldsShape::Array { count, .. }, .. },
742 }) = self.layout_of(f0_ty)
746 return Err(LayoutError::Unknown(ty));
751 // First ADT field is not an array:
752 (f0_ty, def.non_enum_variant().fields.len() as _, false)
755 // SIMD vectors of zero length are not supported.
756 // Additionally, lengths are capped at 2^16 as a fixed maximum backends must
759 // Can't be caught in typeck if the array length is generic.
761 tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
762 } else if e_len > MAX_SIMD_LANES {
763 tcx.sess.fatal(&format!(
764 "monomorphising SIMD type `{}` of length greater than {}",
769 // Compute the ABI of the element type:
770 let e_ly = self.layout_of(e_ty)?;
771 let e_abi = if let Abi::Scalar(ref scalar) = e_ly.abi {
774 // This error isn't caught in typeck, e.g., if
775 // the element type of the vector is generic.
776 tcx.sess.fatal(&format!(
777 "monomorphising SIMD type `{}` with a non-primitive-scalar \
778 (integer/float/pointer) element type `{}`",
783 // Compute the size and alignment of the vector:
784 let size = e_ly.size.checked_mul(e_len, dl).ok_or(LayoutError::SizeOverflow(ty))?;
785 let align = dl.vector_align(size);
786 let size = size.align_to(align.abi);
788 // Compute the placement of the vector fields:
789 let fields = if is_array {
790 FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] }
792 FieldsShape::Array { stride: e_ly.size, count: e_len }
795 tcx.intern_layout(Layout {
796 variants: Variants::Single { index: VariantIdx::new(0) },
798 abi: Abi::Vector { element: e_abi, count: e_len },
799 largest_niche: e_ly.largest_niche.clone(),
806 ty::Adt(def, substs) => {
807 // Cache the field layouts.
814 .map(|field| self.layout_of(field.ty(tcx, substs)))
815 .collect::<Result<Vec<_>, _>>()
817 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
820 if def.repr.pack.is_some() && def.repr.align.is_some() {
821 self.tcx.sess.delay_span_bug(
822 tcx.def_span(def.did),
823 "union cannot be packed and aligned",
825 return Err(LayoutError::Unknown(ty));
829 if def.repr.pack.is_some() { dl.i8_align } else { dl.aggregate_align };
831 if let Some(repr_align) = def.repr.align {
832 align = align.max(AbiAndPrefAlign::new(repr_align));
835 let optimize = !def.repr.inhibit_union_abi_opt();
836 let mut size = Size::ZERO;
837 let mut abi = Abi::Aggregate { sized: true };
838 let index = VariantIdx::new(0);
839 for field in &variants[index] {
840 assert!(!field.is_unsized());
841 align = align.max(field.align);
843 // If all non-ZST fields have the same ABI, forward this ABI
844 if optimize && !field.is_zst() {
845 // Normalize scalar_unit to the maximal valid range
846 let field_abi = match &field.abi {
847 Abi::Scalar(x) => Abi::Scalar(scalar_unit(x.value)),
848 Abi::ScalarPair(x, y) => {
849 Abi::ScalarPair(scalar_unit(x.value), scalar_unit(y.value))
851 Abi::Vector { element: x, count } => {
852 Abi::Vector { element: scalar_unit(x.value), count: *count }
854 Abi::Uninhabited | Abi::Aggregate { .. } => {
855 Abi::Aggregate { sized: true }
859 if size == Size::ZERO {
860 // first non ZST: initialize 'abi'
862 } else if abi != field_abi {
863 // different fields have different ABI: reset to Aggregate
864 abi = Abi::Aggregate { sized: true };
868 size = cmp::max(size, field.size);
871 if let Some(pack) = def.repr.pack {
872 align = align.min(AbiAndPrefAlign::new(pack));
875 return Ok(tcx.intern_layout(Layout {
876 variants: Variants::Single { index },
877 fields: FieldsShape::Union(
878 NonZeroUsize::new(variants[index].len())
879 .ok_or(LayoutError::Unknown(ty))?,
884 size: size.align_to(align.abi),
888 // A variant is absent if it's uninhabited and only has ZST fields.
889 // Present uninhabited variants only require space for their fields,
890 // but *not* an encoding of the discriminant (e.g., a tag value).
891 // See issue #49298 for more details on the need to leave space
892 // for non-ZST uninhabited data (mostly partial initialization).
893 let absent = |fields: &[TyAndLayout<'_>]| {
894 let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
895 let is_zst = fields.iter().all(|f| f.is_zst());
896 uninhabited && is_zst
898 let (present_first, present_second) = {
899 let mut present_variants = variants
901 .filter_map(|(i, v)| if absent(v) { None } else { Some(i) });
902 (present_variants.next(), present_variants.next())
904 let present_first = match present_first {
905 Some(present_first) => present_first,
906 // Uninhabited because it has no variants, or only absent ones.
907 None if def.is_enum() => {
908 return Ok(tcx.layout_of(param_env.and(tcx.types.never))?.layout);
910 // If it's a struct, still compute a layout so that we can still compute the
912 None => VariantIdx::new(0),
915 let is_struct = !def.is_enum() ||
916 // Only one variant is present.
917 (present_second.is_none() &&
918 // Representation optimizations are allowed.
919 !def.repr.inhibit_enum_layout_opt());
921 // Struct, or univariant enum equivalent to a struct.
922 // (Typechecking will reject discriminant-sizing attrs.)
924 let v = present_first;
925 let kind = if def.is_enum() || variants[v].is_empty() {
926 StructKind::AlwaysSized
928 let param_env = tcx.param_env(def.did);
929 let last_field = def.variants[v].fields.last().unwrap();
931 tcx.type_of(last_field.did).is_sized(tcx.at(DUMMY_SP), param_env);
933 StructKind::MaybeUnsized
935 StructKind::AlwaysSized
939 let mut st = self.univariant_uninterned(ty, &variants[v], &def.repr, kind)?;
940 st.variants = Variants::Single { index: v };
941 let (start, end) = self.tcx.layout_scalar_valid_range(def.did);
943 Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
944 // the asserts ensure that we are not using the
945 // `#[rustc_layout_scalar_valid_range(n)]`
946 // attribute to widen the range of anything as that would probably
947 // result in UB somewhere
948 // FIXME(eddyb) the asserts are probably not needed,
949 // as larger validity ranges would result in missed
950 // optimizations, *not* wrongly assuming the inner
951 // value is valid. e.g. unions enlarge validity ranges,
952 // because the values may be uninitialized.
953 if let Bound::Included(start) = start {
954 // FIXME(eddyb) this might be incorrect - it doesn't
955 // account for wrap-around (end < start) ranges.
956 assert!(scalar.valid_range.start <= start);
957 scalar.valid_range.start = start;
959 if let Bound::Included(end) = end {
960 // FIXME(eddyb) this might be incorrect - it doesn't
961 // account for wrap-around (end < start) ranges.
962 assert!(scalar.valid_range.end >= end);
963 scalar.valid_range.end = end;
966 // Update `largest_niche` if we have introduced a larger niche.
967 let niche = if def.repr.hide_niche() {
970 Niche::from_scalar(dl, Size::ZERO, scalar.clone())
972 if let Some(niche) = niche {
973 match &st.largest_niche {
974 Some(largest_niche) => {
975 // Replace the existing niche even if they're equal,
976 // because this one is at a lower offset.
977 if largest_niche.available(dl) <= niche.available(dl) {
978 st.largest_niche = Some(niche);
981 None => st.largest_niche = Some(niche),
986 start == Bound::Unbounded && end == Bound::Unbounded,
987 "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
993 return Ok(tcx.intern_layout(st));
996 // At this point, we have handled all unions and
997 // structs. (We have also handled univariant enums
998 // that allow representation optimization.)
999 assert!(def.is_enum());
1001 // The current code for niche-filling relies on variant indices
1002 // instead of actual discriminants, so dataful enums with
1003 // explicit discriminants (RFC #2363) would misbehave.
1004 let no_explicit_discriminants = def
1007 .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i.as_u32()));
1009 let mut niche_filling_layout = None;
1011 // Niche-filling enum optimization.
1012 if !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants {
1013 let mut dataful_variant = None;
1014 let mut niche_variants = VariantIdx::MAX..=VariantIdx::new(0);
1016 // Find one non-ZST variant.
1017 'variants: for (v, fields) in variants.iter_enumerated() {
1023 if dataful_variant.is_none() {
1024 dataful_variant = Some(v);
1027 dataful_variant = None;
1032 niche_variants = *niche_variants.start().min(&v)..=v;
1035 if niche_variants.start() > niche_variants.end() {
1036 dataful_variant = None;
1039 if let Some(i) = dataful_variant {
1040 let count = (niche_variants.end().as_u32()
1041 - niche_variants.start().as_u32()
1044 // Find the field with the largest niche
1045 let niche_candidate = variants[i]
1048 .filter_map(|(j, &field)| Some((j, field.largest_niche.as_ref()?)))
1049 .max_by_key(|(_, niche)| niche.available(dl));
1051 if let Some((field_index, niche, (niche_start, niche_scalar))) =
1052 niche_candidate.and_then(|(field_index, niche)| {
1053 Some((field_index, niche, niche.reserve(self, count)?))
1056 let mut align = dl.aggregate_align;
1060 let mut st = self.univariant_uninterned(
1064 StructKind::AlwaysSized,
1066 st.variants = Variants::Single { index: j };
1068 align = align.max(st.align);
1072 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1074 let offset = st[i].fields.offset(field_index) + niche.offset;
1075 let size = st[i].size;
1077 let abi = if st.iter().all(|v| v.abi.is_uninhabited()) {
1081 Abi::Scalar(_) => Abi::Scalar(niche_scalar.clone()),
1082 Abi::ScalarPair(ref first, ref second) => {
1083 // We need to use scalar_unit to reset the
1084 // valid range to the maximal one for that
1085 // primitive, because only the niche is
1086 // guaranteed to be initialised, not the
1088 if offset.bytes() == 0 {
1090 niche_scalar.clone(),
1091 scalar_unit(second.value),
1095 scalar_unit(first.value),
1096 niche_scalar.clone(),
1100 _ => Abi::Aggregate { sized: true },
1105 Niche::from_scalar(dl, offset, niche_scalar.clone());
1107 niche_filling_layout = Some(Layout {
1108 variants: Variants::Multiple {
1110 tag_encoding: TagEncoding::Niche {
1118 fields: FieldsShape::Arbitrary {
1119 offsets: vec![offset],
1120 memory_index: vec![0],
1131 let (mut min, mut max) = (i128::MAX, i128::MIN);
1132 let discr_type = def.repr.discr_type();
1133 let bits = Integer::from_attr(self, discr_type).size().bits();
1134 for (i, discr) in def.discriminants(tcx) {
1135 if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
1138 let mut x = discr.val as i128;
1139 if discr_type.is_signed() {
1140 // sign extend the raw representation to be an i128
1141 x = (x << (128 - bits)) >> (128 - bits);
1150 // We might have no inhabited variants, so pretend there's at least one.
1151 if (min, max) == (i128::MAX, i128::MIN) {
1155 assert!(min <= max, "discriminant range is {}...{}", min, max);
1156 let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max);
1158 let mut align = dl.aggregate_align;
1159 let mut size = Size::ZERO;
1161 // We're interested in the smallest alignment, so start large.
1162 let mut start_align = Align::from_bytes(256).unwrap();
1163 assert_eq!(Integer::for_align(dl, start_align), None);
1165 // repr(C) on an enum tells us to make a (tag, union) layout,
1166 // so we need to grow the prefix alignment to be at least
1167 // the alignment of the union. (This value is used both for
1168 // determining the alignment of the overall enum, and the
1169 // determining the alignment of the payload after the tag.)
1170 let mut prefix_align = min_ity.align(dl).abi;
1172 for fields in &variants {
1173 for field in fields {
1174 prefix_align = prefix_align.max(field.align.abi);
1179 // Create the set of structs that represent each variant.
1180 let mut layout_variants = variants
1182 .map(|(i, field_layouts)| {
1183 let mut st = self.univariant_uninterned(
1187 StructKind::Prefixed(min_ity.size(), prefix_align),
1189 st.variants = Variants::Single { index: i };
1190 // Find the first field we can't move later
1191 // to make room for a larger discriminant.
1193 st.fields.index_by_increasing_offset().map(|j| field_layouts[j])
1195 if !field.is_zst() || field.align.abi.bytes() != 1 {
1196 start_align = start_align.min(field.align.abi);
1200 size = cmp::max(size, st.size);
1201 align = align.max(st.align);
1204 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1206 // Align the maximum variant size to the largest alignment.
1207 size = size.align_to(align.abi);
1209 if size.bytes() >= dl.obj_size_bound() {
1210 return Err(LayoutError::SizeOverflow(ty));
1213 let typeck_ity = Integer::from_attr(dl, def.repr.discr_type());
1214 if typeck_ity < min_ity {
1215 // It is a bug if Layout decided on a greater discriminant size than typeck for
1216 // some reason at this point (based on values discriminant can take on). Mostly
1217 // because this discriminant will be loaded, and then stored into variable of
1218 // type calculated by typeck. Consider such case (a bug): typeck decided on
1219 // byte-sized discriminant, but layout thinks we need a 16-bit to store all
1220 // discriminant values. That would be a bug, because then, in codegen, in order
1221 // to store this 16-bit discriminant into 8-bit sized temporary some of the
1222 // space necessary to represent would have to be discarded (or layout is wrong
1223 // on thinking it needs 16 bits)
1225 "layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
1229 // However, it is fine to make discr type however large (as an optimisation)
1230 // after this point – we’ll just truncate the value we load in codegen.
1233 // Check to see if we should use a different type for the
1234 // discriminant. We can safely use a type with the same size
1235 // as the alignment of the first field of each variant.
1236 // We increase the size of the discriminant to avoid LLVM copying
1237 // padding when it doesn't need to. This normally causes unaligned
1238 // load/stores and excessive memcpy/memset operations. By using a
1239 // bigger integer size, LLVM can be sure about its contents and
1240 // won't be so conservative.
1242 // Use the initial field alignment
1243 let mut ity = if def.repr.c() || def.repr.int.is_some() {
1246 Integer::for_align(dl, start_align).unwrap_or(min_ity)
1249 // If the alignment is not larger than the chosen discriminant size,
1250 // don't use the alignment as the final size.
1254 // Patch up the variants' first few fields.
1255 let old_ity_size = min_ity.size();
1256 let new_ity_size = ity.size();
1257 for variant in &mut layout_variants {
1258 match variant.fields {
1259 FieldsShape::Arbitrary { ref mut offsets, .. } => {
1261 if *i <= old_ity_size {
1262 assert_eq!(*i, old_ity_size);
1266 // We might be making the struct larger.
1267 if variant.size <= old_ity_size {
1268 variant.size = new_ity_size;
1276 let tag_mask = !0u128 >> (128 - ity.size().bits());
1278 value: Int(ity, signed),
1279 valid_range: WrappingRange {
1280 start: (min as u128 & tag_mask),
1281 end: (max as u128 & tag_mask),
1284 let mut abi = Abi::Aggregate { sized: true };
1285 if tag.value.size(dl) == size {
1286 abi = Abi::Scalar(tag.clone());
1288 // Try to use a ScalarPair for all tagged enums.
1289 let mut common_prim = None;
1290 for (field_layouts, layout_variant) in iter::zip(&variants, &layout_variants) {
1291 let offsets = match layout_variant.fields {
1292 FieldsShape::Arbitrary { ref offsets, .. } => offsets,
1296 iter::zip(field_layouts, offsets).filter(|p| !p.0.is_zst());
1297 let (field, offset) = match (fields.next(), fields.next()) {
1298 (None, None) => continue,
1299 (Some(pair), None) => pair,
1305 let prim = match field.abi {
1306 Abi::Scalar(ref scalar) => scalar.value,
1312 if let Some(pair) = common_prim {
1313 // This is pretty conservative. We could go fancier
1314 // by conflating things like i32 and u32, or even
1315 // realising that (u8, u8) could just cohabit with
1317 if pair != (prim, offset) {
1322 common_prim = Some((prim, offset));
1325 if let Some((prim, offset)) = common_prim {
1326 let pair = self.scalar_pair(tag.clone(), scalar_unit(prim));
1327 let pair_offsets = match pair.fields {
1328 FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
1329 assert_eq!(memory_index, &[0, 1]);
1334 if pair_offsets[0] == Size::ZERO
1335 && pair_offsets[1] == *offset
1336 && align == pair.align
1337 && size == pair.size
1339 // We can use `ScalarPair` only when it matches our
1340 // already computed layout (including `#[repr(C)]`).
1346 if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
1347 abi = Abi::Uninhabited;
1350 let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag.clone());
1352 let tagged_layout = Layout {
1353 variants: Variants::Multiple {
1355 tag_encoding: TagEncoding::Direct,
1357 variants: layout_variants,
1359 fields: FieldsShape::Arbitrary {
1360 offsets: vec![Size::ZERO],
1361 memory_index: vec![0],
1369 let best_layout = match (tagged_layout, niche_filling_layout) {
1370 (tagged_layout, Some(niche_filling_layout)) => {
1371 // Pick the smaller layout; otherwise,
1372 // pick the layout with the larger niche; otherwise,
1373 // pick tagged as it has simpler codegen.
1374 cmp::min_by_key(tagged_layout, niche_filling_layout, |layout| {
1376 layout.largest_niche.as_ref().map_or(0, |n| n.available(dl));
1377 (layout.size, cmp::Reverse(niche_size))
1380 (tagged_layout, None) => tagged_layout,
1383 tcx.intern_layout(best_layout)
1386 // Types with no meaningful known layout.
1387 ty::Projection(_) | ty::Opaque(..) => {
1388 // NOTE(eddyb) `layout_of` query should've normalized these away,
1389 // if that was possible, so there's no reason to try again here.
1390 return Err(LayoutError::Unknown(ty));
1393 ty::Placeholder(..) | ty::GeneratorWitness(..) | ty::Infer(_) => {
1394 bug!("Layout::compute: unexpected type `{}`", ty)
1397 ty::Bound(..) | ty::Param(_) | ty::Error(_) => {
1398 return Err(LayoutError::Unknown(ty));
1404 /// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
1405 #[derive(Clone, Debug, PartialEq)]
1406 enum SavedLocalEligibility {
1408 Assigned(VariantIdx),
1409 // FIXME: Use newtype_index so we aren't wasting bytes
1410 Ineligible(Option<u32>),
1413 // When laying out generators, we divide our saved local fields into two
1414 // categories: overlap-eligible and overlap-ineligible.
1416 // Those fields which are ineligible for overlap go in a "prefix" at the
1417 // beginning of the layout, and always have space reserved for them.
1419 // Overlap-eligible fields are only assigned to one variant, so we lay
1420 // those fields out for each variant and put them right after the
1423 // Finally, in the layout details, we point to the fields from the
1424 // variants they are assigned to. It is possible for some fields to be
1425 // included in multiple variants. No field ever "moves around" in the
1426 // layout; its offset is always the same.
1428 // Also included in the layout are the upvars and the discriminant.
1429 // These are included as fields on the "outer" layout; they are not part
1431 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
1432 /// Compute the eligibility and assignment of each local.
1433 fn generator_saved_local_eligibility(
1435 info: &GeneratorLayout<'tcx>,
1436 ) -> (BitSet<GeneratorSavedLocal>, IndexVec<GeneratorSavedLocal, SavedLocalEligibility>) {
1437 use SavedLocalEligibility::*;
1439 let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> =
1440 IndexVec::from_elem_n(Unassigned, info.field_tys.len());
1442 // The saved locals not eligible for overlap. These will get
1443 // "promoted" to the prefix of our generator.
1444 let mut ineligible_locals = BitSet::new_empty(info.field_tys.len());
1446 // Figure out which of our saved locals are fields in only
1447 // one variant. The rest are deemed ineligible for overlap.
1448 for (variant_index, fields) in info.variant_fields.iter_enumerated() {
1449 for local in fields {
1450 match assignments[*local] {
1452 assignments[*local] = Assigned(variant_index);
1455 // We've already seen this local at another suspension
1456 // point, so it is no longer a candidate.
1458 "removing local {:?} in >1 variant ({:?}, {:?})",
1463 ineligible_locals.insert(*local);
1464 assignments[*local] = Ineligible(None);
1471 // Next, check every pair of eligible locals to see if they
1473 for local_a in info.storage_conflicts.rows() {
1474 let conflicts_a = info.storage_conflicts.count(local_a);
1475 if ineligible_locals.contains(local_a) {
1479 for local_b in info.storage_conflicts.iter(local_a) {
1480 // local_a and local_b are storage live at the same time, therefore they
1481 // cannot overlap in the generator layout. The only way to guarantee
1482 // this is if they are in the same variant, or one is ineligible
1483 // (which means it is stored in every variant).
1484 if ineligible_locals.contains(local_b)
1485 || assignments[local_a] == assignments[local_b]
1490 // If they conflict, we will choose one to make ineligible.
1491 // This is not always optimal; it's just a greedy heuristic that
1492 // seems to produce good results most of the time.
1493 let conflicts_b = info.storage_conflicts.count(local_b);
1494 let (remove, other) =
1495 if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
1496 ineligible_locals.insert(remove);
1497 assignments[remove] = Ineligible(None);
1498 trace!("removing local {:?} due to conflict with {:?}", remove, other);
1502 // Count the number of variants in use. If only one of them, then it is
1503 // impossible to overlap any locals in our layout. In this case it's
1504 // always better to make the remaining locals ineligible, so we can
1505 // lay them out with the other locals in the prefix and eliminate
1506 // unnecessary padding bytes.
1508 let mut used_variants = BitSet::new_empty(info.variant_fields.len());
1509 for assignment in &assignments {
1510 if let Assigned(idx) = assignment {
1511 used_variants.insert(*idx);
1514 if used_variants.count() < 2 {
1515 for assignment in assignments.iter_mut() {
1516 *assignment = Ineligible(None);
1518 ineligible_locals.insert_all();
1522 // Write down the order of our locals that will be promoted to the prefix.
1524 for (idx, local) in ineligible_locals.iter().enumerate() {
1525 assignments[local] = Ineligible(Some(idx as u32));
1528 debug!("generator saved local assignments: {:?}", assignments);
1530 (ineligible_locals, assignments)
1533 /// Compute the full generator layout.
1534 fn generator_layout(
1537 def_id: hir::def_id::DefId,
1538 substs: SubstsRef<'tcx>,
1539 ) -> Result<&'tcx Layout, LayoutError<'tcx>> {
1540 use SavedLocalEligibility::*;
1542 let subst_field = |ty: Ty<'tcx>| ty.subst(tcx, substs);
1544 let info = match tcx.generator_layout(def_id) {
1545 None => return Err(LayoutError::Unknown(ty)),
1548 let (ineligible_locals, assignments) = self.generator_saved_local_eligibility(&info);
1550 // Build a prefix layout, including "promoting" all ineligible
1551 // locals as part of the prefix. We compute the layout of all of
1552 // these fields at once to get optimal packing.
1553 let tag_index = substs.as_generator().prefix_tys().count();
1555 // `info.variant_fields` already accounts for the reserved variants, so no need to add them.
1556 let max_discr = (info.variant_fields.len() - 1) as u128;
1557 let discr_int = Integer::fit_unsigned(max_discr);
1558 let discr_int_ty = discr_int.to_ty(tcx, false);
1560 value: Primitive::Int(discr_int, false),
1561 valid_range: WrappingRange { start: 0, end: max_discr },
1563 let tag_layout = self.tcx.intern_layout(Layout::scalar(self, tag.clone()));
1564 let tag_layout = TyAndLayout { ty: discr_int_ty, layout: tag_layout };
1566 let promoted_layouts = ineligible_locals
1568 .map(|local| subst_field(info.field_tys[local]))
1569 .map(|ty| tcx.mk_maybe_uninit(ty))
1570 .map(|ty| self.layout_of(ty));
1571 let prefix_layouts = substs
1574 .map(|ty| self.layout_of(ty))
1575 .chain(iter::once(Ok(tag_layout)))
1576 .chain(promoted_layouts)
1577 .collect::<Result<Vec<_>, _>>()?;
1578 let prefix = self.univariant_uninterned(
1581 &ReprOptions::default(),
1582 StructKind::AlwaysSized,
1585 let (prefix_size, prefix_align) = (prefix.size, prefix.align);
1587 // Split the prefix layout into the "outer" fields (upvars and
1588 // discriminant) and the "promoted" fields. Promoted fields will
1589 // get included in each variant that requested them in
1591 debug!("prefix = {:#?}", prefix);
1592 let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
1593 FieldsShape::Arbitrary { mut offsets, memory_index } => {
1594 let mut inverse_memory_index = invert_mapping(&memory_index);
1596 // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
1597 // "outer" and "promoted" fields respectively.
1598 let b_start = (tag_index + 1) as u32;
1599 let offsets_b = offsets.split_off(b_start as usize);
1600 let offsets_a = offsets;
1602 // Disentangle the "a" and "b" components of `inverse_memory_index`
1603 // by preserving the order but keeping only one disjoint "half" each.
1604 // FIXME(eddyb) build a better abstraction for permutations, if possible.
1605 let inverse_memory_index_b: Vec<_> =
1606 inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
1607 inverse_memory_index.retain(|&i| i < b_start);
1608 let inverse_memory_index_a = inverse_memory_index;
1610 // Since `inverse_memory_index_{a,b}` each only refer to their
1611 // respective fields, they can be safely inverted
1612 let memory_index_a = invert_mapping(&inverse_memory_index_a);
1613 let memory_index_b = invert_mapping(&inverse_memory_index_b);
1616 FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
1617 (outer_fields, offsets_b, memory_index_b)
1622 let mut size = prefix.size;
1623 let mut align = prefix.align;
1627 .map(|(index, variant_fields)| {
1628 // Only include overlap-eligible fields when we compute our variant layout.
1629 let variant_only_tys = variant_fields
1631 .filter(|local| match assignments[**local] {
1632 Unassigned => bug!(),
1633 Assigned(v) if v == index => true,
1634 Assigned(_) => bug!("assignment does not match variant"),
1635 Ineligible(_) => false,
1637 .map(|local| subst_field(info.field_tys[*local]));
1639 let mut variant = self.univariant_uninterned(
1642 .map(|ty| self.layout_of(ty))
1643 .collect::<Result<Vec<_>, _>>()?,
1644 &ReprOptions::default(),
1645 StructKind::Prefixed(prefix_size, prefix_align.abi),
1647 variant.variants = Variants::Single { index };
1649 let (offsets, memory_index) = match variant.fields {
1650 FieldsShape::Arbitrary { offsets, memory_index } => (offsets, memory_index),
1654 // Now, stitch the promoted and variant-only fields back together in
1655 // the order they are mentioned by our GeneratorLayout.
1656 // Because we only use some subset (that can differ between variants)
1657 // of the promoted fields, we can't just pick those elements of the
1658 // `promoted_memory_index` (as we'd end up with gaps).
1659 // So instead, we build an "inverse memory_index", as if all of the
1660 // promoted fields were being used, but leave the elements not in the
1661 // subset as `INVALID_FIELD_IDX`, which we can filter out later to
1662 // obtain a valid (bijective) mapping.
1663 const INVALID_FIELD_IDX: u32 = !0;
1664 let mut combined_inverse_memory_index =
1665 vec![INVALID_FIELD_IDX; promoted_memory_index.len() + memory_index.len()];
1666 let mut offsets_and_memory_index = iter::zip(offsets, memory_index);
1667 let combined_offsets = variant_fields
1671 let (offset, memory_index) = match assignments[*local] {
1672 Unassigned => bug!(),
1674 let (offset, memory_index) =
1675 offsets_and_memory_index.next().unwrap();
1676 (offset, promoted_memory_index.len() as u32 + memory_index)
1678 Ineligible(field_idx) => {
1679 let field_idx = field_idx.unwrap() as usize;
1680 (promoted_offsets[field_idx], promoted_memory_index[field_idx])
1683 combined_inverse_memory_index[memory_index as usize] = i as u32;
1688 // Remove the unused slots and invert the mapping to obtain the
1689 // combined `memory_index` (also see previous comment).
1690 combined_inverse_memory_index.retain(|&i| i != INVALID_FIELD_IDX);
1691 let combined_memory_index = invert_mapping(&combined_inverse_memory_index);
1693 variant.fields = FieldsShape::Arbitrary {
1694 offsets: combined_offsets,
1695 memory_index: combined_memory_index,
1698 size = size.max(variant.size);
1699 align = align.max(variant.align);
1702 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1704 size = size.align_to(align.abi);
1706 let abi = if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi.is_uninhabited())
1710 Abi::Aggregate { sized: true }
1713 let layout = tcx.intern_layout(Layout {
1714 variants: Variants::Multiple {
1716 tag_encoding: TagEncoding::Direct,
1717 tag_field: tag_index,
1720 fields: outer_fields,
1722 largest_niche: prefix.largest_niche,
1726 debug!("generator layout ({:?}): {:#?}", ty, layout);
1730 /// This is invoked by the `layout_of` query to record the final
1731 /// layout of each type.
1733 fn record_layout_for_printing(&self, layout: TyAndLayout<'tcx>) {
1734 // If we are running with `-Zprint-type-sizes`, maybe record layouts
1735 // for dumping later.
1736 if self.tcx.sess.opts.debugging_opts.print_type_sizes {
1737 self.record_layout_for_printing_outlined(layout)
1741 fn record_layout_for_printing_outlined(&self, layout: TyAndLayout<'tcx>) {
1742 // Ignore layouts that are done with non-empty environments or
1743 // non-monomorphic layouts, as the user only wants to see the stuff
1744 // resulting from the final codegen session.
1745 if layout.ty.definitely_has_param_types_or_consts(self.tcx)
1746 || !self.param_env.caller_bounds().is_empty()
1751 // (delay format until we actually need it)
1752 let record = |kind, packed, opt_discr_size, variants| {
1753 let type_desc = format!("{:?}", layout.ty);
1754 self.tcx.sess.code_stats.record_type_size(
1765 let adt_def = match *layout.ty.kind() {
1766 ty::Adt(ref adt_def, _) => {
1767 debug!("print-type-size t: `{:?}` process adt", layout.ty);
1771 ty::Closure(..) => {
1772 debug!("print-type-size t: `{:?}` record closure", layout.ty);
1773 record(DataTypeKind::Closure, false, None, vec![]);
1778 debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
1783 let adt_kind = adt_def.adt_kind();
1784 let adt_packed = adt_def.repr.pack.is_some();
1786 let build_variant_info = |n: Option<Ident>, flds: &[Symbol], layout: TyAndLayout<'tcx>| {
1787 let mut min_size = Size::ZERO;
1788 let field_info: Vec<_> = flds
1792 let field_layout = layout.field(self, i);
1793 let offset = layout.fields.offset(i);
1794 let field_end = offset + field_layout.size;
1795 if min_size < field_end {
1796 min_size = field_end;
1799 name: name.to_string(),
1800 offset: offset.bytes(),
1801 size: field_layout.size.bytes(),
1802 align: field_layout.align.abi.bytes(),
1808 name: n.map(|n| n.to_string()),
1809 kind: if layout.is_unsized() { SizeKind::Min } else { SizeKind::Exact },
1810 align: layout.align.abi.bytes(),
1811 size: if min_size.bytes() == 0 { layout.size.bytes() } else { min_size.bytes() },
1816 match layout.variants {
1817 Variants::Single { index } => {
1818 debug!("print-type-size `{:#?}` variant {}", layout, adt_def.variants[index].ident);
1819 if !adt_def.variants.is_empty() {
1820 let variant_def = &adt_def.variants[index];
1821 let fields: Vec<_> = variant_def.fields.iter().map(|f| f.ident.name).collect();
1826 vec![build_variant_info(Some(variant_def.ident), &fields, layout)],
1829 // (This case arises for *empty* enums; so give it
1831 record(adt_kind.into(), adt_packed, None, vec![]);
1835 Variants::Multiple { ref tag, ref tag_encoding, .. } => {
1837 "print-type-size `{:#?}` adt general variants def {}",
1839 adt_def.variants.len()
1841 let variant_infos: Vec<_> = adt_def
1844 .map(|(i, variant_def)| {
1845 let fields: Vec<_> =
1846 variant_def.fields.iter().map(|f| f.ident.name).collect();
1848 Some(variant_def.ident),
1850 layout.for_variant(self, i),
1857 match tag_encoding {
1858 TagEncoding::Direct => Some(tag.value.size(self)),
1868 /// Type size "skeleton", i.e., the only information determining a type's size.
1869 /// While this is conservative, (aside from constant sizes, only pointers,
1870 /// newtypes thereof and null pointer optimized enums are allowed), it is
1871 /// enough to statically check common use cases of transmute.
1872 #[derive(Copy, Clone, Debug)]
1873 pub enum SizeSkeleton<'tcx> {
1874 /// Any statically computable Layout.
1877 /// A potentially-fat pointer.
1879 /// If true, this pointer is never null.
1881 /// The type which determines the unsized metadata, if any,
1882 /// of this pointer. Either a type parameter or a projection
1883 /// depending on one, with regions erased.
1888 impl<'tcx> SizeSkeleton<'tcx> {
1892 param_env: ty::ParamEnv<'tcx>,
1893 ) -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
1894 debug_assert!(!ty.has_infer_types_or_consts());
1896 // First try computing a static layout.
1897 let err = match tcx.layout_of(param_env.and(ty)) {
1899 return Ok(SizeSkeleton::Known(layout.size));
1905 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
1906 let non_zero = !ty.is_unsafe_ptr();
1907 let tail = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
1909 ty::Param(_) | ty::Projection(_) => {
1910 debug_assert!(tail.definitely_has_param_types_or_consts(tcx));
1911 Ok(SizeSkeleton::Pointer { non_zero, tail: tcx.erase_regions(tail) })
1914 "SizeSkeleton::compute({}): layout errored ({}), yet \
1915 tail `{}` is not a type parameter or a projection",
1923 ty::Adt(def, substs) => {
1924 // Only newtypes and enums w/ nullable pointer optimization.
1925 if def.is_union() || def.variants.is_empty() || def.variants.len() > 2 {
1929 // Get a zero-sized variant or a pointer newtype.
1930 let zero_or_ptr_variant = |i| {
1931 let i = VariantIdx::new(i);
1932 let fields = def.variants[i]
1935 .map(|field| SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env));
1937 for field in fields {
1940 SizeSkeleton::Known(size) => {
1941 if size.bytes() > 0 {
1945 SizeSkeleton::Pointer { .. } => {
1956 let v0 = zero_or_ptr_variant(0)?;
1958 if def.variants.len() == 1 {
1959 if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
1960 return Ok(SizeSkeleton::Pointer {
1962 || match tcx.layout_scalar_valid_range(def.did) {
1963 (Bound::Included(start), Bound::Unbounded) => start > 0,
1964 (Bound::Included(start), Bound::Included(end)) => {
1965 0 < start && start < end
1976 let v1 = zero_or_ptr_variant(1)?;
1977 // Nullable pointer enum optimization.
1979 (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None)
1980 | (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
1981 Ok(SizeSkeleton::Pointer { non_zero: false, tail })
1987 ty::Projection(_) | ty::Opaque(..) => {
1988 let normalized = tcx.normalize_erasing_regions(param_env, ty);
1989 if ty == normalized {
1992 SizeSkeleton::compute(normalized, tcx, param_env)
2000 pub fn same_size(self, other: SizeSkeleton<'_>) -> bool {
2001 match (self, other) {
2002 (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
2003 (SizeSkeleton::Pointer { tail: a, .. }, SizeSkeleton::Pointer { tail: b, .. }) => {
2011 pub trait HasTyCtxt<'tcx>: HasDataLayout {
2012 fn tcx(&self) -> TyCtxt<'tcx>;
2015 pub trait HasParamEnv<'tcx> {
2016 fn param_env(&self) -> ty::ParamEnv<'tcx>;
2019 impl<'tcx> HasDataLayout for TyCtxt<'tcx> {
2021 fn data_layout(&self) -> &TargetDataLayout {
2026 impl<'tcx> HasTyCtxt<'tcx> for TyCtxt<'tcx> {
2028 fn tcx(&self) -> TyCtxt<'tcx> {
2033 impl<'tcx> HasDataLayout for ty::query::TyCtxtAt<'tcx> {
2035 fn data_layout(&self) -> &TargetDataLayout {
2040 impl<'tcx> HasTyCtxt<'tcx> for ty::query::TyCtxtAt<'tcx> {
2042 fn tcx(&self) -> TyCtxt<'tcx> {
2047 impl<'tcx, C> HasParamEnv<'tcx> for LayoutCx<'tcx, C> {
2048 fn param_env(&self) -> ty::ParamEnv<'tcx> {
2053 impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
2054 fn data_layout(&self) -> &TargetDataLayout {
2055 self.tcx.data_layout()
2059 impl<'tcx, T: HasTyCtxt<'tcx>> HasTyCtxt<'tcx> for LayoutCx<'tcx, T> {
2060 fn tcx(&self) -> TyCtxt<'tcx> {
2065 pub type TyAndLayout<'tcx> = rustc_target::abi::TyAndLayout<'tcx, Ty<'tcx>>;
2067 impl LayoutOf<'tcx> for LayoutCx<'tcx, TyCtxt<'tcx>> {
2069 type TyAndLayout = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
2071 /// Computes the layout of a type. Note that this implicitly
2072 /// executes in "reveal all" mode, and will normalize the input type.
2074 fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout {
2075 self.tcx.layout_of(self.param_env.and(ty))
2079 impl LayoutOf<'tcx> for LayoutCx<'tcx, ty::query::TyCtxtAt<'tcx>> {
2081 type TyAndLayout = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
2083 /// Computes the layout of a type. Note that this implicitly
2084 /// executes in "reveal all" mode, and will normalize the input type.
2086 fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout {
2087 self.tcx.layout_of(self.param_env.and(ty))
2091 impl<'tcx, C> TyAbiInterface<'tcx, C> for Ty<'tcx>
2093 C: HasTyCtxt<'tcx> + HasParamEnv<'tcx>,
2095 fn ty_and_layout_for_variant(
2096 this: TyAndLayout<'tcx>,
2098 variant_index: VariantIdx,
2099 ) -> TyAndLayout<'tcx> {
2100 let layout = match this.variants {
2101 Variants::Single { index }
2102 // If all variants but one are uninhabited, the variant layout is the enum layout.
2103 if index == variant_index &&
2104 // Don't confuse variants of uninhabited enums with the enum itself.
2105 // For more details see https://github.com/rust-lang/rust/issues/69763.
2106 this.fields != FieldsShape::Primitive =>
2111 Variants::Single { index } => {
2113 let param_env = cx.param_env();
2115 // Deny calling for_variant more than once for non-Single enums.
2116 if let Ok(original_layout) = tcx.layout_of(param_env.and(this.ty)) {
2117 assert_eq!(original_layout.variants, Variants::Single { index });
2120 let fields = match this.ty.kind() {
2121 ty::Adt(def, _) if def.variants.is_empty() =>
2122 bug!("for_variant called on zero-variant enum"),
2123 ty::Adt(def, _) => def.variants[variant_index].fields.len(),
2126 tcx.intern_layout(Layout {
2127 variants: Variants::Single { index: variant_index },
2128 fields: match NonZeroUsize::new(fields) {
2129 Some(fields) => FieldsShape::Union(fields),
2130 None => FieldsShape::Arbitrary { offsets: vec![], memory_index: vec![] },
2132 abi: Abi::Uninhabited,
2133 largest_niche: None,
2134 align: tcx.data_layout.i8_align,
2139 Variants::Multiple { ref variants, .. } => &variants[variant_index],
2142 assert_eq!(layout.variants, Variants::Single { index: variant_index });
2144 TyAndLayout { ty: this.ty, layout }
2147 fn ty_and_layout_field(this: TyAndLayout<'tcx>, cx: &C, i: usize) -> TyAndLayout<'tcx> {
2148 enum TyMaybeWithLayout<'tcx> {
2150 TyAndLayout(TyAndLayout<'tcx>),
2153 fn field_ty_or_layout(
2154 this: TyAndLayout<'tcx>,
2155 cx: &(impl HasTyCtxt<'tcx> + HasParamEnv<'tcx>),
2157 ) -> TyMaybeWithLayout<'tcx> {
2159 let tag_layout = |tag: &Scalar| -> TyAndLayout<'tcx> {
2160 let layout = Layout::scalar(cx, tag.clone());
2161 TyAndLayout { layout: tcx.intern_layout(layout), ty: tag.value.to_ty(tcx) }
2164 match *this.ty.kind() {
2173 | ty::GeneratorWitness(..)
2175 | ty::Dynamic(..) => bug!("TyAndLayout::field({:?}): not applicable", this),
2177 // Potentially-fat pointers.
2178 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
2179 assert!(i < this.fields.count());
2181 // Reuse the fat `*T` type as its own thin pointer data field.
2182 // This provides information about, e.g., DST struct pointees
2183 // (which may have no non-DST form), and will work as long
2184 // as the `Abi` or `FieldsShape` is checked by users.
2186 let nil = tcx.mk_unit();
2187 let unit_ptr_ty = if this.ty.is_unsafe_ptr() {
2190 tcx.mk_mut_ref(tcx.lifetimes.re_static, nil)
2193 // NOTE(eddyb) using an empty `ParamEnv`, and `unwrap`-ing
2194 // the `Result` should always work because the type is
2195 // always either `*mut ()` or `&'static mut ()`.
2196 return TyMaybeWithLayout::TyAndLayout(TyAndLayout {
2198 ..tcx.layout_of(ty::ParamEnv::reveal_all().and(unit_ptr_ty)).unwrap()
2202 match tcx.struct_tail_erasing_lifetimes(pointee, cx.param_env()).kind() {
2203 ty::Slice(_) | ty::Str => TyMaybeWithLayout::Ty(tcx.types.usize),
2204 ty::Dynamic(_, _) => {
2205 TyMaybeWithLayout::Ty(tcx.mk_imm_ref(
2206 tcx.lifetimes.re_static,
2207 tcx.mk_array(tcx.types.usize, 3),
2209 /* FIXME: use actual fn pointers
2210 Warning: naively computing the number of entries in the
2211 vtable by counting the methods on the trait + methods on
2212 all parent traits does not work, because some methods can
2213 be not object safe and thus excluded from the vtable.
2214 Increase this counter if you tried to implement this but
2215 failed to do it without duplicating a lot of code from
2216 other places in the compiler: 2
2218 tcx.mk_array(tcx.types.usize, 3),
2219 tcx.mk_array(Option<fn()>),
2223 _ => bug!("TyAndLayout::field({:?}): not applicable", this),
2227 // Arrays and slices.
2228 ty::Array(element, _) | ty::Slice(element) => TyMaybeWithLayout::Ty(element),
2229 ty::Str => TyMaybeWithLayout::Ty(tcx.types.u8),
2231 // Tuples, generators and closures.
2232 ty::Closure(_, ref substs) => field_ty_or_layout(
2233 TyAndLayout { ty: substs.as_closure().tupled_upvars_ty(), ..this },
2238 ty::Generator(def_id, ref substs, _) => match this.variants {
2239 Variants::Single { index } => TyMaybeWithLayout::Ty(
2242 .state_tys(def_id, tcx)
2243 .nth(index.as_usize())
2248 Variants::Multiple { ref tag, tag_field, .. } => {
2250 return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
2252 TyMaybeWithLayout::Ty(substs.as_generator().prefix_tys().nth(i).unwrap())
2256 ty::Tuple(tys) => TyMaybeWithLayout::Ty(tys[i].expect_ty()),
2259 ty::Adt(def, substs) => {
2260 match this.variants {
2261 Variants::Single { index } => {
2262 TyMaybeWithLayout::Ty(def.variants[index].fields[i].ty(tcx, substs))
2265 // Discriminant field for enums (where applicable).
2266 Variants::Multiple { ref tag, .. } => {
2268 return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
2275 | ty::Placeholder(..)
2279 | ty::Error(_) => bug!("TyAndLayout::field: unexpected type `{}`", this.ty),
2283 match field_ty_or_layout(this, cx, i) {
2284 TyMaybeWithLayout::Ty(field_ty) => {
2285 cx.tcx().layout_of(cx.param_env().and(field_ty)).unwrap_or_else(|e| {
2287 "failed to get layout for `{}`: {},\n\
2288 despite it being a field (#{}) of an existing layout: {:#?}",
2296 TyMaybeWithLayout::TyAndLayout(field_layout) => field_layout,
// Computes `PointeeInfo` (size/align/safety of the pointed-to data) for a pointer
// found at byte `offset` inside the layout `this`. Returns `None` when no pointer
// is known to live at that offset.
// NOTE(review): this listing is elided — several interior lines (and some
// parameters, e.g. `cx` and `offset`) are not visible here.
2300 fn ty_and_layout_pointee_info_at(
2301 this: TyAndLayout<'tcx>,
2304 ) -> Option<PointeeInfo> {
2306 let param_env = cx.param_env();
// Function pointers live in the instruction address space; all other data
// pointers use the default data address space.
2308 let addr_space_of_ty = |ty: Ty<'tcx>| {
2309 if ty.is_fn() { cx.data_layout().instruction_address_space } else { AddressSpace::DATA }
// Only a pointer at offset 0 of a pointer-like type is handled directly; deeper
// offsets fall through to the field-scanning path below.
2312 let pointee_info = match *this.ty.kind() {
2313 ty::RawPtr(mt) if offset.bytes() == 0 => {
2314 tcx.layout_of(param_env.and(mt.ty)).ok().map(|layout| PointeeInfo {
2316 align: layout.align.abi,
2318 address_space: addr_space_of_ty(mt.ty),
2321 ty::FnPtr(fn_sig) if offset.bytes() == 0 => {
2322 tcx.layout_of(param_env.and(tcx.mk_fn_ptr(fn_sig))).ok().map(|layout| PointeeInfo {
2324 align: layout.align.abi,
2326 address_space: cx.data_layout().instruction_address_space,
// References additionally get a `PointerKind` (Shared/Frozen/UniqueBorrowed)
// that drives noalias/readonly attributes in codegen.
2329 ty::Ref(_, ty, mt) if offset.bytes() == 0 => {
2330 let address_space = addr_space_of_ty(ty);
2331 let kind = if tcx.sess.opts.optimize == OptLevel::No {
2332 // Use conservative pointer kind if not optimizing. This saves us the
2333 // Freeze/Unpin queries, and can save time in the codegen backend (noalias
2334 // attributes in LLVM have compile-time cost even in unoptimized builds).
2338 hir::Mutability::Not => {
2339 if ty.is_freeze(tcx.at(DUMMY_SP), cx.param_env()) {
2345 hir::Mutability::Mut => {
2346 // References to self-referential structures should not be considered
2347 // noalias, as another pointer to the structure can be obtained, that
2348 // is not based-on the original reference. We consider all !Unpin
2349 // types to be potentially self-referential here.
2350 if ty.is_unpin(tcx.at(DUMMY_SP), cx.param_env()) {
2351 PointerKind::UniqueBorrowed
2359 tcx.layout_of(param_env.and(ty)).ok().map(|layout| PointeeInfo {
2361 align: layout.align.abi,
// Fallback path: pick the variant whose fields we may scan for a pointer at
// `offset` (only the dataful variant of a niche-encoded enum is sound to scan).
2368 let mut data_variant = match this.variants {
2369 // Within the discriminant field, only the niche itself is
2370 // always initialized, so we only check for a pointer at its
2373 // If the niche is a pointer, it's either valid (according
2374 // to its type), or null (which the niche field's scalar
2375 // validity range encodes). This allows using
2376 // `dereferenceable_or_null` for e.g., `Option<&T>`, and
2377 // this will continue to work as long as we don't start
2378 // using more niches than just null (e.g., the first page of
2379 // the address space, or unaligned pointers).
2380 Variants::Multiple {
2381 tag_encoding: TagEncoding::Niche { dataful_variant, .. },
2384 } if this.fields.offset(tag_field) == offset => {
2385 Some(this.for_variant(cx, dataful_variant))
2390 if let Some(variant) = data_variant {
2391 // We're not interested in any unions.
2392 if let FieldsShape::Union(_) = variant.fields {
2393 data_variant = None;
2397 let mut result = None;
// Scan the variant's fields for one that fully contains [offset, offset + ptr size)
// and recurse into it.
2399 if let Some(variant) = data_variant {
2400 let ptr_end = offset + Pointer.size(cx);
2401 for i in 0..variant.fields.count() {
2402 let field_start = variant.fields.offset(i);
2403 if field_start <= offset {
2404 let field = variant.field(cx, i);
2405 result = field.to_result().ok().and_then(|field| {
2406 if ptr_end <= field_start + field.size {
2407 // We found the right field, look inside it.
2409 field.pointee_info_at(cx, offset - field_start);
2415 if result.is_some() {
// Fixup: the pointer inside a `Box` is promoted to `UniqueOwned` even though the
// recursive scan only saw a raw pointer.
2422 // FIXME(eddyb) This should be for `ptr::Unique<T>`, not `Box<T>`.
2423 if let Some(ref mut pointee) = result {
2424 if let ty::Adt(def, _) = this.ty.kind() {
2425 if def.is_box() && offset.bytes() == 0 {
2426 pointee.safe = Some(PointerKind::UniqueOwned);
2436 "pointee_info_at (offset={:?}, type kind: {:?}) => {:?}",
// Stable hashing for `LayoutError`, used by incremental compilation: hash the
// discriminant, then any payload type carried by the variant.
2446 impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for LayoutError<'tcx> {
2448 fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
2449 use crate::ty::layout::LayoutError::*;
2450 mem::discriminant(self).hash_stable(hcx, hasher);
// Both payload-carrying variants carry a `Ty`, hashed identically.
2453 Unknown(t) | SizeOverflow(t) => t.hash_stable(hcx, hasher),
2458 impl<'tcx> ty::Instance<'tcx> {
2459 // NOTE(eddyb) this is private to avoid using it from outside of
2460 // `FnAbi::of_instance` - any other uses are either too high-level
2461 // for `Instance` (e.g. typeck would use `Ty::fn_sig` instead),
2462 // or should go through `FnAbi` instead, to avoid losing any
2463 // adjustments `FnAbi::of_instance` might be performing.
// Builds the `PolyFnSig` that ABI computation should use for this instance:
// plain `fn` items use their (normalized) declared signature, while closures
// and generators get a synthesized signature with an explicit env/resume
// parameter. NOTE(review): interior lines are elided in this listing.
2464 fn fn_sig_for_fn_abi(&self, tcx: TyCtxt<'tcx>) -> ty::PolyFnSig<'tcx> {
2465 // FIXME(davidtwco,eddyb): A `ParamEnv` should be passed through to this function.
2466 let ty = self.ty(tcx, ty::ParamEnv::reveal_all());
2469 // HACK(davidtwco,eddyb): This is a workaround for polymorphization considering
2470 // parameters unused if they show up in the signature, but not in the `mir::Body`
2471 // (i.e. due to being inside a projection that got normalized, see
2472 // `src/test/ui/polymorphization/normalized_sig_types.rs`), and codegen not keeping
2473 // track of a polymorphization `ParamEnv` to allow normalizing later.
2474 let mut sig = match *ty.kind() {
2475 ty::FnDef(def_id, substs) => tcx
2476 .normalize_erasing_regions(tcx.param_env(def_id), tcx.fn_sig(def_id))
2477 .subst(tcx, substs),
2478 _ => unreachable!(),
2481 if let ty::InstanceDef::VtableShim(..) = self.def {
2482 // Modify `fn(self, ...)` to `fn(self: *mut Self, ...)`.
2483 sig = sig.map_bound(|mut sig| {
2484 let mut inputs_and_output = sig.inputs_and_output.to_vec();
2485 inputs_and_output[0] = tcx.mk_mut_ptr(inputs_and_output[0]);
2486 sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);
// Closures: prepend the closure environment as the first argument, bound with a
// fresh late-bound `BrEnv` region.
2492 ty::Closure(def_id, substs) => {
2493 let sig = substs.as_closure().sig();
2495 let bound_vars = tcx.mk_bound_variable_kinds(
2498 .chain(iter::once(ty::BoundVariableKind::Region(ty::BrEnv))),
2500 let br = ty::BoundRegion {
2501 var: ty::BoundVar::from_usize(bound_vars.len() - 1),
2502 kind: ty::BoundRegionKind::BrEnv,
2504 let env_region = ty::ReLateBound(ty::INNERMOST, br);
2505 let env_ty = tcx.closure_env_ty(def_id, substs, env_region).unwrap();
2507 let sig = sig.skip_binder();
2508 ty::Binder::bind_with_vars(
2510 iter::once(env_ty).chain(sig.inputs().iter().cloned()),
// Generators: `resume` takes `Pin<&mut Self>` plus the resume argument, and
// returns `GeneratorState<Yield, Return>`.
2519 ty::Generator(_, substs, _) => {
2520 let sig = substs.as_generator().poly_sig();
2522 let bound_vars = tcx.mk_bound_variable_kinds(
2525 .chain(iter::once(ty::BoundVariableKind::Region(ty::BrEnv))),
2527 let br = ty::BoundRegion {
2528 var: ty::BoundVar::from_usize(bound_vars.len() - 1),
2529 kind: ty::BoundRegionKind::BrEnv,
2531 let env_region = ty::ReLateBound(ty::INNERMOST, br);
2532 let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty);
// Wrap the env reference in the `Pin` lang item: `Pin<&mut Self>`.
2534 let pin_did = tcx.require_lang_item(LangItem::Pin, None);
2535 let pin_adt_ref = tcx.adt_def(pin_did);
2536 let pin_substs = tcx.intern_substs(&[env_ty.into()]);
2537 let env_ty = tcx.mk_adt(pin_adt_ref, pin_substs);
2539 let sig = sig.skip_binder();
// Return type is `GeneratorState<yield_ty, return_ty>`.
2540 let state_did = tcx.require_lang_item(LangItem::GeneratorState, None);
2541 let state_adt_ref = tcx.adt_def(state_did);
2542 let state_substs = tcx.intern_substs(&[sig.yield_ty.into(), sig.return_ty.into()]);
2543 let ret_ty = tcx.mk_adt(state_adt_ref, state_substs);
2544 ty::Binder::bind_with_vars(
2546 [env_ty, sig.resume_ty].iter(),
2549 hir::Unsafety::Normal,
2550 rustc_target::spec::abi::Abi::Rust,
2555 _ => bug!("unexpected type {:?} in Instance::fn_sig", ty),
// Extension trait providing the entry points that compute a `FnAbi` (call-ABI
// description) for function pointers and instances. NOTE(review): some bound
// lines and the full `new_internal` signature are elided in this listing.
2560 pub trait FnAbiExt<'tcx, C>
2562 C: LayoutOf<'tcx, Ty = Ty<'tcx>, TyAndLayout = TyAndLayout<'tcx>>
2566 + HasParamEnv<'tcx>,
2568 /// Compute a `FnAbi` suitable for indirect calls, i.e. to `fn` pointers.
2570 /// NB: this doesn't handle virtual calls - those should use `FnAbi::of_instance`
2571 /// instead, where the instance is an `InstanceDef::Virtual`.
2572 fn of_fn_ptr(cx: &C, sig: ty::PolyFnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self;
2574 /// Compute a `FnAbi` suitable for declaring/defining an `fn` instance, and for
2575 /// direct calls to an `fn`.
2577 /// NB: that includes virtual calls, which are represented by "direct calls"
2578 /// to an `InstanceDef::Virtual` instance (of `<dyn Trait as Trait>::fn`).
2579 fn of_instance(cx: &C, instance: ty::Instance<'tcx>, extra_args: &[Ty<'tcx>]) -> Self;
// Shared worker behind the two constructors above (name/first params elided here).
2583 sig: ty::PolyFnSig<'tcx>,
2584 extra_args: &[Ty<'tcx>],
2585 caller_location: Option<Ty<'tcx>>,
2586 codegen_fn_attr_flags: CodegenFnAttrFlags,
2587 make_self_ptr_thin: bool,
// Applies Rust-specific and target-C-ABI adjustments to the computed `FnAbi`.
2589 fn adjust_for_abi(&mut self, cx: &C, abi: SpecAbi);
2592 /// Calculates whether a function's ABI can unwind or not.
2594 /// This takes two primary parameters:
2596 /// * `codegen_fn_attr_flags` - these are flags calculated as part of the
2597 /// codegen attrs for a defined function. For function pointers this set of
2598 /// flags is the empty set. This is only applicable for Rust-defined
2599 /// functions, and generally isn't needed except for small optimizations where
2600 /// we try to say a function which otherwise might look like it could unwind
2601 /// doesn't actually unwind (such as for intrinsics and such).
2603 /// * `abi` - this is the ABI that the function is defined with. This is the
2604 /// primary factor for determining whether a function can unwind or not.
2606 /// Note that in this case unwinding is not necessarily panicking in Rust. Rust
2607 /// panics are implemented with unwinds on most platforms (when
2608 /// `-Cpanic=unwind`), but this also accounts for `-Cpanic=abort` build modes.
2609 /// Notably unwinding is disallowed for most non-Rust ABIs unless it's
2610 /// specifically in the name (e.g. `"C-unwind"`). Unwinding within each ABI is
2611 /// defined for each ABI individually, but it always corresponds to some form of
2612 /// stack-based unwinding (the exact mechanism of which varies
2613 /// platform-by-platform).
2615 /// Rust functions are classified whether or not they can unwind based on the
2616 /// active "panic strategy". In other words Rust functions are considered to
2617 /// unwind in `-Cpanic=unwind` mode and cannot unwind in `-Cpanic=abort` mode.
2618 /// Note that Rust supports intermingling panic=abort and panic=unwind code, but
2619 /// only if the final panic mode is panic=abort. In this scenario any code
2620 /// previously compiled assuming that a function can unwind is still correct, it
2621 /// just never happens to actually unwind at runtime.
2623 /// This function's answer to whether or not a function can unwind is quite
2624 /// impactful throughout the compiler. This affects things like:
2626 /// * Calling a function which can't unwind means codegen simply ignores any
2627 /// associated unwinding cleanup.
2628 /// * Calling a function which can unwind from a function which can't unwind
2629 /// causes the `abort_unwinding_calls` MIR pass to insert a landing pad that
2630 /// aborts the process.
2631 /// * This affects whether functions have the LLVM `nounwind` attribute, which
2632 /// affects various optimizations and codegen.
2634 /// FIXME: this is actually buggy with respect to Rust functions. Rust functions
2635 /// compiled with `-Cpanic=unwind` and referenced from another crate compiled
2636 /// with `-Cpanic=abort` will look like they can't unwind when in fact they
2637 /// might (from a foreign exception or similar).
// NOTE(review): some parameter lines of this function are elided in this listing.
2639 pub fn fn_can_unwind(
2641 codegen_fn_attr_flags: CodegenFnAttrFlags,
2644 // Special attribute for functions which can't unwind.
2645 if codegen_fn_attr_flags.contains(CodegenFnAttrFlags::NEVER_UNWIND) {
2649 // Otherwise if this isn't special then unwinding is generally determined by
2650 // the ABI of the function itself. ABIs like `C` have variants which also
2651 // specifically allow unwinding (`C-unwind`), but not all platform-specific
2652 // ABIs have such an option. Otherwise the only other thing here is Rust
2653 // itself, and those ABIs are determined by the panic strategy configured
2654 // for this compilation.
2656 // Unfortunately at this time there's also another caveat. Rust [RFC
2657 // 2945][rfc] has been accepted and is in the process of being implemented
2658 // and stabilized. In this interim state we need to deal with historical
2659 // rustc behavior as well as plan for future rustc behavior.
2661 // Historically functions declared with `extern "C"` were marked at the
2662 // codegen layer as `nounwind`. This happened regardless of `panic=unwind`
2663 // or not. This is UB for functions in `panic=unwind` mode that then
2664 // actually panic and unwind. Note that this behavior is true for both
2665 // externally declared functions as well as Rust-defined function.
2667 // To fix this UB rustc would like to change in the future to catch unwinds
2668 // from function calls that may unwind within a Rust-defined `extern "C"`
2669 // function and forcibly abort the process, thereby respecting the
2670 // `nounwind` attribute emitted for `extern "C"`. This behavior change isn't
2671 // ready to roll out, so determining whether or not the `C` family of ABIs
2672 // unwinds is conditional not only on their definition but also whether the
2673 // `#![feature(c_unwind)]` feature gate is active.
2675 // Note that this means that unlike historical compilers rustc now, by
2676 // default, unconditionally thinks that the `C` ABI may unwind. This will
2677 // prevent some optimization opportunities, however, so we try to scope this
2678 // change and only assume that `C` unwinds with `panic=unwind` (as opposed
2679 // to `panic=abort`).
2681 // Eventually the check against `c_unwind` here will ideally get removed and
2682 // this'll be a little cleaner as it'll be a straightforward check of the
2685 // [rfc]: https://github.com/rust-lang/rfcs/blob/master/text/2945-c-unwind-abi.md
// The `C` family: explicitly-`unwind` variants can unwind; additionally,
// pre-`c_unwind` behavior treats them as unwinding under `panic=unwind`.
2688 C { unwind } | Stdcall { unwind } | System { unwind } | Thiscall { unwind } => {
2690 || (!tcx.features().c_unwind && tcx.sess.panic_strategy() == PanicStrategy::Unwind)
// Interrupt-style and other special ABIs never unwind.
2704 | AvrNonBlockingInterrupt
2705 | CCmseNonSecureCall
2709 | Unadjusted => false,
// Rust ABIs unwind exactly when the panic strategy is `unwind`.
2710 Rust | RustCall => tcx.sess.panic_strategy() == PanicStrategy::Unwind,
// Maps a source-level `SpecAbi` (after target-specific adjustment) to the
// backend calling convention `Conv` used by `FnAbi`.
// NOTE(review): a few match arms and the closing braces are elided in this listing.
2715 pub fn conv_from_spec_abi(tcx: TyCtxt<'_>, abi: SpecAbi) -> Conv {
2716 use rustc_target::spec::abi::Abi::*;
// `adjust_abi` lets the target remap ABIs (e.g. stdcall on non-x86) first.
2717 match tcx.sess.target.adjust_abi(abi) {
2718 RustIntrinsic | PlatformIntrinsic | Rust | RustCall => Conv::Rust,
2720 // It's the ABI's job to select this, not ours.
2721 System { .. } => bug!("system abi should be selected elsewhere"),
2722 EfiApi => bug!("eficall abi should be selected elsewhere"),
2724 Stdcall { .. } => Conv::X86Stdcall,
2725 Fastcall => Conv::X86Fastcall,
2726 Vectorcall => Conv::X86VectorCall,
2727 Thiscall { .. } => Conv::X86ThisCall,
2728 C { .. } => Conv::C,
2729 Unadjusted => Conv::C,
2730 Win64 => Conv::X86_64Win64,
2731 SysV64 => Conv::X86_64SysV,
2732 Aapcs => Conv::ArmAapcs,
2733 CCmseNonSecureCall => Conv::CCmseNonSecureCall,
2734 PtxKernel => Conv::PtxKernel,
2735 Msp430Interrupt => Conv::Msp430Intr,
2736 X86Interrupt => Conv::X86Intr,
2737 AmdGpuKernel => Conv::AmdGpuKernel,
2738 AvrInterrupt => Conv::AvrInterrupt,
2739 AvrNonBlockingInterrupt => Conv::AvrNonBlockingInterrupt,
2742 // These API constants ought to be more specific...
// Implementation of `FnAbiExt`: computes the concrete argument/return passing
// modes for a function signature. NOTE(review): this listing is elided — many
// interior lines of each method are missing.
2747 impl<'tcx, C> FnAbiExt<'tcx, C> for call::FnAbi<'tcx, Ty<'tcx>>
2749 C: LayoutOf<'tcx, Ty = Ty<'tcx>, TyAndLayout = TyAndLayout<'tcx>>
2753 + HasParamEnv<'tcx>,
// Indirect calls: no caller-location, no codegen attrs, no thin-self adjustment.
2755 fn of_fn_ptr(cx: &C, sig: ty::PolyFnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
2756 call::FnAbi::new_internal(cx, sig, extra_args, None, CodegenFnAttrFlags::empty(), false)
// Direct calls to an instance: derive signature, #[track_caller] location
// argument, codegen attr flags, and whether `self` must be passed thin
// (virtual calls).
2759 fn of_instance(cx: &C, instance: ty::Instance<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
2760 let sig = instance.fn_sig_for_fn_abi(cx.tcx());
2762 let caller_location = if instance.def.requires_caller_location(cx.tcx()) {
2763 Some(cx.tcx().caller_location_ty())
2768 let attrs = cx.tcx().codegen_fn_attrs(instance.def_id()).flags;
2770 call::FnAbi::new_internal(
2776 matches!(instance.def, ty::InstanceDef::Virtual(..)),
// Shared worker: builds the full `FnAbi` from a polymorphic signature.
// NOTE(review): the fn name line is elided here; parameters visible below.
2782 sig: ty::PolyFnSig<'tcx>,
2783 extra_args: &[Ty<'tcx>],
2784 caller_location: Option<Ty<'tcx>>,
2785 codegen_fn_attr_flags: CodegenFnAttrFlags,
2786 force_thin_self_ptr: bool,
2788 debug!("FnAbi::new_internal({:?}, {:?})", sig, extra_args);
2790 let sig = cx.tcx().normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), sig);
2792 let conv = conv_from_spec_abi(cx.tcx(), sig.abi);
// For "rust-call" ABI the last (tuple) argument is untupled into individual
// extra arguments.
2794 let mut inputs = sig.inputs();
2795 let extra_args = if sig.abi == RustCall {
2796 assert!(!sig.c_variadic && extra_args.is_empty());
2798 if let Some(input) = sig.inputs().last() {
2799 if let ty::Tuple(tupled_arguments) = input.kind() {
2800 inputs = &sig.inputs()[0..sig.inputs().len() - 1];
2801 tupled_arguments.iter().map(|k| k.expect_ty()).collect()
2804 "argument to function with \"rust-call\" ABI \
2810 "argument to function with \"rust-call\" ABI \
2815 assert!(sig.c_variadic || extra_args.is_empty());
// Target quirks: some targets do NOT ignore zero-sized arguments (checked below).
2819 let target = &cx.tcx().sess.target;
2820 let target_env_gnu_like = matches!(&target.env[..], "gnu" | "musl");
2821 let win_x64_gnu = target.os == "windows" && target.arch == "x86_64" && target.env == "gnu";
2822 let linux_s390x_gnu_like =
2823 target.os == "linux" && target.arch == "s390x" && target_env_gnu_like;
2824 let linux_sparc64_gnu_like =
2825 target.os == "linux" && target.arch == "sparc64" && target_env_gnu_like;
2826 let linux_powerpc_gnu_like =
2827 target.os == "linux" && target.arch == "powerpc" && target_env_gnu_like;
2829 let rust_abi = matches!(sig.abi, RustIntrinsic | PlatformIntrinsic | Rust | RustCall);
2831 // Handle safe Rust thin and fat pointers.
// Computes per-scalar argument attributes (zext for bool, nonnull/noalias/
// readonly/dereferenceable for pointers) based on `pointee_info_at`.
2832 let adjust_for_rust_scalar = |attrs: &mut ArgAttributes,
2834 layout: TyAndLayout<'tcx>,
2837 // Booleans are always an i1 that needs to be zero-extended.
2838 if scalar.is_bool() {
2839 attrs.ext(ArgExtension::Zext);
2843 // Only pointer types handled below.
2844 if scalar.value != Pointer {
2848 if !scalar.valid_range.contains_zero() {
2849 attrs.set(ArgAttribute::NonNull);
2852 if let Some(pointee) = layout.pointee_info_at(cx, offset) {
2853 if let Some(kind) = pointee.safe {
2854 attrs.pointee_align = Some(pointee.align);
2856 // `Box` (`UniqueBorrowed`) are not necessarily dereferenceable
2857 // for the entire duration of the function as they can be deallocated
2858 // at any time. Set their valid size to 0.
2859 attrs.pointee_size = match kind {
2860 PointerKind::UniqueOwned => Size::ZERO,
2864 // `Box` pointer parameters never alias because ownership is transferred
2865 // `&mut` pointer parameters never alias other parameters,
2866 // or mutable global data
2868 // `&T` where `T` contains no `UnsafeCell<U>` is immutable,
2869 // and can be marked as both `readonly` and `noalias`, as
2870 // LLVM's definition of `noalias` is based solely on memory
2871 // dependencies rather than pointer equality
2873 // Due to miscompiles in LLVM < 12, we apply a separate NoAliasMutRef attribute
2874 // for UniqueBorrowed arguments, so that the codegen backend can decide
2875 // whether or not to actually emit the attribute.
2876 let no_alias = match kind {
2877 PointerKind::Shared | PointerKind::UniqueBorrowed => false,
2878 PointerKind::UniqueOwned => true,
2879 PointerKind::Frozen => !is_return,
2882 attrs.set(ArgAttribute::NoAlias);
2885 if kind == PointerKind::Frozen && !is_return {
2886 attrs.set(ArgAttribute::ReadOnly);
2889 if kind == PointerKind::UniqueBorrowed && !is_return {
2890 attrs.set(ArgAttribute::NoAliasMutRef);
// Builds the `ArgAbi` for one argument (or the return value when arg_idx is None).
2896 let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| {
2897 let is_return = arg_idx.is_none();
2899 let layout = cx.layout_of(ty);
2900 let layout = if force_thin_self_ptr && arg_idx == Some(0) {
2901 // Don't pass the vtable, it's not an argument of the virtual fn.
2902 // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
2903 // or `&/&mut dyn Trait` because this is special-cased elsewhere in codegen
2904 make_thin_self_ptr(cx, layout)
2909 let mut arg = ArgAbi::new(cx, layout, |layout, scalar, offset| {
2910 let mut attrs = ArgAttributes::new();
2911 adjust_for_rust_scalar(&mut attrs, scalar, *layout, offset, is_return);
2915 if arg.layout.is_zst() {
2916 // For some forsaken reason, x86_64-pc-windows-gnu
2917 // doesn't ignore zero-sized struct arguments.
2918 // The same is true for {s390x,sparc64,powerpc}-unknown-linux-{gnu,musl}.
2922 && !linux_s390x_gnu_like
2923 && !linux_sparc64_gnu_like
2924 && !linux_powerpc_gnu_like)
2926 arg.mode = PassMode::Ignore;
// Assemble the final FnAbi: return, declared inputs, untupled extra args, and
// the optional #[track_caller] location argument.
2933 let mut fn_abi = FnAbi {
2934 ret: arg_of(sig.output(), None),
2939 .chain(caller_location)
2941 .map(|(i, ty)| arg_of(ty, Some(i)))
2943 c_variadic: sig.c_variadic,
2944 fixed_count: inputs.len(),
2946 can_unwind: fn_can_unwind(cx.tcx(), codegen_fn_attr_flags, sig.abi),
2948 fn_abi.adjust_for_abi(cx, sig.abi);
2949 debug!("FnAbi::new_internal = {:?}", fn_abi);
// Post-processing: Rust ABIs get the fixups below; all other ABIs defer to the
// target's C-ABI adjustment.
2953 fn adjust_for_abi(&mut self, cx: &C, abi: SpecAbi) {
2954 if abi == SpecAbi::Unadjusted {
2958 if abi == SpecAbi::Rust
2959 || abi == SpecAbi::RustCall
2960 || abi == SpecAbi::RustIntrinsic
2961 || abi == SpecAbi::PlatformIntrinsic
2963 let fixup = |arg: &mut ArgAbi<'tcx, Ty<'tcx>>| {
2964 if arg.is_ignore() {
2968 match arg.layout.abi {
2969 Abi::Aggregate { .. } => {}
2971 // This is a fun case! The gist of what this is doing is
2972 // that we want callers and callees to always agree on the
2973 // ABI of how they pass SIMD arguments. If we were to *not*
2974 // make these arguments indirect then they'd be immediates
2975 // in LLVM, which means that they'd used whatever the
2976 // appropriate ABI is for the callee and the caller. That
2977 // means, for example, if the caller doesn't have AVX
2978 // enabled but the callee does, then passing an AVX argument
2979 // across this boundary would cause corrupt data to show up.
2981 // This problem is fixed by unconditionally passing SIMD
2982 // arguments through memory between callers and callees
2983 // which should get them all to agree on ABI regardless of
2984 // target feature sets. Some more information about this
2985 // issue can be found in #44367.
2987 // Note that the platform intrinsic ABI is exempt here as
2988 // that's how we connect up to LLVM and it's unstable
2989 // anyway, we control all calls to it in libstd.
2991 if abi != SpecAbi::PlatformIntrinsic
2992 && cx.tcx().sess.target.simd_types_indirect =>
2994 arg.make_indirect();
3001 // Pass and return structures up to 2 pointers in size by value, matching `ScalarPair`.
3002 // LLVM will usually pass these in 2 registers, which is more efficient than by-ref.
3003 let max_by_val_size = Pointer.size(cx) * 2;
3004 let size = arg.layout.size;
3006 if arg.layout.is_unsized() || size > max_by_val_size {
3007 arg.make_indirect();
3009 // We want to pass small aggregates as immediates, but using
3010 // a LLVM aggregate type for this leads to bad optimizations,
3011 // so we pick an appropriately sized integer type instead.
3012 arg.cast_to(Reg { kind: RegKind::Integer, size });
3015 fixup(&mut self.ret);
3016 for arg in &mut self.args {
// Non-Rust ABIs: target-specific adjustment; failure is a fatal error.
3022 if let Err(msg) = self.adjust_for_cabi(cx, abi) {
3023 cx.tcx().sess.fatal(&msg);
// For virtual calls: rewrites a (possibly fat) `self` receiver layout into the
// layout of a thin pointer (`*mut ()`), while keeping the original receiver type
// so codegen can still see `&dyn Trait` / `*mut dyn Trait`.
// NOTE(review): interior lines and the closing braces are elided in this listing.
3028 fn make_thin_self_ptr<'tcx>(
3029 cx: &(impl HasTyCtxt<'tcx> + HasParamEnv<'tcx>),
3030 layout: TyAndLayout<'tcx>,
3031 ) -> TyAndLayout<'tcx> {
3033 let fat_pointer_ty = if layout.is_unsized() {
3034 // unsized `self` is passed as a pointer to `self`
3035 // FIXME (mikeyhew) change this to use &own if it is ever added to the language
3036 tcx.mk_mut_ptr(layout.ty)
// Sized receivers must already be fat-pointer-shaped (ScalarPair); anything
// else is a compiler bug.
3039 Abi::ScalarPair(..) => (),
3040 _ => bug!("receiver type has unsupported layout: {:?}", layout),
3043 // In the case of Rc<Self>, we need to explicitly pass a *mut RcBox<Self>
3044 // with a Scalar (not ScalarPair) ABI. This is a hack that is understood
3045 // elsewhere in the compiler as a method on a `dyn Trait`.
3046 // To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes until we
3047 // get a built-in pointer type
3048 let mut fat_pointer_layout = layout;
3049 'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr()
3050 && !fat_pointer_layout.ty.is_region_ptr()
// Descend into the first non-ZST field of each wrapper until a raw/region
// pointer is reached.
3052 for i in 0..fat_pointer_layout.fields.count() {
3053 let field_layout = fat_pointer_layout.field(cx, i);
3055 if !field_layout.is_zst() {
3056 fat_pointer_layout = field_layout;
3057 continue 'descend_newtypes;
3061 bug!("receiver has no non-zero-sized fields {:?}", fat_pointer_layout);
3064 fat_pointer_layout.ty
3067 // we now have a type like `*mut RcBox<dyn Trait>`
3068 // change its layout to that of `*mut ()`, a thin pointer, but keep the same type
3069 // this is understood as a special case elsewhere in the compiler
3070 let unit_ptr_ty = tcx.mk_mut_ptr(tcx.mk_unit());
3075 // NOTE(eddyb) using an empty `ParamEnv`, and `unwrap`-ing the `Result`
3076 // should always work because the type is always `*mut ()`.
3077 ..tcx.layout_of(ty::ParamEnv::reveal_all().and(unit_ptr_ty)).unwrap()