1 use crate::ich::StableHashingContext;
2 use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
3 use crate::mir::{GeneratorLayout, GeneratorSavedLocal};
4 use crate::ty::subst::Subst;
5 use crate::ty::{self, subst::SubstsRef, ReprOptions, Ty, TyCtxt, TypeFoldable};
8 use rustc_attr as attr;
9 use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
11 use rustc_hir::lang_items::LangItem;
12 use rustc_index::bit_set::BitSet;
13 use rustc_index::vec::{Idx, IndexVec};
14 use rustc_session::{DataTypeKind, FieldInfo, SizeKind, VariantInfo};
15 use rustc_span::symbol::{Ident, Symbol};
16 use rustc_span::DUMMY_SP;
17 use rustc_target::abi::call::{
18 ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, Conv, FnAbi, PassMode, Reg, RegKind,
20 use rustc_target::abi::*;
21 use rustc_target::spec::{abi::Abi as SpecAbi, HasTargetSpec, PanicStrategy};
27 use std::num::NonZeroUsize;
// Extension trait adding rustc-specific conversions to `rustc_target::abi::Integer`:
// mapping to/from primitive `Ty`s, `attr::IntType` hints, and the
// `ty::IntTy`/`ty::UintTy` kinds.
// NOTE(review): part of the trait body appears elided in this view (e.g. the
// `repr_discr` signature implemented below is not visible here) — confirm
// against the full file before editing.
30 pub trait IntegerExt {
// Convert this abi integer (plus a signedness flag) to the matching `Ty`.
31 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx>;
// Convert a `#[repr(...)]` attribute integer hint to an abi `Integer`.
32 fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
// Convert a signed `ty::IntTy` to an abi `Integer` (isize is target-dependent).
33 fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer;
// Convert an unsigned `ty::UintTy` to an abi `Integer` (usize is target-dependent).
34 fn from_uint_ty<C: HasDataLayout>(cx: &C, uty: ty::UintTy) -> Integer;
// Implementation of `IntegerExt` for `rustc_target::abi::Integer`.
// NOTE(review): several interior lines of this impl appear elided in this view
// (some match headers, match arms, and closing braces); the visible lines are
// kept byte-identical below.
44 impl IntegerExt for Integer {
// Map (width, signedness) to the corresponding primitive type interned on tcx.
45 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx> {
46 match (*self, signed) {
47 (I8, false) => tcx.types.u8,
48 (I16, false) => tcx.types.u16,
49 (I32, false) => tcx.types.u32,
50 (I64, false) => tcx.types.u64,
51 (I128, false) => tcx.types.u128,
52 (I8, true) => tcx.types.i8,
53 (I16, true) => tcx.types.i16,
54 (I32, true) => tcx.types.i32,
55 (I64, true) => tcx.types.i64,
56 (I128, true) => tcx.types.i128,
60 /// Gets the Integer type from an attr::IntType.
61 fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
62 let dl = cx.data_layout();
// Signed and unsigned hints of equal width map to the same abi `Integer`;
// isize/usize use the target's pointer-sized integer from the data layout.
65 attr::SignedInt(ast::IntTy::I8) | attr::UnsignedInt(ast::UintTy::U8) => I8,
66 attr::SignedInt(ast::IntTy::I16) | attr::UnsignedInt(ast::UintTy::U16) => I16,
67 attr::SignedInt(ast::IntTy::I32) | attr::UnsignedInt(ast::UintTy::U32) => I32,
68 attr::SignedInt(ast::IntTy::I64) | attr::UnsignedInt(ast::UintTy::U64) => I64,
69 attr::SignedInt(ast::IntTy::I128) | attr::UnsignedInt(ast::UintTy::U128) => I128,
70 attr::SignedInt(ast::IntTy::Isize) | attr::UnsignedInt(ast::UintTy::Usize) => {
71 dl.ptr_sized_integer()
// Map a `ty::IntTy` to an abi `Integer` (the I8 arm appears elided in this view).
76 fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer {
79 ty::IntTy::I16 => I16,
80 ty::IntTy::I32 => I32,
81 ty::IntTy::I64 => I64,
82 ty::IntTy::I128 => I128,
83 ty::IntTy::Isize => cx.data_layout().ptr_sized_integer(),
// Map a `ty::UintTy` to an abi `Integer` (the U8 arm appears elided in this view).
86 fn from_uint_ty<C: HasDataLayout>(cx: &C, ity: ty::UintTy) -> Integer {
89 ty::UintTy::U16 => I16,
90 ty::UintTy::U32 => I32,
91 ty::UintTy::U64 => I64,
92 ty::UintTy::U128 => I128,
93 ty::UintTy::Usize => cx.data_layout().ptr_sized_integer(),
97 /// Finds the appropriate Integer type and signedness for the given
98 /// signed discriminant range and `#[repr]` attribute.
99 /// N.B.: `u128` values above `i128::MAX` will be treated as signed, but
100 /// that shouldn't affect anything, other than maybe debuginfo.
// NOTE(review): the `fn repr_discr(...)` parameter list is elided from this view;
// only the return-type line below survives.
107 ) -> (Integer, bool) {
108 // Theoretically, negative values could be larger in unsigned representation
109 // than the unsigned representation of the signed minimum. However, if there
110 // are any negative values, the only valid unsigned representation is u128
111 // which can fit all i128 values, so the result remains unaffected.
112 let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
113 let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));
115 let mut min_from_extern = None;
116 let min_default = I8;
// An explicit `#[repr(int)]` hint wins outright — but it is a hard error
// (bug!) if the hinted width cannot represent the discriminant range.
118 if let Some(ity) = repr.int {
119 let discr = Integer::from_attr(&tcx, ity);
120 let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
123 "Integer::repr_discr: `#[repr]` hint too small for \
124 discriminant range of enum `{}",
// NOTE(review): the message above is missing the closing backtick after `{}` —
// a cosmetic defect in the panic text, left untouched in this comment-only pass.
128 return (discr, ity.is_signed());
// Without a hint, `#[repr(C)]`-style layouts take a per-architecture minimum
// tag width (C enum ABI); see the guard that presumably precedes this match.
132 match &tcx.sess.target.arch[..] {
133 "hexagon" => min_from_extern = Some(I8),
134 // WARNING: the ARM EABI has two variants; the one corresponding
135 // to `at_least == I32` appears to be used on Linux and NetBSD,
136 // but some systems may use the variant corresponding to no
137 // lower bound. However, we don't run on those yet...?
138 "arm" => min_from_extern = Some(I32),
139 _ => min_from_extern = Some(I32),
143 let at_least = min_from_extern.unwrap_or(min_default);
145 // If there are no negative values, we can use the unsigned fit.
147 (cmp::max(unsigned_fit, at_least), false)
149 (cmp::max(signed_fit, at_least), true)
// Extension trait adding `Ty` conversions to `rustc_target::abi::Primitive`.
154 pub trait PrimitiveExt {
// Convert this primitive to the matching `Ty` (pointers become `*mut ()`).
155 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
// Convert this primitive to an *integer* `Ty`; bugs out on floats (see impl).
156 fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
159 impl PrimitiveExt for Primitive {
// Map an abi primitive back to a `Ty`; `Pointer` has no single source type,
// so it is rendered as `*mut ()`.
160 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
162 Int(i, signed) => i.to_ty(tcx, signed),
163 F32 => tcx.types.f32,
164 F64 => tcx.types.f64,
165 Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
169 /// Return an *integer* type matching this primitive.
170 /// Useful in particular when dealing with enum discriminants.
// NOTE(review): this line appears to have lost its `<'tcx>` generic parameter
// (compare `to_ty` above, which declares `fn to_ty<'tcx>`); likely an
// extraction artifact — confirm against the full file.
171 fn to_int_ty(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
173 Int(i, signed) => i.to_ty(tcx, signed),
// A pointer-sized discriminant is treated as `usize`.
174 Pointer => tcx.types.usize,
175 F32 | F64 => bug!("floats do not have an int type"),
180 /// The first half of a fat pointer.
182 /// - For a trait object, this is the address of the box.
183 /// - For a slice, this is the base address.
184 pub const FAT_PTR_ADDR: usize = 0;
186 /// The second half of a fat pointer.
188 /// - For a trait object, this is the address of the vtable.
189 /// - For a slice, this is the length.
190 pub const FAT_PTR_EXTRA: usize = 1;
192 /// The maximum supported number of lanes in a SIMD vector.
194 /// This value is selected based on backend support:
195 /// * LLVM does not appear to have a vector width limit.
196 /// * Cranelift stores the base-2 log of the lane count in a 4 bit integer.
// `1 << 0xF` == 2^15 == 32768: the largest lane count whose log2 (15) fits
// in Cranelift's 4-bit field.
197 pub const MAX_SIMD_LANES: u64 = 1 << 0xF;
// Errors that can arise while computing a type's layout.
// NOTE(review): an `Unknown(Ty<'tcx>)` variant is elided from this view — the
// `Display` impl below matches on `LayoutError::Unknown`, so it exists in the
// full file.
199 #[derive(Copy, Clone, Debug, TyEncodable, TyDecodable)]
200 pub enum LayoutError<'tcx> {
// The type's size exceeds what the target architecture can represent.
202 SizeOverflow(Ty<'tcx>),
// User-facing rendering of layout errors (used in diagnostics).
205 impl<'tcx> fmt::Display for LayoutError<'tcx> {
206 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
208 LayoutError::Unknown(ty) => write!(f, "the type `{}` has an unknown layout", ty),
209 LayoutError::SizeOverflow(ty) => {
210 write!(f, "values of the type `{}` are too big for the current architecture", ty)
// Provider for the `layout_raw` query: computes the layout of the queried type
// within its `ParamEnv`, using the session recursion limit (via
// `icx.layout_depth`) to guard against infinitely recursive layouts.
// NOTE(review): the `fn layout_raw(tcx, ...)` header line is elided from this
// view; the lines below are its remaining parameter and body.
218 query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
219 ) -> Result<&'tcx Layout, LayoutError<'tcx>> {
220 ty::tls::with_related_context(tcx, move |icx| {
221 let (param_env, ty) = query.into_parts();
// A fatal error (not a recoverable `LayoutError`): the depth limit means
// layout computation itself is diverging for this type.
223 if !tcx.sess.recursion_limit().value_within_limit(icx.layout_depth) {
224 tcx.sess.fatal(&format!("overflow representing the type `{}`", ty));
227 // Update the ImplicitCtxt to increase the layout_depth
228 let icx = ty::tls::ImplicitCtxt { layout_depth: icx.layout_depth + 1, ..icx.clone() };
// Run the actual (uncached) layout computation inside the deepened context.
230 ty::tls::enter_context(&icx, |_| {
231 let cx = LayoutCx { tcx, param_env };
232 let layout = cx.layout_raw_uncached(ty);
233 // Type-level uninhabitedness should always imply ABI uninhabitedness.
234 if let Ok(layout) = layout {
235 if tcx.conservative_is_privately_uninhabited(param_env.and(ty)) {
236 assert!(layout.abi.is_uninhabited());
// Registers this module's query providers (currently just `layout_raw`),
// preserving all providers installed by other modules via struct-update.
244 pub fn provide(providers: &mut ty::query::Providers) {
245 *providers = ty::query::Providers { layout_raw, ..*providers };
// Context for layout computation: pairs a (generic) compiler context `C` with
// the `ParamEnv` in which types are normalized.
// NOTE(review): the `tcx: C` field appears elided from this view (construction
// sites above use `LayoutCx { tcx, param_env }`).
248 pub struct LayoutCx<'tcx, C> {
249 pub param_env: ty::ParamEnv<'tcx>,
// How a univariant layout's fields may relate to unsizing / a prefix; consumed
// by `univariant_uninterned` below.
// NOTE(review): the `enum StructKind {` header line and the first two variant
// names are elided from this view; only their doc comments survive.
253 #[derive(Copy, Clone, Debug)]
255 /// A tuple, closure, or univariant which cannot be coerced to unsized.
257 /// A univariant, the last field of which may be coerced to unsized.
259 /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
260 Prefixed(Size, Align),
263 // Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
264 // This is used to go between `memory_index` (source field order to memory order)
265 // and `inverse_memory_index` (memory order to source field order).
266 // See also `FieldsShape::Arbitrary::memory_index` for more details.
267 // FIXME(eddyb) build a better abstraction for permutations, if possible.
// Precondition (implied by bijectivity): every value in `map` is a distinct
// in-range index, so each `inverse` slot is written exactly once.
268 fn invert_mapping(map: &[u32]) -> Vec<u32> {
269 let mut inverse = vec![0; map.len()];
270 for i in 0..map.len() {
271 inverse[map[i] as usize] = i as u32;
276 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
// Builds the canonical `ScalarPair` layout for two scalars `a` then `b`:
// `a` at offset 0, `b` at `a`'s size rounded up to `b`'s ABI alignment, the
// whole aggregate aligned to max(a, b, aggregate_align) and size rounded up.
// NOTE(review): the trailing `Layout { .. }` construction is partially elided
// from this view.
277 fn scalar_pair(&self, a: Scalar, b: Scalar) -> Layout {
278 let dl = self.data_layout();
279 let b_align = b.value.align(dl);
280 let align = a.value.align(dl).max(b_align).max(dl.aggregate_align);
281 let b_offset = a.value.size(dl).align_to(b_align.abi);
282 let size = (b_offset + b.value.size(dl)).align_to(align.abi);
284 // HACK(nox): We iter on `b` and then `a` because `max_by_key`
285 // returns the last maximum.
286 let largest_niche = Niche::from_scalar(dl, b_offset, b.clone())
288 .chain(Niche::from_scalar(dl, Size::ZERO, a.clone()))
289 .max_by_key(|niche| niche.available(dl));
292 variants: Variants::Single { index: VariantIdx::new(0) },
293 fields: FieldsShape::Arbitrary {
294 offsets: vec![Size::ZERO, b_offset],
295 memory_index: vec![0, 1],
297 abi: Abi::ScalarPair(a, b),
// Computes an (uninterned) layout for a single-variant aggregate (struct,
// tuple, closure environment, or one enum variant): optionally reorders fields
// for size, assigns offsets, tracks the largest niche, and tries to collapse
// the ABI to a Scalar / ScalarPair newtype where legal.
// NOTE(review): many interior lines of this function are elided from this view
// (the parameter list, several match headers, `else` arms, and closing
// braces); the surviving lines are kept byte-identical below.
304 fn univariant_uninterned(
307 fields: &[TyAndLayout<'_>],
310 ) -> Result<Layout, LayoutError<'tcx>> {
311 let dl = self.data_layout();
312 let pack = repr.pack;
// `#[repr(packed)]` and `#[repr(align)]` are mutually exclusive; typeck
// should have rejected the combination, so reaching here is a compiler bug.
313 if pack.is_some() && repr.align.is_some() {
314 bug!("struct cannot be packed and aligned");
317 let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };
// Identity permutation to start with; optimization below may reorder it.
319 let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();
321 let optimize = !repr.inhibit_struct_field_reordering_opt();
// For MaybeUnsized, the (possibly unsized) last field must stay last, so it
// is excluded from the reorderable range.
324 if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
325 let optimizing = &mut inverse_memory_index[..end];
326 let field_align = |f: &TyAndLayout<'_>| {
327 if let Some(pack) = pack { f.align.abi.min(pack) } else { f.align.abi }
330 StructKind::AlwaysSized | StructKind::MaybeUnsized => {
331 optimizing.sort_by_key(|&x| {
332 // Place ZSTs first to avoid "interesting offsets",
333 // especially with only one or two non-ZST fields.
334 let f = &fields[x as usize];
335 (!f.is_zst(), cmp::Reverse(field_align(f)))
338 StructKind::Prefixed(..) => {
339 // Sort in ascending alignment so that the layout stays optimal
340 // regardless of the prefix
341 optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
346 // inverse_memory_index holds field indices by increasing memory offset.
347 // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
348 // We now write field offsets to the corresponding offset slot;
349 // field 5 with offset 0 puts 0 in offsets[5].
350 // At the bottom of this function, we invert `inverse_memory_index` to
351 // produce `memory_index` (see `invert_mapping`).
353 let mut sized = true;
354 let mut offsets = vec![Size::ZERO; fields.len()];
355 let mut offset = Size::ZERO;
356 let mut largest_niche = None;
357 let mut largest_niche_available = 0;
// A Prefixed layout (e.g. an enum tag) reserves `prefix_size` bytes before
// any field, and contributes `prefix_align` (capped by `pack`) to alignment.
359 if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
361 if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
362 align = align.max(AbiAndPrefAlign::new(prefix_align));
363 offset = prefix_size.align_to(prefix_align);
// Assign offsets by walking fields in memory order.
366 for &i in &inverse_memory_index {
367 let field = fields[i as usize];
// Only the last field (in memory order) may be unsized.
369 bug!("univariant: field #{} of `{}` comes after unsized field", offsets.len(), ty);
372 if field.is_unsized() {
376 // Invariant: offset < dl.obj_size_bound() <= 1<<61
377 let field_align = if let Some(pack) = pack {
378 field.align.min(AbiAndPrefAlign::new(pack))
382 offset = offset.align_to(field_align.abi);
383 align = align.max(field_align);
385 debug!("univariant offset: {:?} field: {:#?}", offset, field);
386 offsets[i as usize] = offset;
// Track the niche with the most spare values, translating the field's
// niche offset into the aggregate's coordinate space.
388 if !repr.hide_niche() {
389 if let Some(mut niche) = field.largest_niche.clone() {
390 let available = niche.available(dl);
391 if available > largest_niche_available {
392 largest_niche_available = available;
393 niche.offset += offset;
394 largest_niche = Some(niche);
// Overflowing the target's object-size bound is a reportable layout error.
399 offset = offset.checked_add(field.size, dl).ok_or(LayoutError::SizeOverflow(ty))?;
402 if let Some(repr_align) = repr.align {
403 align = align.max(AbiAndPrefAlign::new(repr_align));
406 debug!("univariant min_size: {:?}", offset);
407 let min_size = offset;
409 // As stated above, inverse_memory_index holds field indices by increasing offset.
410 // This makes it an already-sorted view of the offsets vec.
411 // To invert it, consider:
412 // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
413 // Field 5 would be the first element, so memory_index is i:
414 // Note: if we didn't optimize, it's already right.
417 if optimize { invert_mapping(&inverse_memory_index) } else { inverse_memory_index };
419 let size = min_size.align_to(align.abi);
420 let mut abi = Abi::Aggregate { sized };
422 // Unpack newtype ABIs and find scalar pairs.
423 if sized && size.bytes() > 0 {
424 // All other fields must be ZSTs.
425 let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());
427 match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
428 // We have exactly one non-ZST field.
429 (Some((i, field)), None, None) => {
430 // Field fills the struct and it has a scalar or scalar pair ABI.
431 if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size
434 // For plain scalars, or vectors of them, we can't unpack
435 // newtypes for `#[repr(C)]`, as that affects C ABIs.
436 Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
437 abi = field.abi.clone();
439 // But scalar pairs are Rust-specific and get
440 // treated as aggregates by C ABIs anyway.
441 Abi::ScalarPair(..) => {
442 abi = field.abi.clone();
449 // Two non-ZST fields, and they're both scalars.
451 Some((i, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(ref a), .. }, .. })),
452 Some((j, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(ref b), .. }, .. })),
455 // Order by the memory placement, not source order.
456 let ((i, a), (j, b)) =
457 if offsets[i] < offsets[j] { ((i, a), (j, b)) } else { ((j, b), (i, a)) };
// Recompute the canonical pair layout and only adopt ScalarPair if
// it agrees with the offsets/alignment already computed above.
458 let pair = self.scalar_pair(a.clone(), b.clone());
459 let pair_offsets = match pair.fields {
460 FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
461 assert_eq!(memory_index, &[0, 1]);
466 if offsets[i] == pair_offsets[0]
467 && offsets[j] == pair_offsets[1]
468 && align == pair.align
471 // We can use `ScalarPair` only when it matches our
472 // already computed layout (including `#[repr(C)]`).
// Any uninhabited field makes the whole aggregate uninhabited.
481 if sized && fields.iter().any(|f| f.abi.is_uninhabited()) {
482 abi = Abi::Uninhabited;
486 variants: Variants::Single { index: VariantIdx::new(0) },
487 fields: FieldsShape::Arbitrary { offsets, memory_index },
495 fn layout_raw_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx Layout, LayoutError<'tcx>> {
497 let param_env = self.param_env;
498 let dl = self.data_layout();
499 let scalar_unit = |value: Primitive| {
500 let bits = value.size(dl).bits();
501 assert!(bits <= 128);
502 Scalar { value, valid_range: 0..=(!0 >> (128 - bits)) }
504 let scalar = |value: Primitive| tcx.intern_layout(Layout::scalar(self, scalar_unit(value)));
506 let univariant = |fields: &[TyAndLayout<'_>], repr: &ReprOptions, kind| {
507 Ok(tcx.intern_layout(self.univariant_uninterned(ty, fields, repr, kind)?))
509 debug_assert!(!ty.has_infer_types_or_consts());
511 Ok(match *ty.kind() {
513 ty::Bool => tcx.intern_layout(Layout::scalar(
515 Scalar { value: Int(I8, false), valid_range: 0..=1 },
517 ty::Char => tcx.intern_layout(Layout::scalar(
519 Scalar { value: Int(I32, false), valid_range: 0..=0x10FFFF },
521 ty::Int(ity) => scalar(Int(Integer::from_int_ty(dl, ity), true)),
522 ty::Uint(ity) => scalar(Int(Integer::from_uint_ty(dl, ity), false)),
523 ty::Float(fty) => scalar(match fty {
524 ty::FloatTy::F32 => F32,
525 ty::FloatTy::F64 => F64,
528 let mut ptr = scalar_unit(Pointer);
529 ptr.valid_range = 1..=*ptr.valid_range.end();
530 tcx.intern_layout(Layout::scalar(self, ptr))
534 ty::Never => tcx.intern_layout(Layout {
535 variants: Variants::Single { index: VariantIdx::new(0) },
536 fields: FieldsShape::Primitive,
537 abi: Abi::Uninhabited,
543 // Potentially-wide pointers.
544 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
545 let mut data_ptr = scalar_unit(Pointer);
546 if !ty.is_unsafe_ptr() {
547 data_ptr.valid_range = 1..=*data_ptr.valid_range.end();
550 let pointee = tcx.normalize_erasing_regions(param_env, pointee);
551 if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
552 return Ok(tcx.intern_layout(Layout::scalar(self, data_ptr)));
555 let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
556 let metadata = match unsized_part.kind() {
558 return Ok(tcx.intern_layout(Layout::scalar(self, data_ptr)));
560 ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)),
562 let mut vtable = scalar_unit(Pointer);
563 vtable.valid_range = 1..=*vtable.valid_range.end();
566 _ => return Err(LayoutError::Unknown(unsized_part)),
569 // Effectively a (ptr, meta) tuple.
570 tcx.intern_layout(self.scalar_pair(data_ptr, metadata))
573 // Arrays and slices.
574 ty::Array(element, mut count) => {
575 if count.has_projections() {
576 count = tcx.normalize_erasing_regions(param_env, count);
577 if count.has_projections() {
578 return Err(LayoutError::Unknown(ty));
582 let count = count.try_eval_usize(tcx, param_env).ok_or(LayoutError::Unknown(ty))?;
583 let element = self.layout_of(element)?;
585 element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;
588 if count != 0 && tcx.conservative_is_privately_uninhabited(param_env.and(ty)) {
591 Abi::Aggregate { sized: true }
594 let largest_niche = if count != 0 { element.largest_niche.clone() } else { None };
596 tcx.intern_layout(Layout {
597 variants: Variants::Single { index: VariantIdx::new(0) },
598 fields: FieldsShape::Array { stride: element.size, count },
601 align: element.align,
605 ty::Slice(element) => {
606 let element = self.layout_of(element)?;
607 tcx.intern_layout(Layout {
608 variants: Variants::Single { index: VariantIdx::new(0) },
609 fields: FieldsShape::Array { stride: element.size, count: 0 },
610 abi: Abi::Aggregate { sized: false },
612 align: element.align,
616 ty::Str => tcx.intern_layout(Layout {
617 variants: Variants::Single { index: VariantIdx::new(0) },
618 fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
619 abi: Abi::Aggregate { sized: false },
626 ty::FnDef(..) => univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?,
627 ty::Dynamic(..) | ty::Foreign(..) => {
628 let mut unit = self.univariant_uninterned(
631 &ReprOptions::default(),
632 StructKind::AlwaysSized,
635 Abi::Aggregate { ref mut sized } => *sized = false,
638 tcx.intern_layout(unit)
641 ty::Generator(def_id, substs, _) => self.generator_layout(ty, def_id, substs)?,
643 ty::Closure(_, ref substs) => {
644 let tys = substs.as_closure().upvar_tys();
646 &tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
647 &ReprOptions::default(),
648 StructKind::AlwaysSized,
654 if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };
658 .map(|k| self.layout_of(k.expect_ty()))
659 .collect::<Result<Vec<_>, _>>()?,
660 &ReprOptions::default(),
665 // SIMD vector types.
666 ty::Adt(def, substs) if def.repr.simd() => {
667 // Supported SIMD vectors are homogeneous ADTs with at least one field:
669 // * #[repr(simd)] struct S(T, T, T, T);
670 // * #[repr(simd)] struct S { x: T, y: T, z: T, w: T }
671 // * #[repr(simd)] struct S([T; 4])
673 // where T is a primitive scalar (integer/float/pointer).
675 // SIMD vectors with zero fields are not supported.
676 // (should be caught by typeck)
677 if def.non_enum_variant().fields.is_empty() {
678 tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
681 // Type of the first ADT field:
682 let f0_ty = def.non_enum_variant().fields[0].ty(tcx, substs);
684 // Heterogeneous SIMD vectors are not supported:
685 // (should be caught by typeck)
686 for fi in &def.non_enum_variant().fields {
687 if fi.ty(tcx, substs) != f0_ty {
688 tcx.sess.fatal(&format!("monomorphising heterogeneous SIMD type `{}`", ty));
692 // The element type and number of elements of the SIMD vector
693 // are obtained from:
695 // * the element type and length of the single array field, if
696 // the first field is of array type, or
698 // * the homogenous field type and the number of fields.
699 let (e_ty, e_len, is_array) = if let ty::Array(e_ty, _) = f0_ty.kind() {
700 // First ADT field is an array:
702 // SIMD vectors with multiple array fields are not supported:
703 // (should be caught by typeck)
704 if def.non_enum_variant().fields.len() != 1 {
705 tcx.sess.fatal(&format!(
706 "monomorphising SIMD type `{}` with more than one array field",
711 // Extract the number of elements from the layout of the array field:
712 let len = if let Ok(TyAndLayout {
713 layout: Layout { fields: FieldsShape::Array { count, .. }, .. },
715 }) = self.layout_of(f0_ty)
719 return Err(LayoutError::Unknown(ty));
724 // First ADT field is not an array:
725 (f0_ty, def.non_enum_variant().fields.len() as _, false)
728 // SIMD vectors of zero length are not supported.
729 // Additionally, lengths are capped at 2^16 as a fixed maximum backends must
732 // Can't be caught in typeck if the array length is generic.
734 tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
735 } else if !e_len.is_power_of_two() {
736 tcx.sess.fatal(&format!(
737 "monomorphising SIMD type `{}` of non-power-of-two length",
740 } else if e_len > MAX_SIMD_LANES {
741 tcx.sess.fatal(&format!(
742 "monomorphising SIMD type `{}` of length greater than {}",
747 // Compute the ABI of the element type:
748 let e_ly = self.layout_of(e_ty)?;
749 let e_abi = if let Abi::Scalar(ref scalar) = e_ly.abi {
752 // This error isn't caught in typeck, e.g., if
753 // the element type of the vector is generic.
754 tcx.sess.fatal(&format!(
755 "monomorphising SIMD type `{}` with a non-primitive-scalar \
756 (integer/float/pointer) element type `{}`",
761 // Compute the size and alignment of the vector:
762 let size = e_ly.size.checked_mul(e_len, dl).ok_or(LayoutError::SizeOverflow(ty))?;
763 let align = dl.vector_align(size);
764 let size = size.align_to(align.abi);
766 // Compute the placement of the vector fields:
767 let fields = if is_array {
768 FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] }
770 FieldsShape::Array { stride: e_ly.size, count: e_len }
773 tcx.intern_layout(Layout {
774 variants: Variants::Single { index: VariantIdx::new(0) },
776 abi: Abi::Vector { element: e_abi, count: e_len },
777 largest_niche: e_ly.largest_niche.clone(),
784 ty::Adt(def, substs) => {
785 // Cache the field layouts.
792 .map(|field| self.layout_of(field.ty(tcx, substs)))
793 .collect::<Result<Vec<_>, _>>()
795 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
798 if def.repr.pack.is_some() && def.repr.align.is_some() {
799 bug!("union cannot be packed and aligned");
803 if def.repr.pack.is_some() { dl.i8_align } else { dl.aggregate_align };
805 if let Some(repr_align) = def.repr.align {
806 align = align.max(AbiAndPrefAlign::new(repr_align));
809 let optimize = !def.repr.inhibit_union_abi_opt();
810 let mut size = Size::ZERO;
811 let mut abi = Abi::Aggregate { sized: true };
812 let index = VariantIdx::new(0);
813 for field in &variants[index] {
814 assert!(!field.is_unsized());
815 align = align.max(field.align);
817 // If all non-ZST fields have the same ABI, forward this ABI
818 if optimize && !field.is_zst() {
819 // Normalize scalar_unit to the maximal valid range
820 let field_abi = match &field.abi {
821 Abi::Scalar(x) => Abi::Scalar(scalar_unit(x.value)),
822 Abi::ScalarPair(x, y) => {
823 Abi::ScalarPair(scalar_unit(x.value), scalar_unit(y.value))
825 Abi::Vector { element: x, count } => {
826 Abi::Vector { element: scalar_unit(x.value), count: *count }
828 Abi::Uninhabited | Abi::Aggregate { .. } => {
829 Abi::Aggregate { sized: true }
833 if size == Size::ZERO {
834 // first non ZST: initialize 'abi'
836 } else if abi != field_abi {
837 // different fields have different ABI: reset to Aggregate
838 abi = Abi::Aggregate { sized: true };
842 size = cmp::max(size, field.size);
845 if let Some(pack) = def.repr.pack {
846 align = align.min(AbiAndPrefAlign::new(pack));
849 return Ok(tcx.intern_layout(Layout {
850 variants: Variants::Single { index },
851 fields: FieldsShape::Union(
852 NonZeroUsize::new(variants[index].len())
853 .ok_or(LayoutError::Unknown(ty))?,
858 size: size.align_to(align.abi),
862 // A variant is absent if it's uninhabited and only has ZST fields.
863 // Present uninhabited variants only require space for their fields,
864 // but *not* an encoding of the discriminant (e.g., a tag value).
865 // See issue #49298 for more details on the need to leave space
866 // for non-ZST uninhabited data (mostly partial initialization).
867 let absent = |fields: &[TyAndLayout<'_>]| {
868 let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
869 let is_zst = fields.iter().all(|f| f.is_zst());
870 uninhabited && is_zst
872 let (present_first, present_second) = {
873 let mut present_variants = variants
875 .filter_map(|(i, v)| if absent(v) { None } else { Some(i) });
876 (present_variants.next(), present_variants.next())
878 let present_first = match present_first {
879 Some(present_first) => present_first,
880 // Uninhabited because it has no variants, or only absent ones.
881 None if def.is_enum() => return tcx.layout_raw(param_env.and(tcx.types.never)),
882 // If it's a struct, still compute a layout so that we can still compute the
884 None => VariantIdx::new(0),
887 let is_struct = !def.is_enum() ||
888 // Only one variant is present.
889 (present_second.is_none() &&
890 // Representation optimizations are allowed.
891 !def.repr.inhibit_enum_layout_opt());
893 // Struct, or univariant enum equivalent to a struct.
894 // (Typechecking will reject discriminant-sizing attrs.)
896 let v = present_first;
897 let kind = if def.is_enum() || variants[v].is_empty() {
898 StructKind::AlwaysSized
900 let param_env = tcx.param_env(def.did);
901 let last_field = def.variants[v].fields.last().unwrap();
903 tcx.type_of(last_field.did).is_sized(tcx.at(DUMMY_SP), param_env);
905 StructKind::MaybeUnsized
907 StructKind::AlwaysSized
911 let mut st = self.univariant_uninterned(ty, &variants[v], &def.repr, kind)?;
912 st.variants = Variants::Single { index: v };
913 let (start, end) = self.tcx.layout_scalar_valid_range(def.did);
915 Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
916 // the asserts ensure that we are not using the
917 // `#[rustc_layout_scalar_valid_range(n)]`
918 // attribute to widen the range of anything as that would probably
919 // result in UB somewhere
920 // FIXME(eddyb) the asserts are probably not needed,
921 // as larger validity ranges would result in missed
922 // optimizations, *not* wrongly assuming the inner
923 // value is valid. e.g. unions enlarge validity ranges,
924 // because the values may be uninitialized.
925 if let Bound::Included(start) = start {
926 // FIXME(eddyb) this might be incorrect - it doesn't
927 // account for wrap-around (end < start) ranges.
928 assert!(*scalar.valid_range.start() <= start);
929 scalar.valid_range = start..=*scalar.valid_range.end();
931 if let Bound::Included(end) = end {
932 // FIXME(eddyb) this might be incorrect - it doesn't
933 // account for wrap-around (end < start) ranges.
934 assert!(*scalar.valid_range.end() >= end);
935 scalar.valid_range = *scalar.valid_range.start()..=end;
938 // Update `largest_niche` if we have introduced a larger niche.
939 let niche = if def.repr.hide_niche() {
942 Niche::from_scalar(dl, Size::ZERO, scalar.clone())
944 if let Some(niche) = niche {
945 match &st.largest_niche {
946 Some(largest_niche) => {
947 // Replace the existing niche even if they're equal,
948 // because this one is at a lower offset.
949 if largest_niche.available(dl) <= niche.available(dl) {
950 st.largest_niche = Some(niche);
953 None => st.largest_niche = Some(niche),
958 start == Bound::Unbounded && end == Bound::Unbounded,
959 "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
965 return Ok(tcx.intern_layout(st));
968 // At this point, we have handled all unions and
969 // structs. (We have also handled univariant enums
970 // that allow representation optimization.)
971 assert!(def.is_enum());
973 // The current code for niche-filling relies on variant indices
974 // instead of actual discriminants, so dataful enums with
975 // explicit discriminants (RFC #2363) would misbehave.
976 let no_explicit_discriminants = def
979 .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i.as_u32()));
981 let mut niche_filling_layout = None;
983 // Niche-filling enum optimization.
984 if !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants {
985 let mut dataful_variant = None;
986 let mut niche_variants = VariantIdx::MAX..=VariantIdx::new(0);
988 // Find one non-ZST variant.
989 'variants: for (v, fields) in variants.iter_enumerated() {
995 if dataful_variant.is_none() {
996 dataful_variant = Some(v);
999 dataful_variant = None;
1004 niche_variants = *niche_variants.start().min(&v)..=v;
1007 if niche_variants.start() > niche_variants.end() {
1008 dataful_variant = None;
1011 if let Some(i) = dataful_variant {
1012 let count = (niche_variants.end().as_u32()
1013 - niche_variants.start().as_u32()
1016 // Find the field with the largest niche
1017 let niche_candidate = variants[i]
1020 .filter_map(|(j, &field)| Some((j, field.largest_niche.as_ref()?)))
1021 .max_by_key(|(_, niche)| niche.available(dl));
1023 if let Some((field_index, niche, (niche_start, niche_scalar))) =
1024 niche_candidate.and_then(|(field_index, niche)| {
1025 Some((field_index, niche, niche.reserve(self, count)?))
1028 let mut align = dl.aggregate_align;
1032 let mut st = self.univariant_uninterned(
1036 StructKind::AlwaysSized,
1038 st.variants = Variants::Single { index: j };
1040 align = align.max(st.align);
1044 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1046 let offset = st[i].fields.offset(field_index) + niche.offset;
1047 let size = st[i].size;
1049 let abi = if st.iter().all(|v| v.abi.is_uninhabited()) {
1053 Abi::Scalar(_) => Abi::Scalar(niche_scalar.clone()),
1054 Abi::ScalarPair(ref first, ref second) => {
1055 // We need to use scalar_unit to reset the
1056 // valid range to the maximal one for that
1057 // primitive, because only the niche is
1058 // guaranteed to be initialised, not the
1060 if offset.bytes() == 0 {
1062 niche_scalar.clone(),
1063 scalar_unit(second.value),
1067 scalar_unit(first.value),
1068 niche_scalar.clone(),
1072 _ => Abi::Aggregate { sized: true },
1077 Niche::from_scalar(dl, offset, niche_scalar.clone());
1079 niche_filling_layout = Some(Layout {
1080 variants: Variants::Multiple {
1082 tag_encoding: TagEncoding::Niche {
1090 fields: FieldsShape::Arbitrary {
1091 offsets: vec![offset],
1092 memory_index: vec![0],
1103 let (mut min, mut max) = (i128::MAX, i128::MIN);
1104 let discr_type = def.repr.discr_type();
1105 let bits = Integer::from_attr(self, discr_type).size().bits();
1106 for (i, discr) in def.discriminants(tcx) {
1107 if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
1110 let mut x = discr.val as i128;
1111 if discr_type.is_signed() {
1112 // sign extend the raw representation to be an i128
1113 x = (x << (128 - bits)) >> (128 - bits);
1122 // We might have no inhabited variants, so pretend there's at least one.
1123 if (min, max) == (i128::MAX, i128::MIN) {
1127 assert!(min <= max, "discriminant range is {}...{}", min, max);
1128 let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max);
1130 let mut align = dl.aggregate_align;
1131 let mut size = Size::ZERO;
1133 // We're interested in the smallest alignment, so start large.
1134 let mut start_align = Align::from_bytes(256).unwrap();
1135 assert_eq!(Integer::for_align(dl, start_align), None);
1137 // repr(C) on an enum tells us to make a (tag, union) layout,
1138 // so we need to grow the prefix alignment to be at least
1139 // the alignment of the union. (This value is used both for
1140 // determining the alignment of the overall enum, and the
1141 // determining the alignment of the payload after the tag.)
1142 let mut prefix_align = min_ity.align(dl).abi;
1144 for fields in &variants {
1145 for field in fields {
1146 prefix_align = prefix_align.max(field.align.abi);
1151 // Create the set of structs that represent each variant.
1152 let mut layout_variants = variants
1154 .map(|(i, field_layouts)| {
1155 let mut st = self.univariant_uninterned(
1159 StructKind::Prefixed(min_ity.size(), prefix_align),
1161 st.variants = Variants::Single { index: i };
1162 // Find the first field we can't move later
1163 // to make room for a larger discriminant.
1165 st.fields.index_by_increasing_offset().map(|j| field_layouts[j])
1167 if !field.is_zst() || field.align.abi.bytes() != 1 {
1168 start_align = start_align.min(field.align.abi);
1172 size = cmp::max(size, st.size);
1173 align = align.max(st.align);
1176 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1178 // Align the maximum variant size to the largest alignment.
1179 size = size.align_to(align.abi);
1181 if size.bytes() >= dl.obj_size_bound() {
1182 return Err(LayoutError::SizeOverflow(ty));
1185 let typeck_ity = Integer::from_attr(dl, def.repr.discr_type());
1186 if typeck_ity < min_ity {
1187 // It is a bug if Layout decided on a greater discriminant size than typeck for
1188 // some reason at this point (based on values discriminant can take on). Mostly
1189 // because this discriminant will be loaded, and then stored into variable of
1190 // type calculated by typeck. Consider such case (a bug): typeck decided on
1191 // byte-sized discriminant, but layout thinks we need a 16-bit to store all
1192 // discriminant values. That would be a bug, because then, in codegen, in order
1193 // to store this 16-bit discriminant into 8-bit sized temporary some of the
1194 // space necessary to represent would have to be discarded (or layout is wrong
1195 // on thinking it needs 16 bits)
1197 "layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
1201 // However, it is fine to make discr type however large (as an optimisation)
1202 // after this point – we’ll just truncate the value we load in codegen.
1205 // Check to see if we should use a different type for the
1206 // discriminant. We can safely use a type with the same size
1207 // as the alignment of the first field of each variant.
1208 // We increase the size of the discriminant to avoid LLVM copying
1209 // padding when it doesn't need to. This normally causes unaligned
1210 // load/stores and excessive memcpy/memset operations. By using a
1211 // bigger integer size, LLVM can be sure about its contents and
1212 // won't be so conservative.
1214 // Use the initial field alignment
1215 let mut ity = if def.repr.c() || def.repr.int.is_some() {
1218 Integer::for_align(dl, start_align).unwrap_or(min_ity)
1221 // If the alignment is not larger than the chosen discriminant size,
1222 // don't use the alignment as the final size.
1226 // Patch up the variants' first few fields.
1227 let old_ity_size = min_ity.size();
1228 let new_ity_size = ity.size();
1229 for variant in &mut layout_variants {
1230 match variant.fields {
1231 FieldsShape::Arbitrary { ref mut offsets, .. } => {
1233 if *i <= old_ity_size {
1234 assert_eq!(*i, old_ity_size);
1238 // We might be making the struct larger.
1239 if variant.size <= old_ity_size {
1240 variant.size = new_ity_size;
1248 let tag_mask = !0u128 >> (128 - ity.size().bits());
1250 value: Int(ity, signed),
1251 valid_range: (min as u128 & tag_mask)..=(max as u128 & tag_mask),
1253 let mut abi = Abi::Aggregate { sized: true };
1254 if tag.value.size(dl) == size {
1255 abi = Abi::Scalar(tag.clone());
1257 // Try to use a ScalarPair for all tagged enums.
1258 let mut common_prim = None;
1259 for (field_layouts, layout_variant) in variants.iter().zip(&layout_variants) {
1260 let offsets = match layout_variant.fields {
1261 FieldsShape::Arbitrary { ref offsets, .. } => offsets,
1265 field_layouts.iter().zip(offsets).filter(|p| !p.0.is_zst());
1266 let (field, offset) = match (fields.next(), fields.next()) {
1267 (None, None) => continue,
1268 (Some(pair), None) => pair,
1274 let prim = match field.abi {
1275 Abi::Scalar(ref scalar) => scalar.value,
1281 if let Some(pair) = common_prim {
1282 // This is pretty conservative. We could go fancier
1283 // by conflating things like i32 and u32, or even
1284 // realising that (u8, u8) could just cohabit with
1286 if pair != (prim, offset) {
1291 common_prim = Some((prim, offset));
1294 if let Some((prim, offset)) = common_prim {
1295 let pair = self.scalar_pair(tag.clone(), scalar_unit(prim));
1296 let pair_offsets = match pair.fields {
1297 FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
1298 assert_eq!(memory_index, &[0, 1]);
1303 if pair_offsets[0] == Size::ZERO
1304 && pair_offsets[1] == *offset
1305 && align == pair.align
1306 && size == pair.size
1308 // We can use `ScalarPair` only when it matches our
1309 // already computed layout (including `#[repr(C)]`).
1315 if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
1316 abi = Abi::Uninhabited;
1319 let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag.clone());
1321 let tagged_layout = Layout {
1322 variants: Variants::Multiple {
1324 tag_encoding: TagEncoding::Direct,
1326 variants: layout_variants,
1328 fields: FieldsShape::Arbitrary {
1329 offsets: vec![Size::ZERO],
1330 memory_index: vec![0],
1338 let best_layout = match (tagged_layout, niche_filling_layout) {
1339 (tagged_layout, Some(niche_filling_layout)) => {
1340 // Pick the smaller layout; otherwise,
1341 // pick the layout with the larger niche; otherwise,
1342 // pick tagged as it has simpler codegen.
1343 cmp::min_by_key(tagged_layout, niche_filling_layout, |layout| {
1345 layout.largest_niche.as_ref().map_or(0, |n| n.available(dl));
1346 (layout.size, cmp::Reverse(niche_size))
1349 (tagged_layout, None) => tagged_layout,
1352 tcx.intern_layout(best_layout)
1355 // Types with no meaningful known layout.
1356 ty::Projection(_) | ty::Opaque(..) => {
1357 let normalized = tcx.normalize_erasing_regions(param_env, ty);
1358 if ty == normalized {
1359 return Err(LayoutError::Unknown(ty));
1361 tcx.layout_raw(param_env.and(normalized))?
1364 ty::Placeholder(..) | ty::GeneratorWitness(..) | ty::Infer(_) => {
1365 bug!("Layout::compute: unexpected type `{}`", ty)
1368 ty::Bound(..) | ty::Param(_) | ty::Error(_) => {
1369 return Err(LayoutError::Unknown(ty));
1375 /// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
// NOTE(review): this excerpt elides some original lines (e.g. the
// `Unassigned` variant at original line 1378, which is matched elsewhere in
// this file); confirm against the full source before editing.
1376 #[derive(Clone, Debug, PartialEq)]
1377 enum SavedLocalEligibility {
// `Assigned(v)`: the local is stored in exactly one variant `v`, so its
// slot may overlap with locals assigned to other variants.
1379 Assigned(VariantIdx),
1380 // FIXME: Use newtype_index so we aren't wasting bytes
// `Ineligible(Some(idx))`: the local is promoted to the generator prefix at
// promoted-field index `idx`; `None` until the prefix order is decided
// (see `generator_saved_local_eligibility`, which fills the index in last).
1381 Ineligible(Option<u32>),
1384 // When laying out generators, we divide our saved local fields into two
1385 // categories: overlap-eligible and overlap-ineligible.
1387 // Those fields which are ineligible for overlap go in a "prefix" at the
1388 // beginning of the layout, and always have space reserved for them.
1390 // Overlap-eligible fields are only assigned to one variant, so we lay
1391 // those fields out for each variant and put them right after the
1394 // Finally, in the layout details, we point to the fields from the
1395 // variants they are assigned to. It is possible for some fields to be
1396 // included in multiple variants. No field ever "moves around" in the
1397 // layout; its offset is always the same.
1399 // Also included in the layout are the upvars and the discriminant.
1400 // These are included as fields on the "outer" layout; they are not part
1402 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
1403 /// Compute the eligibility and assignment of each local.
// NOTE(review): this excerpt elides many original lines (match arms,
// closing braces, `trace!` arguments); confirm against the full source
// before acting on any comment below.
1404 fn generator_saved_local_eligibility(
1406 info: &GeneratorLayout<'tcx>,
1407 ) -> (BitSet<GeneratorSavedLocal>, IndexVec<GeneratorSavedLocal, SavedLocalEligibility>) {
1408 use SavedLocalEligibility::*;
// Every saved local starts out `Unassigned`; the passes below refine this
// to `Assigned(variant)` or `Ineligible(..)`.
1410 let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> =
1411 IndexVec::from_elem_n(Unassigned, info.field_tys.len());
1413 // The saved locals not eligible for overlap. These will get
1414 // "promoted" to the prefix of our generator.
1415 let mut ineligible_locals = BitSet::new_empty(info.field_tys.len());
1417 // Figure out which of our saved locals are fields in only
1418 // one variant. The rest are deemed ineligible for overlap.
1419 for (variant_index, fields) in info.variant_fields.iter_enumerated() {
1420 for local in fields {
1421 match assignments[*local] {
1423 assignments[*local] = Assigned(variant_index);
1426 // We've already seen this local at another suspension
1427 // point, so it is no longer a candidate.
1429 "removing local {:?} in >1 variant ({:?}, {:?})",
1434 ineligible_locals.insert(*local);
1435 assignments[*local] = Ineligible(None);
1442 // Next, check every pair of eligible locals to see if they
// conflict, i.e. are storage-live at the same time.
1444 for local_a in info.storage_conflicts.rows() {
1445 let conflicts_a = info.storage_conflicts.count(local_a);
1446 if ineligible_locals.contains(local_a) {
1450 for local_b in info.storage_conflicts.iter(local_a) {
1451 // local_a and local_b are storage live at the same time, therefore they
1452 // cannot overlap in the generator layout. The only way to guarantee
1453 // this is if they are in the same variant, or one is ineligible
1454 // (which means it is stored in every variant).
1455 if ineligible_locals.contains(local_b)
1456 || assignments[local_a] == assignments[local_b]
1461 // If they conflict, we will choose one to make ineligible.
1462 // This is not always optimal; it's just a greedy heuristic that
1463 // seems to produce good results most of the time.
// The local with more conflicts is evicted, on the theory that keeping it
// eligible would block more overlap opportunities.
1464 let conflicts_b = info.storage_conflicts.count(local_b);
1465 let (remove, other) =
1466 if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
1467 ineligible_locals.insert(remove);
1468 assignments[remove] = Ineligible(None);
1469 trace!("removing local {:?} due to conflict with {:?}", remove, other);
1473 // Count the number of variants in use. If only one of them, then it is
1474 // impossible to overlap any locals in our layout. In this case it's
1475 // always better to make the remaining locals ineligible, so we can
1476 // lay them out with the other locals in the prefix and eliminate
1477 // unnecessary padding bytes.
1479 let mut used_variants = BitSet::new_empty(info.variant_fields.len());
1480 for assignment in &assignments {
1481 if let Assigned(idx) = assignment {
1482 used_variants.insert(*idx);
1485 if used_variants.count() < 2 {
1486 for assignment in assignments.iter_mut() {
1487 *assignment = Ineligible(None);
1489 ineligible_locals.insert_all();
1493 // Write down the order of our locals that will be promoted to the prefix.
// The bit-set's iteration order fixes each promoted local's index in the
// prefix; `generator_layout` reads it back via `Ineligible(Some(idx))`.
1495 for (idx, local) in ineligible_locals.iter().enumerate() {
1496 assignments[local] = Ineligible(Some(idx as u32));
1499 debug!("generator saved local assignments: {:?}", assignments);
1501 (ineligible_locals, assignments)
1504 /// Compute the full generator layout.
1505 fn generator_layout(
1508 def_id: hir::def_id::DefId,
1509 substs: SubstsRef<'tcx>,
1510 ) -> Result<&'tcx Layout, LayoutError<'tcx>> {
1511 use SavedLocalEligibility::*;
1513 let subst_field = |ty: Ty<'tcx>| ty.subst(tcx, substs);
1515 let info = match tcx.generator_layout(def_id) {
1516 None => return Err(LayoutError::Unknown(ty)),
1519 let (ineligible_locals, assignments) = self.generator_saved_local_eligibility(&info);
1521 // Build a prefix layout, including "promoting" all ineligible
1522 // locals as part of the prefix. We compute the layout of all of
1523 // these fields at once to get optimal packing.
1524 let tag_index = substs.as_generator().prefix_tys().count();
1526 // `info.variant_fields` already accounts for the reserved variants, so no need to add them.
1527 let max_discr = (info.variant_fields.len() - 1) as u128;
1528 let discr_int = Integer::fit_unsigned(max_discr);
1529 let discr_int_ty = discr_int.to_ty(tcx, false);
1530 let tag = Scalar { value: Primitive::Int(discr_int, false), valid_range: 0..=max_discr };
1531 let tag_layout = self.tcx.intern_layout(Layout::scalar(self, tag.clone()));
1532 let tag_layout = TyAndLayout { ty: discr_int_ty, layout: tag_layout };
1534 let promoted_layouts = ineligible_locals
1536 .map(|local| subst_field(info.field_tys[local]))
1537 .map(|ty| tcx.mk_maybe_uninit(ty))
1538 .map(|ty| self.layout_of(ty));
1539 let prefix_layouts = substs
1542 .map(|ty| self.layout_of(ty))
1543 .chain(iter::once(Ok(tag_layout)))
1544 .chain(promoted_layouts)
1545 .collect::<Result<Vec<_>, _>>()?;
1546 let prefix = self.univariant_uninterned(
1549 &ReprOptions::default(),
1550 StructKind::AlwaysSized,
1553 let (prefix_size, prefix_align) = (prefix.size, prefix.align);
1555 // Split the prefix layout into the "outer" fields (upvars and
1556 // discriminant) and the "promoted" fields. Promoted fields will
1557 // get included in each variant that requested them in
1559 debug!("prefix = {:#?}", prefix);
1560 let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
1561 FieldsShape::Arbitrary { mut offsets, memory_index } => {
1562 let mut inverse_memory_index = invert_mapping(&memory_index);
1564 // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
1565 // "outer" and "promoted" fields respectively.
1566 let b_start = (tag_index + 1) as u32;
1567 let offsets_b = offsets.split_off(b_start as usize);
1568 let offsets_a = offsets;
1570 // Disentangle the "a" and "b" components of `inverse_memory_index`
1571 // by preserving the order but keeping only one disjoint "half" each.
1572 // FIXME(eddyb) build a better abstraction for permutations, if possible.
1573 let inverse_memory_index_b: Vec<_> =
1574 inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
1575 inverse_memory_index.retain(|&i| i < b_start);
1576 let inverse_memory_index_a = inverse_memory_index;
1578 // Since `inverse_memory_index_{a,b}` each only refer to their
1579 // respective fields, they can be safely inverted
1580 let memory_index_a = invert_mapping(&inverse_memory_index_a);
1581 let memory_index_b = invert_mapping(&inverse_memory_index_b);
1584 FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
1585 (outer_fields, offsets_b, memory_index_b)
1590 let mut size = prefix.size;
1591 let mut align = prefix.align;
1595 .map(|(index, variant_fields)| {
1596 // Only include overlap-eligible fields when we compute our variant layout.
1597 let variant_only_tys = variant_fields
1599 .filter(|local| match assignments[**local] {
1600 Unassigned => bug!(),
1601 Assigned(v) if v == index => true,
1602 Assigned(_) => bug!("assignment does not match variant"),
1603 Ineligible(_) => false,
1605 .map(|local| subst_field(info.field_tys[*local]));
1607 let mut variant = self.univariant_uninterned(
1610 .map(|ty| self.layout_of(ty))
1611 .collect::<Result<Vec<_>, _>>()?,
1612 &ReprOptions::default(),
1613 StructKind::Prefixed(prefix_size, prefix_align.abi),
1615 variant.variants = Variants::Single { index };
1617 let (offsets, memory_index) = match variant.fields {
1618 FieldsShape::Arbitrary { offsets, memory_index } => (offsets, memory_index),
1622 // Now, stitch the promoted and variant-only fields back together in
1623 // the order they are mentioned by our GeneratorLayout.
1624 // Because we only use some subset (that can differ between variants)
1625 // of the promoted fields, we can't just pick those elements of the
1626 // `promoted_memory_index` (as we'd end up with gaps).
1627 // So instead, we build an "inverse memory_index", as if all of the
1628 // promoted fields were being used, but leave the elements not in the
1629 // subset as `INVALID_FIELD_IDX`, which we can filter out later to
1630 // obtain a valid (bijective) mapping.
1631 const INVALID_FIELD_IDX: u32 = !0;
1632 let mut combined_inverse_memory_index =
1633 vec![INVALID_FIELD_IDX; promoted_memory_index.len() + memory_index.len()];
1634 let mut offsets_and_memory_index = offsets.into_iter().zip(memory_index);
1635 let combined_offsets = variant_fields
1639 let (offset, memory_index) = match assignments[*local] {
1640 Unassigned => bug!(),
1642 let (offset, memory_index) =
1643 offsets_and_memory_index.next().unwrap();
1644 (offset, promoted_memory_index.len() as u32 + memory_index)
1646 Ineligible(field_idx) => {
1647 let field_idx = field_idx.unwrap() as usize;
1648 (promoted_offsets[field_idx], promoted_memory_index[field_idx])
1651 combined_inverse_memory_index[memory_index as usize] = i as u32;
1656 // Remove the unused slots and invert the mapping to obtain the
1657 // combined `memory_index` (also see previous comment).
1658 combined_inverse_memory_index.retain(|&i| i != INVALID_FIELD_IDX);
1659 let combined_memory_index = invert_mapping(&combined_inverse_memory_index);
1661 variant.fields = FieldsShape::Arbitrary {
1662 offsets: combined_offsets,
1663 memory_index: combined_memory_index,
1666 size = size.max(variant.size);
1667 align = align.max(variant.align);
1670 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1672 size = size.align_to(align.abi);
1674 let abi = if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi.is_uninhabited())
1678 Abi::Aggregate { sized: true }
1681 let layout = tcx.intern_layout(Layout {
1682 variants: Variants::Multiple {
1684 tag_encoding: TagEncoding::Direct,
1685 tag_field: tag_index,
1688 fields: outer_fields,
1690 largest_niche: prefix.largest_niche,
1694 debug!("generator layout ({:?}): {:#?}", ty, layout);
1698 /// This is invoked by the `layout_raw` query to record the final
1699 /// layout of each type.
// Cheap guard wrapper: checks the `-Zprint-type-sizes` debug flag and
// delegates the actual recording to `record_layout_for_printing_outlined`
// (presumably kept out-of-line so the common no-op path stays small —
// TODO confirm).
1701 fn record_layout_for_printing(&self, layout: TyAndLayout<'tcx>) {
1702 // If we are running with `-Zprint-type-sizes`, maybe record layouts
1703 // for dumping later.
1704 if self.tcx.sess.opts.debugging_opts.print_type_sizes {
1705 self.record_layout_for_printing_outlined(layout)
1709 fn record_layout_for_printing_outlined(&self, layout: TyAndLayout<'tcx>) {
1710 // Ignore layouts that are done with non-empty environments or
1711 // non-monomorphic layouts, as the user only wants to see the stuff
1712 // resulting from the final codegen session.
1713 if layout.ty.has_param_types_or_consts() || !self.param_env.caller_bounds().is_empty() {
1717 // (delay format until we actually need it)
1718 let record = |kind, packed, opt_discr_size, variants| {
1719 let type_desc = format!("{:?}", layout.ty);
1720 self.tcx.sess.code_stats.record_type_size(
1731 let adt_def = match *layout.ty.kind() {
1732 ty::Adt(ref adt_def, _) => {
1733 debug!("print-type-size t: `{:?}` process adt", layout.ty);
1737 ty::Closure(..) => {
1738 debug!("print-type-size t: `{:?}` record closure", layout.ty);
1739 record(DataTypeKind::Closure, false, None, vec![]);
1744 debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
1749 let adt_kind = adt_def.adt_kind();
1750 let adt_packed = adt_def.repr.pack.is_some();
1752 let build_variant_info = |n: Option<Ident>, flds: &[Symbol], layout: TyAndLayout<'tcx>| {
1753 let mut min_size = Size::ZERO;
1754 let field_info: Vec<_> = flds
1757 .map(|(i, &name)| match layout.field(self, i) {
1759 bug!("no layout found for field {}: `{:?}`", name, err);
1761 Ok(field_layout) => {
1762 let offset = layout.fields.offset(i);
1763 let field_end = offset + field_layout.size;
1764 if min_size < field_end {
1765 min_size = field_end;
1768 name: name.to_string(),
1769 offset: offset.bytes(),
1770 size: field_layout.size.bytes(),
1771 align: field_layout.align.abi.bytes(),
1778 name: n.map(|n| n.to_string()),
1779 kind: if layout.is_unsized() { SizeKind::Min } else { SizeKind::Exact },
1780 align: layout.align.abi.bytes(),
1781 size: if min_size.bytes() == 0 { layout.size.bytes() } else { min_size.bytes() },
1786 match layout.variants {
1787 Variants::Single { index } => {
1788 debug!("print-type-size `{:#?}` variant {}", layout, adt_def.variants[index].ident);
1789 if !adt_def.variants.is_empty() {
1790 let variant_def = &adt_def.variants[index];
1791 let fields: Vec<_> = variant_def.fields.iter().map(|f| f.ident.name).collect();
1796 vec![build_variant_info(Some(variant_def.ident), &fields, layout)],
1799 // (This case arises for *empty* enums; so give it
1801 record(adt_kind.into(), adt_packed, None, vec![]);
1805 Variants::Multiple { ref tag, ref tag_encoding, .. } => {
1807 "print-type-size `{:#?}` adt general variants def {}",
1809 adt_def.variants.len()
1811 let variant_infos: Vec<_> = adt_def
1814 .map(|(i, variant_def)| {
1815 let fields: Vec<_> =
1816 variant_def.fields.iter().map(|f| f.ident.name).collect();
1818 Some(variant_def.ident),
1820 layout.for_variant(self, i),
1827 match tag_encoding {
1828 TagEncoding::Direct => Some(tag.value.size(self)),
1838 /// Type size "skeleton", i.e., the only information determining a type's size.
1839 /// While this is conservative, (aside from constant sizes, only pointers,
1840 /// newtypes thereof and null pointer optimized enums are allowed), it is
1841 /// enough to statically check common use cases of transmute.
1842 #[derive(Copy, Clone, Debug)]
1843 pub enum SizeSkeleton<'tcx> {
1844 /// Any statically computable Layout.
1847 /// A potentially-fat pointer.
1849 /// If true, this pointer is never null.
1851 /// The type which determines the unsized metadata, if any,
1852 /// of this pointer. Either a type parameter or a projection
1853 /// depending on one, with regions erased.
1858 impl<'tcx> SizeSkeleton<'tcx> {
1862 param_env: ty::ParamEnv<'tcx>,
1863 ) -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
1864 debug_assert!(!ty.has_infer_types_or_consts());
1866 // First try computing a static layout.
1867 let err = match tcx.layout_of(param_env.and(ty)) {
1869 return Ok(SizeSkeleton::Known(layout.size));
1875 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
1876 let non_zero = !ty.is_unsafe_ptr();
1877 let tail = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
1879 ty::Param(_) | ty::Projection(_) => {
1880 debug_assert!(tail.has_param_types_or_consts());
1881 Ok(SizeSkeleton::Pointer { non_zero, tail: tcx.erase_regions(tail) })
1884 "SizeSkeleton::compute({}): layout errored ({}), yet \
1885 tail `{}` is not a type parameter or a projection",
1893 ty::Adt(def, substs) => {
1894 // Only newtypes and enums w/ nullable pointer optimization.
1895 if def.is_union() || def.variants.is_empty() || def.variants.len() > 2 {
1899 // Get a zero-sized variant or a pointer newtype.
1900 let zero_or_ptr_variant = |i| {
1901 let i = VariantIdx::new(i);
1902 let fields = def.variants[i]
1905 .map(|field| SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env));
1907 for field in fields {
1910 SizeSkeleton::Known(size) => {
1911 if size.bytes() > 0 {
1915 SizeSkeleton::Pointer { .. } => {
1926 let v0 = zero_or_ptr_variant(0)?;
1928 if def.variants.len() == 1 {
1929 if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
1930 return Ok(SizeSkeleton::Pointer {
1932 || match tcx.layout_scalar_valid_range(def.did) {
1933 (Bound::Included(start), Bound::Unbounded) => start > 0,
1934 (Bound::Included(start), Bound::Included(end)) => {
1935 0 < start && start < end
1946 let v1 = zero_or_ptr_variant(1)?;
1947 // Nullable pointer enum optimization.
1949 (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None)
1950 | (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
1951 Ok(SizeSkeleton::Pointer { non_zero: false, tail })
1957 ty::Projection(_) | ty::Opaque(..) => {
1958 let normalized = tcx.normalize_erasing_regions(param_env, ty);
1959 if ty == normalized {
1962 SizeSkeleton::compute(normalized, tcx, param_env)
1970 pub fn same_size(self, other: SizeSkeleton<'_>) -> bool {
1971 match (self, other) {
1972 (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
1973 (SizeSkeleton::Pointer { tail: a, .. }, SizeSkeleton::Pointer { tail: b, .. }) => {
// Context-access traits: they let layout code be generic over anything
// that can supply a `TyCtxt`, a `ParamEnv`, or a target data layout.
// NOTE(review): the impl bodies are elided in this excerpt; only the item
// headers (and one body line at 2009) are visible.
1981 pub trait HasTyCtxt<'tcx>: HasDataLayout {
1982 fn tcx(&self) -> TyCtxt<'tcx>;
1985 pub trait HasParamEnv<'tcx> {
1986 fn param_env(&self) -> ty::ParamEnv<'tcx>;
// A bare `TyCtxt` can serve directly as a data-layout and tcx provider.
1989 impl<'tcx> HasDataLayout for TyCtxt<'tcx> {
1990 fn data_layout(&self) -> &TargetDataLayout {
1995 impl<'tcx> HasTyCtxt<'tcx> for TyCtxt<'tcx> {
1996 fn tcx(&self) -> TyCtxt<'tcx> {
// `LayoutCx` forwards all three capabilities to its stored tcx/param_env.
2001 impl<'tcx, C> HasParamEnv<'tcx> for LayoutCx<'tcx, C> {
2002 fn param_env(&self) -> ty::ParamEnv<'tcx> {
2007 impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
2008 fn data_layout(&self) -> &TargetDataLayout {
2009 self.tcx.data_layout()
2013 impl<'tcx, T: HasTyCtxt<'tcx>> HasTyCtxt<'tcx> for LayoutCx<'tcx, T> {
2014 fn tcx(&self) -> TyCtxt<'tcx> {
// Alias specializing the target-abi `TyAndLayout` to this crate's `Ty`.
2019 pub type TyAndLayout<'tcx> = rustc_target::abi::TyAndLayout<'tcx, Ty<'tcx>>;
2021 impl<'tcx> LayoutOf for LayoutCx<'tcx, TyCtxt<'tcx>> {
2023 type TyAndLayout = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
2025 /// Computes the layout of a type. Note that this implicitly
2026 /// executes in "reveal all" mode.
2027 fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout {
// Normalize the type in the reveal-all param-env, then defer to the
// `layout_raw` query; `?` propagates any `LayoutError` to the caller.
2028 let param_env = self.param_env.with_reveal_all_normalized(self.tcx);
2029 let ty = self.tcx.normalize_erasing_regions(param_env, ty);
2030 let layout = self.tcx.layout_raw(param_env.and(ty))?;
2031 let layout = TyAndLayout { ty, layout };
2033 // N.B., this recording is normally disabled; when enabled, it
2034 // can however trigger recursive invocations of `layout_of`.
2035 // Therefore, we execute it *after* the main query has
2036 // completed, to avoid problems around recursive structures
2037 // and the like. (Admittedly, I wasn't able to reproduce a problem
2038 // here, but it seems like the right thing to do. -nmatsakis)
// NOTE(review): this body is duplicated almost verbatim in the
// `LayoutCx<'tcx, TyCtxtAt<'tcx>>` impl below — consider factoring.
2039 self.record_layout_for_printing(layout);
2045 impl LayoutOf for LayoutCx<'tcx, ty::query::TyCtxtAt<'tcx>> {
2047 type TyAndLayout = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
2049 /// Computes the layout of a type. Note that this implicitly
2050 /// executes in "reveal all" mode.
2051 fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout {
2052 let param_env = self.param_env.with_reveal_all_normalized(*self.tcx);
2053 let ty = self.tcx.normalize_erasing_regions(param_env, ty);
2054 let layout = self.tcx.layout_raw(param_env.and(ty))?;
2055 let layout = TyAndLayout { ty, layout };
2057 // N.B., this recording is normally disabled; when enabled, it
2058 // can however trigger recursive invocations of `layout_of`.
2059 // Therefore, we execute it *after* the main query has
2060 // completed, to avoid problems around recursive structures
2061 // and the like. (Admittedly, I wasn't able to reproduce a problem
2062 // here, but it seems like the right thing to do. -nmatsakis)
2063 let cx = LayoutCx { tcx: *self.tcx, param_env: self.param_env };
2064 cx.record_layout_for_printing(layout);
2070 // Helper (inherent) `layout_of` methods to avoid pushing `LayoutCx` to users.
2072 /// Computes the layout of a type. Note that this implicitly
2073 /// executes in "reveal all" mode.
2077 param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
2078 ) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
2079 let cx = LayoutCx { tcx: self, param_env: param_env_and_ty.param_env };
2080 cx.layout_of(param_env_and_ty.value)
2084 impl ty::query::TyCtxtAt<'tcx> {
2085 /// Computes the layout of a type. Note that this implicitly
2086 /// executes in "reveal all" mode.
2090 param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
2091 ) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
2092 let cx = LayoutCx { tcx: self.at(self.span), param_env: param_env_and_ty.param_env };
2093 cx.layout_of(param_env_and_ty.value)
2097 impl<'tcx, C> TyAndLayoutMethods<'tcx, C> for Ty<'tcx>
2099 C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout: MaybeResult<TyAndLayout<'tcx>>>
2101 + HasParamEnv<'tcx>,
2104 this: TyAndLayout<'tcx>,
2106 variant_index: VariantIdx,
2107 ) -> TyAndLayout<'tcx> {
2108 let layout = match this.variants {
2109 Variants::Single { index }
2110 // If all variants but one are uninhabited, the variant layout is the enum layout.
2111 if index == variant_index &&
2112 // Don't confuse variants of uninhabited enums with the enum itself.
2113 // For more details see https://github.com/rust-lang/rust/issues/69763.
2114 this.fields != FieldsShape::Primitive =>
2119 Variants::Single { index } => {
2120 // Deny calling for_variant more than once for non-Single enums.
2121 if let Ok(original_layout) = cx.layout_of(this.ty).to_result() {
2122 assert_eq!(original_layout.variants, Variants::Single { index });
2125 let fields = match this.ty.kind() {
2126 ty::Adt(def, _) if def.variants.is_empty() =>
2127 bug!("for_variant called on zero-variant enum"),
2128 ty::Adt(def, _) => def.variants[variant_index].fields.len(),
2132 tcx.intern_layout(Layout {
2133 variants: Variants::Single { index: variant_index },
2134 fields: match NonZeroUsize::new(fields) {
2135 Some(fields) => FieldsShape::Union(fields),
2136 None => FieldsShape::Arbitrary { offsets: vec![], memory_index: vec![] },
2138 abi: Abi::Uninhabited,
2139 largest_niche: None,
2140 align: tcx.data_layout.i8_align,
2145 Variants::Multiple { ref variants, .. } => &variants[variant_index],
2148 assert_eq!(layout.variants, Variants::Single { index: variant_index });
2150 TyAndLayout { ty: this.ty, layout }
2153 fn field(this: TyAndLayout<'tcx>, cx: &C, i: usize) -> C::TyAndLayout {
2154 enum TyMaybeWithLayout<C: LayoutOf> {
2156 TyAndLayout(C::TyAndLayout),
2159 fn ty_and_layout_kind<
2160 C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout: MaybeResult<TyAndLayout<'tcx>>>
2162 + HasParamEnv<'tcx>,
2164 this: TyAndLayout<'tcx>,
2168 ) -> TyMaybeWithLayout<C> {
2170 let tag_layout = |tag: &Scalar| -> C::TyAndLayout {
2171 let layout = Layout::scalar(cx, tag.clone());
2172 MaybeResult::from(Ok(TyAndLayout {
2173 layout: tcx.intern_layout(layout),
2174 ty: tag.value.to_ty(tcx),
2187 | ty::GeneratorWitness(..)
2189 | ty::Dynamic(..) => bug!("TyAndLayout::field_type({:?}): not applicable", this),
2191 // Potentially-fat pointers.
2192 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
2193 assert!(i < this.fields.count());
2195 // Reuse the fat `*T` type as its own thin pointer data field.
2196 // This provides information about, e.g., DST struct pointees
2197 // (which may have no non-DST form), and will work as long
2198 // as the `Abi` or `FieldsShape` is checked by users.
2200 let nil = tcx.mk_unit();
2201 let ptr_ty = if ty.is_unsafe_ptr() {
2204 tcx.mk_mut_ref(tcx.lifetimes.re_static, nil)
2206 return TyMaybeWithLayout::TyAndLayout(MaybeResult::from(
2207 cx.layout_of(ptr_ty).to_result().map(|mut ptr_layout| {
2214 match tcx.struct_tail_erasing_lifetimes(pointee, cx.param_env()).kind() {
2215 ty::Slice(_) | ty::Str => TyMaybeWithLayout::Ty(tcx.types.usize),
2216 ty::Dynamic(_, _) => {
2217 TyMaybeWithLayout::Ty(tcx.mk_imm_ref(
2218 tcx.lifetimes.re_static,
2219 tcx.mk_array(tcx.types.usize, 3),
2221 /* FIXME: use actual fn pointers
2222 Warning: naively computing the number of entries in the
2223 vtable by counting the methods on the trait + methods on
2224 all parent traits does not work, because some methods can
2225 be not object safe and thus excluded from the vtable.
2226 Increase this counter if you tried to implement this but
2227 failed to do it without duplicating a lot of code from
2228 other places in the compiler: 2
2230 tcx.mk_array(tcx.types.usize, 3),
2231 tcx.mk_array(Option<fn()>),
2235 _ => bug!("TyAndLayout::field_type({:?}): not applicable", this),
2239 // Arrays and slices.
2240 ty::Array(element, _) | ty::Slice(element) => TyMaybeWithLayout::Ty(element),
2241 ty::Str => TyMaybeWithLayout::Ty(tcx.types.u8),
2243 // Tuples, generators and closures.
2244 ty::Closure(_, ref substs) => {
2245 ty_and_layout_kind(this, cx, i, substs.as_closure().tupled_upvars_ty())
2248 ty::Generator(def_id, ref substs, _) => match this.variants {
2249 Variants::Single { index } => TyMaybeWithLayout::Ty(
2252 .state_tys(def_id, tcx)
2253 .nth(index.as_usize())
2258 Variants::Multiple { ref tag, tag_field, .. } => {
2260 return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
2262 TyMaybeWithLayout::Ty(substs.as_generator().prefix_tys().nth(i).unwrap())
2266 ty::Tuple(tys) => TyMaybeWithLayout::Ty(tys[i].expect_ty()),
2269 ty::Adt(def, substs) => {
2270 match this.variants {
2271 Variants::Single { index } => {
2272 TyMaybeWithLayout::Ty(def.variants[index].fields[i].ty(tcx, substs))
2275 // Discriminant field for enums (where applicable).
2276 Variants::Multiple { ref tag, .. } => {
2278 return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
2285 | ty::Placeholder(..)
2289 | ty::Error(_) => bug!("TyAndLayout::field_type: unexpected type `{}`", this.ty),
2293 cx.layout_of(match ty_and_layout_kind(this, cx, i, this.ty) {
2294 TyMaybeWithLayout::Ty(result) => result,
2295 TyMaybeWithLayout::TyAndLayout(result) => return result,
// Determines, for a pointer located at byte `offset` inside a value of this
// layout, what is known about the pointee: size, alignment, address space,
// and (for references / boxes) a `PointerKind` describing aliasing guarantees.
// Returns `None` when no pointer can be identified at that offset.
// NOTE(review): this excerpt elides several original lines (gaps in the
// embedded numbering); comments below describe only the code that is visible.
2299 fn pointee_info_at(this: TyAndLayout<'tcx>, cx: &C, offset: Size) -> Option<PointeeInfo> {
// Function pointers live in the target's instruction address space; all
// other pointees use the default DATA address space.
2300 let addr_space_of_ty = |ty: Ty<'tcx>| {
2301 if ty.is_fn() { cx.data_layout().instruction_address_space } else { AddressSpace::DATA }
2304 let pointee_info = match *this.ty.kind() {
// Raw pointer at offset 0: record the pointee's size/align/address space,
// but (being a raw pointer) no safety guarantee is recorded here.
2305 ty::RawPtr(mt) if offset.bytes() == 0 => {
2306 cx.layout_of(mt.ty).to_result().ok().map(|layout| PointeeInfo {
2308 align: layout.align.abi,
2310 address_space: addr_space_of_ty(mt.ty),
// `fn` pointer at offset 0: layout of the fn-pointer type itself, placed in
// the instruction address space.
2313 ty::FnPtr(fn_sig) if offset.bytes() == 0 => {
2314 cx.layout_of(cx.tcx().mk_fn_ptr(fn_sig)).to_result().ok().map(|layout| {
2317 align: layout.align.abi,
2319 address_space: cx.data_layout().instruction_address_space,
// References at offset 0: classify the pointer by mutability and freeze-ness
// to pick a `PointerKind` (used later for LLVM noalias/readonly attributes).
2323 ty::Ref(_, ty, mt) if offset.bytes() == 0 => {
2324 let address_space = addr_space_of_ty(ty);
// `is_freeze`: the pointee contains no `UnsafeCell` (shared-immutable).
2326 let is_freeze = ty.is_freeze(tcx.at(DUMMY_SP), cx.param_env());
2327 let kind = match mt {
// Shared reference: kind depends on freeze-ness (result lines elided here).
2328 hir::Mutability::Not => {
2335 hir::Mutability::Mut => {
2336 // Previously we would only emit noalias annotations for LLVM >= 6 or in
2337 // panic=abort mode. That was deemed right, as prior versions had many bugs
2338 // in conjunction with unwinding, but later versions didn’t seem to have
2339 // said issues. See issue #31681.
2341 // Alas, later on we encountered a case where noalias would generate wrong
2342 // code altogether even with recent versions of LLVM in *safe* code with no
2343 // unwinding involved. See #54462.
2345 // For now, do not enable mutable_noalias by default at all, while the
2346 // issue is being figured out.
// `&mut` gets UniqueBorrowed only behind the -Zmutable-noalias debug flag.
2347 if tcx.sess.opts.debugging_opts.mutable_noalias {
2348 PointerKind::UniqueBorrowed
2355 cx.layout_of(ty).to_result().ok().map(|layout| PointeeInfo {
2357 align: layout.align.abi,
// Not directly a pointer type: recurse into the field that covers `offset`.
// First pick which variant's fields to search (elided: the `Single` arm).
2364 let mut data_variant = match this.variants {
2365 // Within the discriminant field, only the niche itself is
2366 // always initialized, so we only check for a pointer at its
2369 // If the niche is a pointer, it's either valid (according
2370 // to its type), or null (which the niche field's scalar
2371 // validity range encodes). This allows using
2372 // `dereferenceable_or_null` for e.g., `Option<&T>`, and
2373 // this will continue to work as long as we don't start
2374 // using more niches than just null (e.g., the first page of
2375 // the address space, or unaligned pointers).
2376 Variants::Multiple {
2377 tag_encoding: TagEncoding::Niche { dataful_variant, .. },
2380 } if this.fields.offset(tag_field) == offset => {
2381 Some(this.for_variant(cx, dataful_variant))
2386 if let Some(variant) = data_variant {
2387 // We're not interested in any unions.
2388 if let FieldsShape::Union(_) = variant.fields {
2389 data_variant = None;
2393 let mut result = None;
// Scan the variant's fields for one that fully contains a pointer-sized
// span starting at `offset`, then recurse into it with a rebased offset.
2395 if let Some(variant) = data_variant {
2396 let ptr_end = offset + Pointer.size(cx);
2397 for i in 0..variant.fields.count() {
2398 let field_start = variant.fields.offset(i);
2399 if field_start <= offset {
2400 let field = variant.field(cx, i);
2401 result = field.to_result().ok().and_then(|field| {
2402 if ptr_end <= field_start + field.size {
2403 // We found the right field, look inside it.
2405 field.pointee_info_at(cx, offset - field_start);
// Stop at the first field that yielded pointee information.
2411 if result.is_some() {
2418 // FIXME(eddyb) This should be for `ptr::Unique<T>`, not `Box<T>`.
// Special case: the pointer inside a `Box` at offset 0 is uniquely owned.
2419 if let Some(ref mut pointee) = result {
2420 if let ty::Adt(def, _) = this.ty.kind() {
2421 if def.is_box() && offset.bytes() == 0 {
2422 pointee.safe = Some(PointerKind::UniqueOwned);
2432 "pointee_info_at (offset={:?}, type kind: {:?}) => {:?}",
// Stable hashing for `LayoutError`: hash the enum discriminant first, then
// the payload type for the variants that carry one.
// NOTE(review): the `match` opener and remaining arms are elided in this
// excerpt (gap between original lines 2445 and 2448).
2442 impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for LayoutError<'tcx> {
2443 fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
2444 use crate::ty::layout::LayoutError::*;
2445 mem::discriminant(self).hash_stable(hcx, hasher);
// Both `Unknown` and `SizeOverflow` carry a `Ty`, hashed the same way.
2448 Unknown(t) | SizeOverflow(t) => t.hash_stable(hcx, hasher),
// Builds the function signature that `FnAbi::of_instance` lowers, applying
// instance-specific adjustments (vtable shims, closure/generator calling
// conventions) that a plain `Ty::fn_sig` would not reflect.
// NOTE(review): several original lines are elided in this excerpt; comments
// describe only the visible code.
2453 impl<'tcx> ty::Instance<'tcx> {
2454 // NOTE(eddyb) this is private to avoid using it from outside of
2455 // `FnAbi::of_instance` - any other uses are either too high-level
2456 // for `Instance` (e.g. typeck would use `Ty::fn_sig` instead),
2457 // or should go through `FnAbi` instead, to avoid losing any
2458 // adjustments `FnAbi::of_instance` might be performing.
2459 fn fn_sig_for_fn_abi(&self, tcx: TyCtxt<'tcx>) -> ty::PolyFnSig<'tcx> {
2460 // FIXME(davidtwco,eddyb): A `ParamEnv` should be passed through to this function.
2461 let ty = self.ty(tcx, ty::ParamEnv::reveal_all());
2464 // HACK(davidtwco,eddyb): This is a workaround for polymorphization considering
2465 // parameters unused if they show up in the signature, but not in the `mir::Body`
2466 // (i.e. due to being inside a projection that got normalized, see
2467 // `src/test/ui/polymorphization/normalized_sig_types.rs`), and codegen not keeping
2468 // track of a polymorphization `ParamEnv` to allow normalizing later.
2469 let mut sig = match *ty.kind() {
// For `FnDef`, normalize the declared signature first, then substitute —
// this is the order required by the polymorphization workaround above.
2470 ty::FnDef(def_id, substs) => tcx
2471 .normalize_erasing_regions(tcx.param_env(def_id), tcx.fn_sig(def_id))
2472 .subst(tcx, substs),
2473 _ => unreachable!(),
2476 if let ty::InstanceDef::VtableShim(..) = self.def {
2477 // Modify `fn(self, ...)` to `fn(self: *mut Self, ...)`.
2478 sig = sig.map_bound(|mut sig| {
2479 let mut inputs_and_output = sig.inputs_and_output.to_vec();
2480 inputs_and_output[0] = tcx.mk_mut_ptr(inputs_and_output[0]);
2481 sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);
// Closures: prepend the closure environment as the first argument of the
// closure's own signature (the environment type comes from `closure_env_ty`).
2487 ty::Closure(def_id, substs) => {
2488 let sig = substs.as_closure().sig();
2490 let env_ty = tcx.closure_env_ty(def_id, substs).unwrap();
2491 sig.map_bound(|sig| {
2493 iter::once(env_ty.skip_binder()).chain(sig.inputs().iter().cloned()),
// Generators: the resume function takes `Pin<&mut Self>` plus the resume
// argument, and returns `GeneratorState<Yield, Return>`.
2501 ty::Generator(_, substs, _) => {
2502 let sig = substs.as_generator().poly_sig();
// Fresh late-bound region for the `&mut self` environment reference.
2504 let br = ty::BoundRegion { kind: ty::BrEnv };
2505 let env_region = ty::ReLateBound(ty::INNERMOST, br);
2506 let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty);
// Wrap the environment reference in `Pin<..>` via the lang item.
2508 let pin_did = tcx.require_lang_item(LangItem::Pin, None);
2509 let pin_adt_ref = tcx.adt_def(pin_did);
2510 let pin_substs = tcx.intern_substs(&[env_ty.into()]);
2511 let env_ty = tcx.mk_adt(pin_adt_ref, pin_substs);
2513 sig.map_bound(|sig| {
// The return type is `GeneratorState<yield_ty, return_ty>`.
2514 let state_did = tcx.require_lang_item(LangItem::GeneratorState, None);
2515 let state_adt_ref = tcx.adt_def(state_did);
2517 tcx.intern_substs(&[sig.yield_ty.into(), sig.return_ty.into()]);
2518 let ret_ty = tcx.mk_adt(state_adt_ref, state_substs);
// Inputs are the pinned environment and the resume argument.
2521 [env_ty, sig.resume_ty].iter(),
2524 hir::Unsafety::Normal,
2525 rustc_target::spec::abi::Abi::Rust,
2529 _ => bug!("unexpected type {:?} in Instance::fn_sig", ty),
// Extension trait that computes a `FnAbi` (calling-convention-level
// description of a function) from type-level information. The context `C`
// must provide layout computation and a `ParamEnv`.
// NOTE(review): some bound lines and one method's opening line are elided in
// this excerpt.
2534 pub trait FnAbiExt<'tcx, C>
2536 C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout = TyAndLayout<'tcx>>
2540 + HasParamEnv<'tcx>,
2542 /// Compute a `FnAbi` suitable for indirect calls, i.e. to `fn` pointers.
2544 /// NB: this doesn't handle virtual calls - those should use `FnAbi::of_instance`
2545 /// instead, where the instance is a `InstanceDef::Virtual`.
2546 fn of_fn_ptr(cx: &C, sig: ty::PolyFnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self;
2548 /// Compute a `FnAbi` suitable for declaring/defining an `fn` instance, and for
2549 /// direct calls to an `fn`.
2551 /// NB: that includes virtual calls, which are represented by "direct calls"
2552 /// to a `InstanceDef::Virtual` instance (of `<dyn Trait as Trait>::fn`).
2553 fn of_instance(cx: &C, instance: ty::Instance<'tcx>, extra_args: &[Ty<'tcx>]) -> Self;
// Shared worker behind the two constructors above (its `fn` line is elided
// here); `make_self_ptr_thin` requests the virtual-call thin-`self` fixup.
2557 sig: ty::PolyFnSig<'tcx>,
2558 extra_args: &[Ty<'tcx>],
2559 caller_location: Option<Ty<'tcx>>,
2560 codegen_fn_attr_flags: CodegenFnAttrFlags,
2561 make_self_ptr_thin: bool,
// Post-processes the computed `FnAbi` for the given source-level ABI.
2563 fn adjust_for_abi(&mut self, cx: &C, abi: SpecAbi);
// Parameters and decision logic of `fn_can_unwind` (the `fn` line itself and
// the per-branch return values are elided in this excerpt): decides whether a
// function with the given panic strategy, codegen attributes, and calling
// convention may unwind. NOTE(review): returned values are not visible here;
// only the branch conditions and their rationale comments are.
2567 panic_strategy: PanicStrategy,
2568 codegen_fn_attr_flags: CodegenFnAttrFlags,
// panic=abort (or any non-unwind strategy): nothing can unwind.
2571 if panic_strategy != PanicStrategy::Unwind {
2572 // In panic=abort mode we assume nothing can unwind anywhere, so
2573 // optimize based on this!
// An explicit #[unwind] attribute overrides the remaining heuristics.
2575 } else if codegen_fn_attr_flags.contains(CodegenFnAttrFlags::UNWIND) {
2576 // If a specific #[unwind] attribute is present, use that.
2578 } else if codegen_fn_attr_flags.contains(CodegenFnAttrFlags::RUSTC_ALLOCATOR_NOUNWIND) {
2579 // Special attribute for allocator functions, which can't unwind.
// Otherwise, decide by calling convention: Rust ABIs may unwind, others not.
2582 if call_conv == Conv::Rust {
2583 // Any Rust method (or `extern "Rust" fn` or `extern
2584 // "rust-call" fn`) is explicitly allowed to unwind
2585 // (unless it has no-unwind attribute, handled above).
2588 // Anything else is either:
2590 // 1. A foreign item using a non-Rust ABI (like `extern "C" { fn foo(); }`), or
2592 // 2. A Rust item using a non-Rust ABI (like `extern "C" fn foo() { ... }`).
2594 // Foreign items (case 1) are assumed to not unwind; it is
2595 // UB otherwise. (At least for now; see also
2596 // rust-lang/rust#63909 and Rust RFC 2753.)
2598 // Items defined in Rust with non-Rust ABIs (case 2) are also
2599 // not supposed to unwind. Whether this should be enforced
2600 // (versus stating it is UB) and *how* it would be enforced
2601 // is currently under discussion; see rust-lang/rust#58794.
2603 // In either case, we mark item as explicitly nounwind.
// `FnAbiExt` implementation for the target-level `call::FnAbi`.
// NOTE(review): some trait-bound lines are elided in this excerpt.
2609 impl<'tcx, C> FnAbiExt<'tcx, C> for call::FnAbi<'tcx, Ty<'tcx>>
2611 C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout = TyAndLayout<'tcx>>
2615 + HasParamEnv<'tcx>,
// Indirect calls through `fn` pointers: no caller-location argument, no
// thin-self fixup, and conservatively assume the callee may unwind.
2617 fn of_fn_ptr(cx: &C, sig: ty::PolyFnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
2618 // Assume that fn pointers may always unwind
2619 let codegen_fn_attr_flags = CodegenFnAttrFlags::UNWIND;
2621 call::FnAbi::new_internal(cx, sig, extra_args, None, codegen_fn_attr_flags, false)
// Direct calls to a known instance: use the instance-adjusted signature,
// append the implicit `#[track_caller]` location argument when required, and
// pass the real codegen attribute flags. Virtual instances get the
// thin-self-pointer fixup (last argument).
// NOTE(review): some lines of the `new_internal` call are elided here.
2624 fn of_instance(cx: &C, instance: ty::Instance<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
2625 let sig = instance.fn_sig_for_fn_abi(cx.tcx());
2627 let caller_location = if instance.def.requires_caller_location(cx.tcx()) {
2628 Some(cx.tcx().caller_location_ty())
2633 let attrs = cx.tcx().codegen_fn_attrs(instance.def_id()).flags;
2635 call::FnAbi::new_internal(
2641 matches!(instance.def, ty::InstanceDef::Virtual(..)),
// Body of `FnAbi::new_internal` (its `fn` line is elided in this excerpt):
// the single worker that lowers a monomorphic signature to a `FnAbi` —
// calling convention selection, rust-call tuple untupling, per-argument
// attribute computation, and target-quirk handling for ZST arguments.
// NOTE(review): many original lines are elided (gaps in numbering); comments
// describe only the visible code.
2647 sig: ty::PolyFnSig<'tcx>,
2648 extra_args: &[Ty<'tcx>],
2649 caller_location: Option<Ty<'tcx>>,
2650 codegen_fn_attr_flags: CodegenFnAttrFlags,
2651 force_thin_self_ptr: bool,
2653 debug!("FnAbi::new_internal({:?}, {:?})", sig, extra_args);
// Erase the binder: FnAbi works on a fully monomorphic signature.
2655 let sig = cx.tcx().normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), sig);
// Map the (target-adjusted) source-level ABI to a backend `Conv`.
2657 use rustc_target::spec::abi::Abi::*;
2658 let conv = match cx.tcx().sess.target.adjust_abi(sig.abi) {
2659 RustIntrinsic | PlatformIntrinsic | Rust | RustCall => Conv::Rust,
2661 // It's the ABI's job to select this, not ours.
2662 System => bug!("system abi should be selected elsewhere"),
2663 EfiApi => bug!("eficall abi should be selected elsewhere"),
2665 Stdcall => Conv::X86Stdcall,
2666 Fastcall => Conv::X86Fastcall,
2667 Vectorcall => Conv::X86VectorCall,
2668 Thiscall => Conv::X86ThisCall,
2670 Unadjusted => Conv::C,
2671 Win64 => Conv::X86_64Win64,
2672 SysV64 => Conv::X86_64SysV,
2673 Aapcs => Conv::ArmAapcs,
2674 CCmseNonSecureCall => Conv::CCmseNonSecureCall,
2675 PtxKernel => Conv::PtxKernel,
2676 Msp430Interrupt => Conv::Msp430Intr,
2677 X86Interrupt => Conv::X86Intr,
2678 AmdGpuKernel => Conv::AmdGpuKernel,
2679 AvrInterrupt => Conv::AvrInterrupt,
2680 AvrNonBlockingInterrupt => Conv::AvrNonBlockingInterrupt,
2682 // These API constants ought to be more specific...
// "rust-call" functions carry their trailing arguments as one tuple; split
// that tuple back out so each element becomes its own ABI argument.
2686 let mut inputs = sig.inputs();
2687 let extra_args = if sig.abi == RustCall {
2688 assert!(!sig.c_variadic && extra_args.is_empty());
2690 if let Some(input) = sig.inputs().last() {
2691 if let ty::Tuple(tupled_arguments) = input.kind() {
2692 inputs = &sig.inputs()[0..sig.inputs().len() - 1];
2693 tupled_arguments.iter().map(|k| k.expect_ty()).collect()
// Error paths for malformed rust-call signatures (messages truncated here).
2696 "argument to function with \"rust-call\" ABI \
2702 "argument to function with \"rust-call\" ABI \
2707 assert!(sig.c_variadic || extra_args.is_empty());
// Target quirk flags: platforms that do NOT ignore zero-sized arguments
// (used below to avoid `PassMode::Ignore` on them).
2711 let target = &cx.tcx().sess.target;
2712 let target_env_gnu_like = matches!(&target.env[..], "gnu" | "musl");
2713 let win_x64_gnu = target.os == "windows" && target.arch == "x86_64" && target.env == "gnu";
2714 let linux_s390x_gnu_like =
2715 target.os == "linux" && target.arch == "s390x" && target_env_gnu_like;
2716 let linux_sparc64_gnu_like =
2717 target.os == "linux" && target.arch == "sparc64" && target_env_gnu_like;
2718 let linux_powerpc_gnu_like =
2719 target.os == "linux" && target.arch == "powerpc" && target_env_gnu_like;
2720 let rust_abi = matches!(sig.abi, RustIntrinsic | PlatformIntrinsic | Rust | RustCall);
2722 // Handle safe Rust thin and fat pointers.
// Per-scalar attribute computation: zero-extension for bools, and for
// pointer scalars NonNull / NoAlias / ReadOnly / dereferenceable metadata
// derived from `pointee_info_at`.
2723 let adjust_for_rust_scalar = |attrs: &mut ArgAttributes,
2725 layout: TyAndLayout<'tcx>,
2728 // Booleans are always an i1 that needs to be zero-extended.
2729 if scalar.is_bool() {
2730 attrs.ext(ArgExtension::Zext);
2734 // Only pointer types handled below.
2735 if scalar.value != Pointer {
// A valid range that excludes 0 proves the pointer is non-null.
2739 if scalar.valid_range.start() < scalar.valid_range.end() {
2740 if *scalar.valid_range.start() > 0 {
2741 attrs.set(ArgAttribute::NonNull);
2745 if let Some(pointee) = layout.pointee_info_at(cx, offset) {
2746 if let Some(kind) = pointee.safe {
2747 attrs.pointee_align = Some(pointee.align);
2749 // `Box` (`UniqueBorrowed`) are not necessarily dereferenceable
2750 // for the entire duration of the function as they can be deallocated
2751 // at any time. Set their valid size to 0.
2752 attrs.pointee_size = match kind {
2753 PointerKind::UniqueOwned => Size::ZERO,
2757 // `Box` pointer parameters never alias because ownership is transferred
2758 // `&mut` pointer parameters never alias other parameters,
2759 // or mutable global data
2761 // `&T` where `T` contains no `UnsafeCell<U>` is immutable,
2762 // and can be marked as both `readonly` and `noalias`, as
2763 // LLVM's definition of `noalias` is based solely on memory
2764 // dependencies rather than pointer equality
2765 let no_alias = match kind {
2766 PointerKind::Shared => false,
2767 PointerKind::UniqueOwned => true,
// Returned pointers never get noalias from Frozen/UniqueBorrowed.
2768 PointerKind::Frozen | PointerKind::UniqueBorrowed => !is_return,
2771 attrs.set(ArgAttribute::NoAlias);
2774 if kind == PointerKind::Frozen && !is_return {
2775 attrs.set(ArgAttribute::ReadOnly);
// Builds the `ArgAbi` for one argument (or, with `arg_idx == None`, the
// return value).
2781 let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| {
2782 let is_return = arg_idx.is_none();
2784 let layout = cx.layout_of(ty);
2785 let layout = if force_thin_self_ptr && arg_idx == Some(0) {
2786 // Don't pass the vtable, it's not an argument of the virtual fn.
2787 // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
2788 // or `&/&mut dyn Trait` because this is special-cased elsewhere in codegen
2789 make_thin_self_ptr(cx, layout)
2794 let mut arg = ArgAbi::new(cx, layout, |layout, scalar, offset| {
2795 let mut attrs = ArgAttributes::new();
2796 adjust_for_rust_scalar(&mut attrs, scalar, *layout, offset, is_return);
// Ignore ZST arguments, except on the platforms flagged above.
2800 if arg.layout.is_zst() {
2801 // For some forsaken reason, x86_64-pc-windows-gnu
2802 // doesn't ignore zero-sized struct arguments.
2803 // The same is true for {s390x,sparc64,powerpc}-unknown-linux-{gnu,musl}.
2807 && !linux_s390x_gnu_like
2808 && !linux_sparc64_gnu_like
2809 && !linux_powerpc_gnu_like)
2811 arg.mode = PassMode::Ignore;
// Assemble the FnAbi: return value, declared inputs plus the optional
// caller-location argument, variadic info, and unwind capability.
2818 let mut fn_abi = FnAbi {
2819 ret: arg_of(sig.output(), None),
2824 .chain(caller_location)
2826 .map(|(i, ty)| arg_of(ty, Some(i)))
2828 c_variadic: sig.c_variadic,
// `fixed_count` excludes the variadic tail: only the declared inputs.
2829 fixed_count: inputs.len(),
2831 can_unwind: fn_can_unwind(cx.tcx().sess.panic_strategy(), codegen_fn_attr_flags, conv),
// Final per-ABI adjustment pass (see `adjust_for_abi`).
2833 fn_abi.adjust_for_abi(cx, sig.abi);
2834 debug!("FnAbi::new_internal = {:?}", fn_abi);
// Post-processes argument/return passing for the given source-level ABI:
// Rust ABIs get an internal fixup (SIMD → indirect, small aggregates → cast
// to an integer register); all other ABIs defer to the C-ABI adjustment.
// NOTE(review): some lines (e.g. the `Abi::Vector` match pattern and early
// returns) are elided in this excerpt.
2838 fn adjust_for_abi(&mut self, cx: &C, abi: SpecAbi) {
// "unadjusted" ABI: leave everything exactly as computed.
2839 if abi == SpecAbi::Unadjusted {
2843 if abi == SpecAbi::Rust
2844 || abi == SpecAbi::RustCall
2845 || abi == SpecAbi::RustIntrinsic
2846 || abi == SpecAbi::PlatformIntrinsic
// Per-argument fixup applied to the return value and every argument.
2848 let fixup = |arg: &mut ArgAbi<'tcx, Ty<'tcx>>| {
2849 if arg.is_ignore() {
2853 match arg.layout.abi {
// Aggregates fall through to the size-based handling below.
2854 Abi::Aggregate { .. } => {}
2856 // This is a fun case! The gist of what this is doing is
2857 // that we want callers and callees to always agree on the
2858 // ABI of how they pass SIMD arguments. If we were to *not*
2859 // make these arguments indirect then they'd be immediates
2860 // in LLVM, which means that they'd used whatever the
2861 // appropriate ABI is for the callee and the caller. That
2862 // means, for example, if the caller doesn't have AVX
2863 // enabled but the callee does, then passing an AVX argument
2864 // across this boundary would cause corrupt data to show up.
2866 // This problem is fixed by unconditionally passing SIMD
2867 // arguments through memory between callers and callees
2868 // which should get them all to agree on ABI regardless of
2869 // target feature sets. Some more information about this
2870 // issue can be found in #44367.
2872 // Note that the platform intrinsic ABI is exempt here as
2873 // that's how we connect up to LLVM and it's unstable
2874 // anyway, we control all calls to it in libstd.
2876 if abi != SpecAbi::PlatformIntrinsic
2877 && cx.tcx().sess.target.simd_types_indirect =>
2879 arg.make_indirect();
2886 // Pass and return structures up to 2 pointers in size by value, matching `ScalarPair`.
2887 // LLVM will usually pass these in 2 registers, which is more efficient than by-ref.
2888 let max_by_val_size = Pointer.size(cx) * 2;
2889 let size = arg.layout.size;
2891 if arg.layout.is_unsized() || size > max_by_val_size {
2892 arg.make_indirect();
2894 // We want to pass small aggregates as immediates, but using
2895 // a LLVM aggregate type for this leads to bad optimizations,
2896 // so we pick an appropriately sized integer type instead.
2897 arg.cast_to(Reg { kind: RegKind::Integer, size });
// Apply the fixup to the return slot and each argument in turn.
2900 fixup(&mut self.ret);
2901 for arg in &mut self.args {
// Non-Rust ABIs: delegate to the target's C-ABI classification; a failure
// there is reported as a fatal error.
2907 if let Err(msg) = self.adjust_for_cabi(cx, abi) {
2908 cx.tcx().sess.fatal(&msg);
// For virtual calls: turns the layout of a fat `self` receiver into the
// layout of a thin pointer (`*mut ()`), while keeping a fat-pointer *type*
// so the rest of codegen still sees `*mut dyn Trait`-like types.
// NOTE(review): some lines (the `else` branch's match opener and the final
// return) are elided in this excerpt.
2913 fn make_thin_self_ptr<'tcx, C>(cx: &C, mut layout: TyAndLayout<'tcx>) -> TyAndLayout<'tcx>
2915 C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout = TyAndLayout<'tcx>>
2917 + HasParamEnv<'tcx>,
2919 let fat_pointer_ty = if layout.is_unsized() {
2920 // unsized `self` is passed as a pointer to `self`
2921 // FIXME (mikeyhew) change this to use &own if it is ever added to the language
2922 cx.tcx().mk_mut_ptr(layout.ty)
// Sized receivers must already have a two-scalar (fat pointer) layout.
2925 Abi::ScalarPair(..) => (),
2926 _ => bug!("receiver type has unsupported layout: {:?}", layout),
2929 // In the case of Rc<Self>, we need to explicitly pass a *mut RcBox<Self>
2930 // with a Scalar (not ScalarPair) ABI. This is a hack that is understood
2931 // elsewhere in the compiler as a method on a `dyn Trait`.
2932 // To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes until we
2933 // get a built-in pointer type
2934 let mut fat_pointer_layout = layout;
// Descend through newtype wrappers (e.g. Rc, Pin) by repeatedly moving into
// the first non-ZST field until a raw or reference pointer type is reached.
2935 'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr()
2936 && !fat_pointer_layout.ty.is_region_ptr()
2938 for i in 0..fat_pointer_layout.fields.count() {
2939 let field_layout = fat_pointer_layout.field(cx, i);
2941 if !field_layout.is_zst() {
2942 fat_pointer_layout = field_layout;
2943 continue 'descend_newtypes;
// No non-ZST field means there is no pointer to descend into: a bug.
2947 bug!("receiver has no non-zero-sized fields {:?}", fat_pointer_layout);
2950 fat_pointer_layout.ty
2953 // we now have a type like `*mut RcBox<dyn Trait>`
2954 // change its layout to that of `*mut ()`, a thin pointer, but keep the same type
2955 // this is understood as a special case elsewhere in the compiler
2956 let unit_pointer_ty = cx.tcx().mk_mut_ptr(cx.tcx().mk_unit());
2957 layout = cx.layout_of(unit_pointer_ty);
// Keep the fat-pointer type with the thin-pointer layout (intentional lie).
2958 layout.ty = fat_pointer_ty;