use crate::ich::StableHashingContext;
use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use crate::mir::{GeneratorLayout, GeneratorSavedLocal};
use crate::ty::subst::Subst;
use crate::ty::{self, subst::SubstsRef, ReprOptions, Ty, TyCtxt, TypeFoldable};

use rustc_ast as ast;
use rustc_attr as attr;
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_hir as hir;
use rustc_hir::lang_items::LangItem;
use rustc_index::bit_set::BitSet;
use rustc_index::vec::{Idx, IndexVec};
use rustc_session::{DataTypeKind, FieldInfo, SizeKind, VariantInfo};
use rustc_span::symbol::{Ident, Symbol};
use rustc_span::DUMMY_SP;
use rustc_target::abi::call::{
    ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, Conv, FnAbi, PassMode, Reg, RegKind,
};
use rustc_target::abi::*;
use rustc_target::spec::{abi::Abi as SpecAbi, HasTargetSpec, PanicStrategy};

use std::cmp;
use std::fmt;
use std::iter;
use std::num::NonZeroUsize;
use std::ops::Bound;

pub trait IntegerExt {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx>;
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
    fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer;
    fn from_uint_ty<C: HasDataLayout>(cx: &C, uty: ty::UintTy) -> Integer;
    fn repr_discr<'tcx>(
        tcx: TyCtxt<'tcx>,
        ty: Ty<'tcx>,
        repr: &ReprOptions,
        min: i128,
        max: i128,
    ) -> (Integer, bool);
}

impl IntegerExt for Integer {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx> {
        match (*self, signed) {
            (I8, false) => tcx.types.u8,
            (I16, false) => tcx.types.u16,
            (I32, false) => tcx.types.u32,
            (I64, false) => tcx.types.u64,
            (I128, false) => tcx.types.u128,
            (I8, true) => tcx.types.i8,
            (I16, true) => tcx.types.i16,
            (I32, true) => tcx.types.i32,
            (I64, true) => tcx.types.i64,
            (I128, true) => tcx.types.i128,
        }
    }

    /// Gets the `Integer` type from an `attr::IntType`.
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
        let dl = cx.data_layout();

        match ity {
            attr::SignedInt(ast::IntTy::I8) | attr::UnsignedInt(ast::UintTy::U8) => I8,
            attr::SignedInt(ast::IntTy::I16) | attr::UnsignedInt(ast::UintTy::U16) => I16,
            attr::SignedInt(ast::IntTy::I32) | attr::UnsignedInt(ast::UintTy::U32) => I32,
            attr::SignedInt(ast::IntTy::I64) | attr::UnsignedInt(ast::UintTy::U64) => I64,
            attr::SignedInt(ast::IntTy::I128) | attr::UnsignedInt(ast::UintTy::U128) => I128,
            attr::SignedInt(ast::IntTy::Isize) | attr::UnsignedInt(ast::UintTy::Usize) => {
                dl.ptr_sized_integer()
            }
        }
    }

    fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer {
        match ity {
            ty::IntTy::I8 => I8,
            ty::IntTy::I16 => I16,
            ty::IntTy::I32 => I32,
            ty::IntTy::I64 => I64,
            ty::IntTy::I128 => I128,
            ty::IntTy::Isize => cx.data_layout().ptr_sized_integer(),
        }
    }

    fn from_uint_ty<C: HasDataLayout>(cx: &C, ity: ty::UintTy) -> Integer {
        match ity {
            ty::UintTy::U8 => I8,
            ty::UintTy::U16 => I16,
            ty::UintTy::U32 => I32,
            ty::UintTy::U64 => I64,
            ty::UintTy::U128 => I128,
            ty::UintTy::Usize => cx.data_layout().ptr_sized_integer(),
        }
    }

    /// Finds the appropriate Integer type and signedness for the given
    /// signed discriminant range and `#[repr]` attribute.
    /// N.B.: `u128` values above `i128::MAX` will be treated as signed, but
    /// that shouldn't affect anything, other than maybe debuginfo.
    fn repr_discr<'tcx>(
        tcx: TyCtxt<'tcx>,
        ty: Ty<'tcx>,
        repr: &ReprOptions,
        min: i128,
        max: i128,
    ) -> (Integer, bool) {
        // Theoretically, negative values could be larger in unsigned representation
        // than the unsigned representation of the signed minimum. However, if there
        // are any negative values, the only valid unsigned representation is u128
        // which can fit all i128 values, so the result remains unaffected.
        let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
        let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));

        let mut min_from_extern = None;
        let min_default = I8;

        if let Some(ity) = repr.int {
            let discr = Integer::from_attr(&tcx, ity);
            let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
            if discr < fit {
                bug!(
                    "Integer::repr_discr: `#[repr]` hint too small for \
                      discriminant range of enum `{}`",
                    ty
                )
            }
            return (discr, ity.is_signed());
        }

        if repr.c() {
            match &tcx.sess.target.arch[..] {
                "hexagon" => min_from_extern = Some(I8),
                // WARNING: the ARM EABI has two variants; the one corresponding
                // to `at_least == I32` appears to be used on Linux and NetBSD,
                // but some systems may use the variant corresponding to no
                // lower bound. However, we don't run on those yet...?
                "arm" => min_from_extern = Some(I32),
                _ => min_from_extern = Some(I32),
            }
        }

        let at_least = min_from_extern.unwrap_or(min_default);

        // If there are no negative values, we can use the unsigned fit.
        if min >= 0 {
            (cmp::max(unsigned_fit, at_least), false)
        } else {
            (cmp::max(signed_fit, at_least), true)
        }
    }
}

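// Worked example (editor's illustration, not part of the original source):
// for an enum whose discriminant range is -1..=100 with no `#[repr]`
// attribute, `signed_fit` is `I8`, `at_least` is `I8` (no `repr(C)`), and
// `min < 0`, so `repr_discr` returns `(I8, true)`.
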
pub trait PrimitiveExt {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
    fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
}

impl PrimitiveExt for Primitive {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
        match *self {
            Int(i, signed) => i.to_ty(tcx, signed),
            F32 => tcx.types.f32,
            F64 => tcx.types.f64,
            Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
        }
    }

    /// Returns an *integer* type matching this primitive.
    /// Useful in particular when dealing with enum discriminants.
    fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
        match *self {
            Int(i, signed) => i.to_ty(tcx, signed),
            Pointer => tcx.types.usize,
            F32 | F64 => bug!("floats do not have an int type"),
        }
    }
}

/// The first half of a fat pointer.
///
/// - For a trait object, this is the address of the box.
/// - For a slice, this is the base address.
pub const FAT_PTR_ADDR: usize = 0;

/// The second half of a fat pointer.
///
/// - For a trait object, this is the address of the vtable.
/// - For a slice, this is the length.
pub const FAT_PTR_EXTRA: usize = 1;

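// Editor's illustration (not part of the original source): for a slice
// reference `&[u8]`, field `FAT_PTR_ADDR` of the scalar pair is the data
// pointer and field `FAT_PTR_EXTRA` is the length; for `&dyn Trait`, the
// second field is the vtable pointer instead.
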
/// The maximum supported number of lanes in a SIMD vector.
///
/// This value is selected based on backend support:
/// * LLVM does not appear to have a vector width limit.
/// * Cranelift stores the base-2 log of the lane count in a 4-bit integer.
pub const MAX_SIMD_LANES: u64 = 1 << 0xF;

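// Editor's note: `1 << 0xF` is 2^15 = 32768, the largest lane count whose
// base-2 log (15) still fits in Cranelift's 4-bit field.
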
#[derive(Copy, Clone, Debug, TyEncodable, TyDecodable)]
pub enum LayoutError<'tcx> {
    Unknown(Ty<'tcx>),
    SizeOverflow(Ty<'tcx>),
}

impl<'tcx> fmt::Display for LayoutError<'tcx> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match *self {
            LayoutError::Unknown(ty) => write!(f, "the type `{}` has an unknown layout", ty),
            LayoutError::SizeOverflow(ty) => {
                write!(f, "values of the type `{}` are too big for the current architecture", ty)
            }
        }
    }
}

fn layout_raw<'tcx>(
    tcx: TyCtxt<'tcx>,
    query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
) -> Result<&'tcx Layout, LayoutError<'tcx>> {
    ty::tls::with_related_context(tcx, move |icx| {
        let (param_env, ty) = query.into_parts();

        if !tcx.sess.recursion_limit().value_within_limit(icx.layout_depth) {
            tcx.sess.fatal(&format!("overflow representing the type `{}`", ty));
        }

        // Update the ImplicitCtxt to increase the layout_depth.
        let icx = ty::tls::ImplicitCtxt { layout_depth: icx.layout_depth + 1, ..icx.clone() };

        ty::tls::enter_context(&icx, |_| {
            let cx = LayoutCx { tcx, param_env };
            let layout = cx.layout_raw_uncached(ty);
            // Type-level uninhabitedness should always imply ABI uninhabitedness.
            if let Ok(layout) = layout {
                if tcx.conservative_is_privately_uninhabited(param_env.and(ty)) {
                    assert!(layout.abi.is_uninhabited());
                }
            }
            layout
        })
    })
}

pub fn provide(providers: &mut ty::query::Providers) {
    *providers = ty::query::Providers { layout_raw, ..*providers };
}

pub struct LayoutCx<'tcx, C> {
    pub tcx: C,
    pub param_env: ty::ParamEnv<'tcx>,
}

#[derive(Copy, Clone, Debug)]
enum StructKind {
    /// A tuple, closure, or univariant which cannot be coerced to unsized.
    AlwaysSized,
    /// A univariant, the last field of which may be coerced to unsized.
    MaybeUnsized,
    /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., an enum tag).
    Prefixed(Size, Align),
}

// Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
// This is used to go between `memory_index` (source field order to memory order)
// and `inverse_memory_index` (memory order to source field order).
// See also `FieldsShape::Arbitrary::memory_index` for more details.
// FIXME(eddyb) build a better abstraction for permutations, if possible.
fn invert_mapping(map: &[u32]) -> Vec<u32> {
    let mut inverse = vec![0; map.len()];
    for i in 0..map.len() {
        inverse[map[i] as usize] = i as u32;
    }
    inverse
}

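// Worked example (editor's illustration, not part of the original source):
// given `map = [2, 0, 1]` (source field 0 lives at memory position 2,
// field 1 at position 0, field 2 at position 1), the loop above yields
// `[1, 2, 0]`: memory position 0 holds source field 1, and so on.
//
//     assert_eq!(invert_mapping(&[2, 0, 1]), vec![1, 2, 0]);
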
impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
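    // Worked example (editor's sketch, assuming a typical 64-bit data layout):
    // `scalar_pair` for a `u8` and a `u32` places the `u8` at offset 0 and the
    // `u32` at `b_offset = 4` (its alignment), giving overall align 4 and size
    // `(4 + 4).align_to(4) = 8`.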
    fn scalar_pair(&self, a: Scalar, b: Scalar) -> Layout {
        let dl = self.data_layout();
        let b_align = b.value.align(dl);
        let align = a.value.align(dl).max(b_align).max(dl.aggregate_align);
        let b_offset = a.value.size(dl).align_to(b_align.abi);
        let size = (b_offset + b.value.size(dl)).align_to(align.abi);

        // HACK(nox): We iter on `b` and then `a` because `max_by_key`
        // returns the last maximum.
        let largest_niche = Niche::from_scalar(dl, b_offset, b.clone())
            .into_iter()
            .chain(Niche::from_scalar(dl, Size::ZERO, a.clone()))
            .max_by_key(|niche| niche.available(dl));

        Layout {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Arbitrary {
                offsets: vec![Size::ZERO, b_offset],
                memory_index: vec![0, 1],
            },
            abi: Abi::ScalarPair(a, b),
            largest_niche,
            align,
            size,
        }
    }

    fn univariant_uninterned(
        &self,
        ty: Ty<'tcx>,
        fields: &[TyAndLayout<'_>],
        repr: &ReprOptions,
        kind: StructKind,
    ) -> Result<Layout, LayoutError<'tcx>> {
        let dl = self.data_layout();
        let pack = repr.pack;
        if pack.is_some() && repr.align.is_some() {
            bug!("struct cannot be packed and aligned");
        }

        let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };

        let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();

        let optimize = !repr.inhibit_struct_field_reordering_opt();
        if optimize {
            let end =
                if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
            let optimizing = &mut inverse_memory_index[..end];
            let field_align = |f: &TyAndLayout<'_>| {
                if let Some(pack) = pack { f.align.abi.min(pack) } else { f.align.abi }
            };
            match kind {
                StructKind::AlwaysSized | StructKind::MaybeUnsized => {
                    optimizing.sort_by_key(|&x| {
                        // Place ZSTs first to avoid "interesting offsets",
                        // especially with only one or two non-ZST fields.
                        let f = &fields[x as usize];
                        (!f.is_zst(), cmp::Reverse(field_align(f)))
                    });
                }
                StructKind::Prefixed(..) => {
                    // Sort in ascending alignment so that the layout stays
                    // optimal regardless of the prefix.
                    optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
                }
            }
        }

        // inverse_memory_index holds field indices by increasing memory offset.
        // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
        // We now write field offsets to the corresponding offset slot;
        // field 5 with offset 0 puts 0 in offsets[5].
        // At the bottom of this function, we invert `inverse_memory_index` to
        // produce `memory_index` (see `invert_mapping`).

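        // Worked example (editor's illustration, not part of the original
        // source): for a default-repr struct with fields `(u8, u32, u16)`,
        // the sort above places the non-ZST fields in descending alignment
        // order, so memory order becomes `u32, u16, u8` (offsets 0, 4, 6)
        // and `inverse_memory_index` is `[1, 2, 0]`.
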
        let mut sized = true;
        let mut offsets = vec![Size::ZERO; fields.len()];
        let mut offset = Size::ZERO;
        let mut largest_niche = None;
        let mut largest_niche_available = 0;

        if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
            let prefix_align =
                if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
            align = align.max(AbiAndPrefAlign::new(prefix_align));
            offset = prefix_size.align_to(prefix_align);
        }

        for &i in &inverse_memory_index {
            let field = fields[i as usize];
            if !sized {
                bug!("univariant: field #{} of `{}` comes after unsized field", offsets.len(), ty);
            }

            if field.is_unsized() {
                sized = false;
            }

            // Invariant: offset < dl.obj_size_bound() <= 1<<61
            let field_align = if let Some(pack) = pack {
                field.align.min(AbiAndPrefAlign::new(pack))
            } else {
                field.align
            };
            offset = offset.align_to(field_align.abi);
            align = align.max(field_align);

            debug!("univariant offset: {:?} field: {:#?}", offset, field);
            offsets[i as usize] = offset;

            if !repr.hide_niche() {
                if let Some(mut niche) = field.largest_niche.clone() {
                    let available = niche.available(dl);
                    if available > largest_niche_available {
                        largest_niche_available = available;
                        niche.offset += offset;
                        largest_niche = Some(niche);
                    }
                }
            }

            offset = offset.checked_add(field.size, dl).ok_or(LayoutError::SizeOverflow(ty))?;
        }

        if let Some(repr_align) = repr.align {
            align = align.max(AbiAndPrefAlign::new(repr_align));
        }

        debug!("univariant min_size: {:?}", offset);
        let min_size = offset;

        // As stated above, inverse_memory_index holds field indices by increasing offset.
        // This makes it an already-sorted view of the offsets vec.
        // To invert it, consider:
        // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
        // Field 5 would be the first element, so memory_index is i:
        // Note: if we didn't optimize, it's already right.

        let memory_index =
            if optimize { invert_mapping(&inverse_memory_index) } else { inverse_memory_index };

        let size = min_size.align_to(align.abi);
        let mut abi = Abi::Aggregate { sized };

        // Unpack newtype ABIs and find scalar pairs.
        if sized && size.bytes() > 0 {
            // All other fields must be ZSTs.
            let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());

            match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
                // We have exactly one non-ZST field.
                (Some((i, field)), None, None) => {
                    // Field fills the struct and it has a scalar or scalar pair ABI.
                    if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size
                    {
                        match field.abi {
                            // For plain scalars, or vectors of them, we can't unpack
                            // newtypes for `#[repr(C)]`, as that affects C ABIs.
                            Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
                                abi = field.abi.clone();
                            }
                            // But scalar pairs are Rust-specific and get
                            // treated as aggregates by C ABIs anyway.
                            Abi::ScalarPair(..) => {
                                abi = field.abi.clone();
                            }
                            _ => {}
                        }
                    }
                }

                // Two non-ZST fields, and they're both scalars.
                (
                    Some((i, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(ref a), .. }, .. })),
                    Some((j, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(ref b), .. }, .. })),
                    None,
                ) => {
                    // Order by the memory placement, not source order.
                    let ((i, a), (j, b)) =
                        if offsets[i] < offsets[j] { ((i, a), (j, b)) } else { ((j, b), (i, a)) };
                    let pair = self.scalar_pair(a.clone(), b.clone());
                    let pair_offsets = match pair.fields {
                        FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
                            assert_eq!(memory_index, &[0, 1]);
                            offsets
                        }
                        _ => bug!(),
                    };
                    if offsets[i] == pair_offsets[0]
                        && offsets[j] == pair_offsets[1]
                        && align == pair.align
                        && size == pair.size
                    {
                        // We can use `ScalarPair` only when it matches our
                        // already computed layout (including `#[repr(C)]`).
                        abi = pair.abi;
                    }
                }

                _ => {}
            }
        }

        if sized && fields.iter().any(|f| f.abi.is_uninhabited()) {
            abi = Abi::Uninhabited;
        }

        Ok(Layout {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Arbitrary { offsets, memory_index },
            abi,
            largest_niche,
            align,
            size,
        })
    }

    fn layout_raw_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx Layout, LayoutError<'tcx>> {
        let tcx = self.tcx;
        let param_env = self.param_env;
        let dl = self.data_layout();
        let scalar_unit = |value: Primitive| {
            let bits = value.size(dl).bits();
            assert!(bits <= 128);
            Scalar { value, valid_range: 0..=(!0 >> (128 - bits)) }
        };
        let scalar = |value: Primitive| tcx.intern_layout(Layout::scalar(self, scalar_unit(value)));

        let univariant = |fields: &[TyAndLayout<'_>], repr: &ReprOptions, kind| {
            Ok(tcx.intern_layout(self.univariant_uninterned(ty, fields, repr, kind)?))
        };
        debug_assert!(!ty.has_infer_types_or_consts());

        Ok(match *ty.kind() {
            // Basic scalars.
            ty::Bool => tcx.intern_layout(Layout::scalar(
                self,
                Scalar { value: Int(I8, false), valid_range: 0..=1 },
            )),
            ty::Char => tcx.intern_layout(Layout::scalar(
                self,
                Scalar { value: Int(I32, false), valid_range: 0..=0x10FFFF },
            )),
            ty::Int(ity) => scalar(Int(Integer::from_int_ty(dl, ity), true)),
            ty::Uint(ity) => scalar(Int(Integer::from_uint_ty(dl, ity), false)),
            ty::Float(fty) => scalar(match fty {
                ty::FloatTy::F32 => F32,
                ty::FloatTy::F64 => F64,
            }),
            ty::FnPtr(_) => {
                let mut ptr = scalar_unit(Pointer);
                ptr.valid_range = 1..=*ptr.valid_range.end();
                tcx.intern_layout(Layout::scalar(self, ptr))
            }

            // The never type.
            ty::Never => tcx.intern_layout(Layout {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldsShape::Primitive,
                abi: Abi::Uninhabited,
                largest_niche: None,
                align: dl.i8_align,
                size: Size::ZERO,
            }),

            // Potentially-wide pointers.
            ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                let mut data_ptr = scalar_unit(Pointer);
                if !ty.is_unsafe_ptr() {
                    data_ptr.valid_range = 1..=*data_ptr.valid_range.end();
                }

                let pointee = tcx.normalize_erasing_regions(param_env, pointee);
                if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
                    return Ok(tcx.intern_layout(Layout::scalar(self, data_ptr)));
                }

                let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
                let metadata = match unsized_part.kind() {
                    ty::Foreign(..) => {
                        return Ok(tcx.intern_layout(Layout::scalar(self, data_ptr)));
                    }
                    ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)),
                    ty::Dynamic(..) => {
                        let mut vtable = scalar_unit(Pointer);
                        vtable.valid_range = 1..=*vtable.valid_range.end();
                        vtable
                    }
                    _ => return Err(LayoutError::Unknown(unsized_part)),
                };

                // Effectively a (ptr, meta) tuple.
                tcx.intern_layout(self.scalar_pair(data_ptr, metadata))
            }

            // Arrays and slices.
            ty::Array(element, mut count) => {
                if count.has_projections() {
                    count = tcx.normalize_erasing_regions(param_env, count);
                    if count.has_projections() {
                        return Err(LayoutError::Unknown(ty));
                    }
                }

                let count = count.try_eval_usize(tcx, param_env).ok_or(LayoutError::Unknown(ty))?;
                let element = self.layout_of(element)?;
                let size =
                    element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;

                let abi =
                    if count != 0 && tcx.conservative_is_privately_uninhabited(param_env.and(ty)) {
                        Abi::Uninhabited
                    } else {
                        Abi::Aggregate { sized: true }
                    };

                let largest_niche = if count != 0 { element.largest_niche.clone() } else { None };

                tcx.intern_layout(Layout {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldsShape::Array { stride: element.size, count },
                    abi,
                    largest_niche,
                    align: element.align,
                    size,
                })
            }
            ty::Slice(element) => {
                let element = self.layout_of(element)?;
                tcx.intern_layout(Layout {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldsShape::Array { stride: element.size, count: 0 },
                    abi: Abi::Aggregate { sized: false },
                    largest_niche: None,
                    align: element.align,
                    size: Size::ZERO,
                })
            }
            ty::Str => tcx.intern_layout(Layout {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
                abi: Abi::Aggregate { sized: false },
                largest_niche: None,
                align: dl.i8_align,
                size: Size::ZERO,
            }),

            // Odd unit types.
            ty::FnDef(..) => univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?,
            ty::Dynamic(..) | ty::Foreign(..) => {
                let mut unit = self.univariant_uninterned(
                    ty,
                    &[],
                    &ReprOptions::default(),
                    StructKind::AlwaysSized,
                )?;
                match unit.abi {
                    Abi::Aggregate { ref mut sized } => *sized = false,
                    _ => bug!(),
                }
                tcx.intern_layout(unit)
            }

            ty::Generator(def_id, substs, _) => self.generator_layout(ty, def_id, substs)?,

            ty::Closure(_, ref substs) => {
                let tys = substs.as_closure().upvar_tys();
                univariant(
                    &tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    StructKind::AlwaysSized,
                )?
            }

            ty::Tuple(tys) => {
                let kind =
                    if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };

                univariant(
                    &tys.iter()
                        .map(|k| self.layout_of(k.expect_ty()))
                        .collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    kind,
                )?
            }

            // SIMD vector types.
            ty::Adt(def, substs) if def.repr.simd() => {
                // Supported SIMD vectors are homogeneous ADTs with at least one field:
                //
                // * #[repr(simd)] struct S(T, T, T, T);
                // * #[repr(simd)] struct S { x: T, y: T, z: T, w: T }
                // * #[repr(simd)] struct S([T; 4])
                //
                // where T is a primitive scalar (integer/float/pointer).

                // SIMD vectors with zero fields are not supported.
                // (should be caught by typeck)
                if def.non_enum_variant().fields.is_empty() {
                    tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
                }

                // Type of the first ADT field:
                let f0_ty = def.non_enum_variant().fields[0].ty(tcx, substs);

                // Heterogeneous SIMD vectors are not supported:
                // (should be caught by typeck)
                for fi in &def.non_enum_variant().fields {
                    if fi.ty(tcx, substs) != f0_ty {
                        tcx.sess.fatal(&format!("monomorphising heterogeneous SIMD type `{}`", ty));
                    }
                }

                // The element type and number of elements of the SIMD vector
                // are obtained from:
                //
                // * the element type and length of the single array field, if
                //   the first field is of array type, or
                //
                // * the homogeneous field type and the number of fields.
                let (e_ty, e_len, is_array) = if let ty::Array(e_ty, _) = f0_ty.kind() {
                    // First ADT field is an array:

                    // SIMD vectors with multiple array fields are not supported:
                    // (should be caught by typeck)
                    if def.non_enum_variant().fields.len() != 1 {
                        tcx.sess.fatal(&format!(
                            "monomorphising SIMD type `{}` with more than one array field",
                            ty
                        ));
                    }

                    // Extract the number of elements from the layout of the array field:
                    let len = if let Ok(TyAndLayout {
                        layout: Layout { fields: FieldsShape::Array { count, .. }, .. },
                        ..
                    }) = self.layout_of(f0_ty)
                    {
                        count
                    } else {
                        return Err(LayoutError::Unknown(ty));
                    };

                    (*e_ty, *len, true)
                } else {
                    // First ADT field is not an array:
                    (f0_ty, def.non_enum_variant().fields.len() as _, false)
                };

                // SIMD vectors of zero length are not supported.
                // Additionally, lengths are capped at 2^15 (`MAX_SIMD_LANES`),
                // a fixed maximum backends must support.
                //
                // Can't be caught in typeck if the array length is generic.
                if e_len == 0 {
                    tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
                } else if e_len > MAX_SIMD_LANES {
                    tcx.sess.fatal(&format!(
                        "monomorphising SIMD type `{}` of length greater than {}",
                        ty, MAX_SIMD_LANES,
                    ));
                }

                // Compute the ABI of the element type:
                let e_ly = self.layout_of(e_ty)?;
                let e_abi = if let Abi::Scalar(ref scalar) = e_ly.abi {
                    scalar.clone()
                } else {
                    // This error isn't caught in typeck, e.g., if
                    // the element type of the vector is generic.
                    tcx.sess.fatal(&format!(
                        "monomorphising SIMD type `{}` with a non-primitive-scalar \
                        (integer/float/pointer) element type `{}`",
                        ty, e_ty
                    ))
                };

                // Compute the size and alignment of the vector:
                let size = e_ly.size.checked_mul(e_len, dl).ok_or(LayoutError::SizeOverflow(ty))?;
                let align = dl.vector_align(size);
                let size = size.align_to(align.abi);

                // Compute the placement of the vector fields:
                let fields = if is_array {
                    FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] }
                } else {
                    FieldsShape::Array { stride: e_ly.size, count: e_len }
                };

                tcx.intern_layout(Layout {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields,
                    abi: Abi::Vector { element: e_abi, count: e_len },
                    largest_niche: e_ly.largest_niche.clone(),
                    size,
                    align,
                })
            }

            // ADTs.
            ty::Adt(def, substs) => {
                // Cache the field layouts.
                let variants = def
                    .variants
                    .iter()
                    .map(|v| {
                        v.fields
                            .iter()
                            .map(|field| self.layout_of(field.ty(tcx, substs)))
                            .collect::<Result<Vec<_>, _>>()
                    })
                    .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                if def.is_union() {
                    if def.repr.pack.is_some() && def.repr.align.is_some() {
                        bug!("union cannot be packed and aligned");
                    }

                    let mut align =
                        if def.repr.pack.is_some() { dl.i8_align } else { dl.aggregate_align };

                    if let Some(repr_align) = def.repr.align {
                        align = align.max(AbiAndPrefAlign::new(repr_align));
                    }

                    let optimize = !def.repr.inhibit_union_abi_opt();
                    let mut size = Size::ZERO;
                    let mut abi = Abi::Aggregate { sized: true };
                    let index = VariantIdx::new(0);
                    for field in &variants[index] {
                        assert!(!field.is_unsized());
                        align = align.max(field.align);

                        // If all non-ZST fields have the same ABI, forward this ABI.
                        if optimize && !field.is_zst() {
                            // Normalize scalar_unit to the maximal valid range.
                            let field_abi = match &field.abi {
                                Abi::Scalar(x) => Abi::Scalar(scalar_unit(x.value)),
                                Abi::ScalarPair(x, y) => {
                                    Abi::ScalarPair(scalar_unit(x.value), scalar_unit(y.value))
                                }
                                Abi::Vector { element: x, count } => {
                                    Abi::Vector { element: scalar_unit(x.value), count: *count }
                                }
                                Abi::Uninhabited | Abi::Aggregate { .. } => {
                                    Abi::Aggregate { sized: true }
                                }
                            };

                            if size == Size::ZERO {
                                // First non-ZST field: initialize `abi`.
                                abi = field_abi;
                            } else if abi != field_abi {
                                // Different fields have different ABIs: reset to Aggregate.
                                abi = Abi::Aggregate { sized: true };
                            }
                        }

                        size = cmp::max(size, field.size);
                    }

                    if let Some(pack) = def.repr.pack {
                        align = align.min(AbiAndPrefAlign::new(pack));
                    }

                    return Ok(tcx.intern_layout(Layout {
                        variants: Variants::Single { index },
                        fields: FieldsShape::Union(
                            NonZeroUsize::new(variants[index].len())
                                .ok_or(LayoutError::Unknown(ty))?,
                        ),
                        abi,
                        largest_niche: None,
                        align,
                        size: size.align_to(align.abi),
                    }));
                }

                // A variant is absent if it's uninhabited and only has ZST fields.
                // Present uninhabited variants only require space for their fields,
                // but *not* an encoding of the discriminant (e.g., a tag value).
                // See issue #49298 for more details on the need to leave space
                // for non-ZST uninhabited data (mostly partial initialization).
                let absent = |fields: &[TyAndLayout<'_>]| {
                    let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
                    let is_zst = fields.iter().all(|f| f.is_zst());
                    uninhabited && is_zst
                };
                let (present_first, present_second) = {
                    let mut present_variants = variants
                        .iter_enumerated()
                        .filter_map(|(i, v)| if absent(v) { None } else { Some(i) });
                    (present_variants.next(), present_variants.next())
                };
                let present_first = match present_first {
                    Some(present_first) => present_first,
                    // Uninhabited because it has no variants, or only absent ones.
                    None if def.is_enum() => return tcx.layout_raw(param_env.and(tcx.types.never)),
                    // If it's a struct, still compute a layout so that we can still compute the
                    // field offsets.
                    None => VariantIdx::new(0),
                };

                let is_struct = !def.is_enum() ||
                    // Only one variant is present.
                    (present_second.is_none() &&
                    // Representation optimizations are allowed.
                    !def.repr.inhibit_enum_layout_opt());
                if is_struct {
                    // Struct, or univariant enum equivalent to a struct.
                    // (Typechecking will reject discriminant-sizing attrs.)

                    let v = present_first;
                    let kind = if def.is_enum() || variants[v].is_empty() {
                        StructKind::AlwaysSized
                    } else {
                        let param_env = tcx.param_env(def.did);
                        let last_field = def.variants[v].fields.last().unwrap();
                        let always_sized =
                            tcx.type_of(last_field.did).is_sized(tcx.at(DUMMY_SP), param_env);
                        if !always_sized {
                            StructKind::MaybeUnsized
                        } else {
                            StructKind::AlwaysSized
                        }
                    };

                    let mut st = self.univariant_uninterned(ty, &variants[v], &def.repr, kind)?;
                    st.variants = Variants::Single { index: v };
                    let (start, end) = self.tcx.layout_scalar_valid_range(def.did);
                    match st.abi {
                        Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
                            // The asserts ensure that we are not using the
                            // `#[rustc_layout_scalar_valid_range(n)]`
                            // attribute to widen the range of anything, as that would
                            // probably result in UB somewhere.
                            // FIXME(eddyb) the asserts are probably not needed,
                            // as larger validity ranges would result in missed
                            // optimizations, *not* wrongly assuming the inner
                            // value is valid. e.g. unions enlarge validity ranges,
                            // because the values may be uninitialized.
                            if let Bound::Included(start) = start {
                                // FIXME(eddyb) this might be incorrect - it doesn't
                                // account for wrap-around (end < start) ranges.
                                assert!(*scalar.valid_range.start() <= start);
                                scalar.valid_range = start..=*scalar.valid_range.end();
                            }
                            if let Bound::Included(end) = end {
                                // FIXME(eddyb) this might be incorrect - it doesn't
                                // account for wrap-around (end < start) ranges.
                                assert!(*scalar.valid_range.end() >= end);
                                scalar.valid_range = *scalar.valid_range.start()..=end;
                            }

                            // Update `largest_niche` if we have introduced a larger niche.
                            let niche = if def.repr.hide_niche() {
                                None
                            } else {
                                Niche::from_scalar(dl, Size::ZERO, scalar.clone())
                            };
                            if let Some(niche) = niche {
                                match &st.largest_niche {
                                    Some(largest_niche) => {
                                        // Replace the existing niche even if they're equal,
                                        // because this one is at a lower offset.
                                        if largest_niche.available(dl) <= niche.available(dl) {
                                            st.largest_niche = Some(niche);
                                        }
                                    }
                                    None => st.largest_niche = Some(niche),
                                }
                            }
                        }
                        _ => assert!(
                            start == Bound::Unbounded && end == Bound::Unbounded,
                            "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
                            def,
                            st,
                        ),
                    }

                    return Ok(tcx.intern_layout(st));
                }

                // At this point, we have handled all unions and
                // structs. (We have also handled univariant enums
                // that allow representation optimization.)
                assert!(def.is_enum());

                // The current code for niche-filling relies on variant indices
                // instead of actual discriminants, so dataful enums with
                // explicit discriminants (RFC #2363) would misbehave.
                let no_explicit_discriminants = def
                    .variants
                    .iter_enumerated()
                    .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i.as_u32()));

                let mut niche_filling_layout = None;

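                // Editor's illustration (not part of the original source):
                // the classic beneficiary of this optimization is `Option<&T>`:
                // the payload `&T` has valid range `1..=MAX`, so the spare
                // value 0 encodes `None` and the enum stays pointer-sized,
                // with no separate tag field.
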
                // Niche-filling enum optimization.
                if !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants {
                    let mut dataful_variant = None;
                    let mut niche_variants = VariantIdx::MAX..=VariantIdx::new(0);

                    // Find one non-ZST variant.
                    'variants: for (v, fields) in variants.iter_enumerated() {
                        if absent(fields) {
                            continue 'variants;
                        }
                        for f in fields {
                            if !f.is_zst() {
                                if dataful_variant.is_none() {
                                    dataful_variant = Some(v);
                                    continue 'variants;
                                } else {
                                    dataful_variant = None;
                                    break 'variants;
                                }
                            }
                        }
                        niche_variants = *niche_variants.start().min(&v)..=v;
                    }

                    if niche_variants.start() > niche_variants.end() {
                        dataful_variant = None;
                    }

                    if let Some(i) = dataful_variant {
                        let count = (niche_variants.end().as_u32()
                            - niche_variants.start().as_u32()
                            + 1) as u128;

                        // Find the field with the largest niche.
                        let niche_candidate = variants[i]
                            .iter()
                            .enumerate()
                            .filter_map(|(j, &field)| Some((j, field.largest_niche.as_ref()?)))
                            .max_by_key(|(_, niche)| niche.available(dl));

                        if let Some((field_index, niche, (niche_start, niche_scalar))) =
                            niche_candidate.and_then(|(field_index, niche)| {
                                Some((field_index, niche, niche.reserve(self, count)?))
                            })
                        {
                            let mut align = dl.aggregate_align;
                            let st = variants
                                .iter_enumerated()
                                .map(|(j, v)| {
                                    let mut st = self.univariant_uninterned(
                                        ty,
                                        v,
                                        &def.repr,
                                        StructKind::AlwaysSized,
                                    )?;
                                    st.variants = Variants::Single { index: j };

                                    align = align.max(st.align);

                                    Ok(st)
                                })
                                .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                            let offset = st[i].fields.offset(field_index) + niche.offset;
                            let size = st[i].size;

                            let abi = if st.iter().all(|v| v.abi.is_uninhabited()) {
                                Abi::Uninhabited
                            } else {
                                match st[i].abi {
                                    Abi::Scalar(_) => Abi::Scalar(niche_scalar.clone()),
                                    Abi::ScalarPair(ref first, ref second) => {
                                        // We need to use scalar_unit to reset the
                                        // valid range to the maximal one for that
                                        // primitive, because only the niche is
                                        // guaranteed to be initialised, not the
                                        // other primitive.
                                        if offset.bytes() == 0 {
                                            Abi::ScalarPair(
                                                niche_scalar.clone(),
                                                scalar_unit(second.value),
                                            )
                                        } else {
                                            Abi::ScalarPair(
                                                scalar_unit(first.value),
                                                niche_scalar.clone(),
                                            )
                                        }
                                    }
                                    _ => Abi::Aggregate { sized: true },
                                }
                            };

                            let largest_niche =
                                Niche::from_scalar(dl, offset, niche_scalar.clone());

                            niche_filling_layout = Some(Layout {
                                variants: Variants::Multiple {
                                    tag: niche_scalar,
                                    tag_encoding: TagEncoding::Niche {
                                        dataful_variant: i,
                                        niche_variants,
                                        niche_start,
                                    },
                                    tag_field: 0,
                                    variants: st,
                                },
                                fields: FieldsShape::Arbitrary {
                                    offsets: vec![offset],
                                    memory_index: vec![0],
                                },
                                abi,
                                largest_niche,
                                size,
                                align,
                            });
                        }
                    }
                }

                let (mut min, mut max) = (i128::MAX, i128::MIN);
                let discr_type = def.repr.discr_type();
                let bits = Integer::from_attr(self, discr_type).size().bits();
                for (i, discr) in def.discriminants(tcx) {
                    if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
                        continue;
                    }
                    let mut x = discr.val as i128;
                    if discr_type.is_signed() {
                        // Sign-extend the raw representation to be an i128.
                        x = (x << (128 - bits)) >> (128 - bits);
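                        // Editor's note: e.g. with `bits == 8`, the raw value
                        // 0xFF becomes `(0xFF << 120) >> 120 == -1` as an i128.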
                    }
                    if x < min {
                        min = x;
                    }
                    if x > max {
                        max = x;
                    }
                }
                // We might have no inhabited variants, so pretend there's at least one.
                if (min, max) == (i128::MAX, i128::MIN) {
                    min = 0;
                    max = 0;
                }
                assert!(min <= max, "discriminant range is {}...{}", min, max);
                let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max);

                let mut align = dl.aggregate_align;
                let mut size = Size::ZERO;

                // We're interested in the smallest alignment, so start large.
                let mut start_align = Align::from_bytes(256).unwrap();
                assert_eq!(Integer::for_align(dl, start_align), None);

                // repr(C) on an enum tells us to make a (tag, union) layout,
                // so we need to grow the prefix alignment to be at least
                // the alignment of the union. (This value is used both for
                // determining the alignment of the overall enum, and for
                // determining the alignment of the payload after the tag.)
                let mut prefix_align = min_ity.align(dl).abi;
                if def.repr.c() {
                    for fields in &variants {
                        for field in fields {
                            prefix_align = prefix_align.max(field.align.abi);
                        }
                    }
                }

                // Create the set of structs that represent each variant.
                let mut layout_variants = variants
                    .iter_enumerated()
                    .map(|(i, field_layouts)| {
                        let mut st = self.univariant_uninterned(
                            ty,
                            &field_layouts,
                            &def.repr,
                            StructKind::Prefixed(min_ity.size(), prefix_align),
                        )?;
                        st.variants = Variants::Single { index: i };
                        // Find the first field we can't move later
                        // to make room for a larger discriminant.
                        for field in
                            st.fields.index_by_increasing_offset().map(|j| field_layouts[j])
                        {
                            if !field.is_zst() || field.align.abi.bytes() != 1 {
                                start_align = start_align.min(field.align.abi);
                                break;
                            }
                        }
                        size = cmp::max(size, st.size);
                        align = align.max(st.align);
                        Ok(st)
                    })
                    .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                // Align the maximum variant size to the largest alignment.
                size = size.align_to(align.abi);

                if size.bytes() >= dl.obj_size_bound() {
                    return Err(LayoutError::SizeOverflow(ty));
                }

                let typeck_ity = Integer::from_attr(dl, def.repr.discr_type());
                if typeck_ity < min_ity {
                    // It is a bug if Layout decided on a greater discriminant size than typeck
                    // did at this point (based on the values the discriminant can take on).
                    // Mostly because this discriminant will be loaded, and then stored into a
                    // variable of the type computed by typeck. Consider such a case (a bug):
                    // typeck decided on a byte-sized discriminant, but layout thinks we need
                    // 16 bits to store all discriminant values. Then, in codegen, storing this
                    // 16-bit discriminant into an 8-bit sized temporary would discard some of
                    // the space necessary to represent it (or layout is wrong in thinking it
                    // needs 16 bits).
                    bug!(
                        "layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
                        min_ity,
                        typeck_ity
                    );
                    // However, it is fine to make the discriminant type however large (as an
                    // optimisation) after this point; we'll just truncate the value we load
                    // in codegen.
                }

                // Check to see if we should use a different type for the
                // discriminant. We can safely use a type with the same size
                // as the alignment of the first field of each variant.
                // We increase the size of the discriminant to avoid LLVM copying
                // padding when it doesn't need to. This normally causes unaligned
                // load/stores and excessive memcpy/memset operations. By using a
                // bigger integer size, LLVM can be sure about its contents and
                // won't be so conservative.

                // Use the initial field alignment.
                let mut ity = if def.repr.c() || def.repr.int.is_some() {
                    min_ity
                } else {
                    Integer::for_align(dl, start_align).unwrap_or(min_ity)
                };

                // If the alignment is not larger than the chosen discriminant size,
                // don't use the alignment as the final size.
                if ity <= min_ity {
                    ity = min_ity;
                } else {
                    // Patch up the variants' first few fields.
                    let old_ity_size = min_ity.size();
                    let new_ity_size = ity.size();
                    for variant in &mut layout_variants {
                        match variant.fields {
                            FieldsShape::Arbitrary { ref mut offsets, .. } => {
                                for i in offsets {
                                    if *i <= old_ity_size {
                                        assert_eq!(*i, old_ity_size);
                                        *i = new_ity_size;
                                    }
                                }
                                // We might be making the struct larger.
                                if variant.size <= old_ity_size {
                                    variant.size = new_ity_size;
                                }
                            }
                            _ => bug!(),
                        }
                    }
                }

                let tag_mask = !0u128 >> (128 - ity.size().bits());
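                // Editor's note: e.g. for `ity == I8` this is
                // `!0u128 >> 120 == 0xFF`, so a discriminant of -1 is stored
                // as the tag value 0xFF.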
                let tag = Scalar {
                    value: Int(ity, signed),
                    valid_range: (min as u128 & tag_mask)..=(max as u128 & tag_mask),
                };
                let mut abi = Abi::Aggregate { sized: true };
                if tag.value.size(dl) == size {
                    abi = Abi::Scalar(tag.clone());
                } else {
                    // Try to use a ScalarPair for all tagged enums.
                    let mut common_prim = None;
                    for (field_layouts, layout_variant) in variants.iter().zip(&layout_variants) {
                        let offsets = match layout_variant.fields {
                            FieldsShape::Arbitrary { ref offsets, .. } => offsets,
                            _ => bug!(),
                        };
                        let mut fields =
                            field_layouts.iter().zip(offsets).filter(|p| !p.0.is_zst());
                        let (field, offset) = match (fields.next(), fields.next()) {
                            (None, None) => continue,
                            (Some(pair), None) => pair,
                            _ => {
                                common_prim = None;
                                break;
                            }
                        };
                        let prim = match field.abi {
                            Abi::Scalar(ref scalar) => scalar.value,
                            _ => {
                                common_prim = None;
                                break;
                            }
                        };
                        if let Some(pair) = common_prim {
                            // This is pretty conservative. We could go fancier
                            // by conflating things like i32 and u32, or even
                            // realising that (u8, u8) could just cohabit with
                            // u16 or even u32.
                            if pair != (prim, offset) {
                                common_prim = None;
                                break;
                            }
                        } else {
                            common_prim = Some((prim, offset));
                        }
                    }
                    if let Some((prim, offset)) = common_prim {
                        let pair = self.scalar_pair(tag.clone(), scalar_unit(prim));
                        let pair_offsets = match pair.fields {
                            FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
                                assert_eq!(memory_index, &[0, 1]);
                                offsets
                            }
                            _ => bug!(),
                        };
                        if pair_offsets[0] == Size::ZERO
                            && pair_offsets[1] == *offset
                            && align == pair.align
                            && size == pair.size
                        {
                            // We can use `ScalarPair` only when it matches our
                            // already computed layout (including `#[repr(C)]`).
                            abi = pair.abi;
                        }
                    }
                }

                if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
                    abi = Abi::Uninhabited;
                }

                let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag.clone());

                let tagged_layout = Layout {
                    variants: Variants::Multiple {
                        tag,
                        tag_encoding: TagEncoding::Direct,
                        tag_field: 0,
                        variants: layout_variants,
                    },
                    fields: FieldsShape::Arbitrary {
                        offsets: vec![Size::ZERO],
                        memory_index: vec![0],
                    },
                    largest_niche,
                    abi,
                    align,
                    size,
                };

                let best_layout = match (tagged_layout, niche_filling_layout) {
                    (tagged_layout, Some(niche_filling_layout)) => {
                        // Pick the smaller layout; otherwise,
                        // pick the layout with the larger niche; otherwise,
                        // pick tagged as it has simpler codegen.
                        cmp::min_by_key(tagged_layout, niche_filling_layout, |layout| {
                            let niche_size =
                                layout.largest_niche.as_ref().map_or(0, |n| n.available(dl));
                            (layout.size, cmp::Reverse(niche_size))
                        })
                    }
                    (tagged_layout, None) => tagged_layout,
                };

                tcx.intern_layout(best_layout)
            }

            // Types with no meaningful known layout.
            ty::Projection(_) | ty::Opaque(..) => {
                let normalized = tcx.normalize_erasing_regions(param_env, ty);
                if ty == normalized {
                    return Err(LayoutError::Unknown(ty));
                }
                tcx.layout_raw(param_env.and(normalized))?
            }

            ty::Placeholder(..) | ty::GeneratorWitness(..) | ty::Infer(_) => {
                bug!("Layout::compute: unexpected type `{}`", ty)
            }

            ty::Bound(..) | ty::Param(_) | ty::Error(_) => {
                return Err(LayoutError::Unknown(ty));
            }
        })
    }
}

/// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
#[derive(Clone, Debug, PartialEq)]
enum SavedLocalEligibility {
    Unassigned,
    Assigned(VariantIdx),
    // FIXME: Use newtype_index so we aren't wasting bytes
    Ineligible(Option<u32>),
}

// When laying out generators, we divide our saved local fields into two
// categories: overlap-eligible and overlap-ineligible.
//
// Those fields which are ineligible for overlap go in a "prefix" at the
// beginning of the layout, and always have space reserved for them.
//
// Overlap-eligible fields are only assigned to one variant, so we lay
// those fields out for each variant and put them right after the
// prefix.
//
// Finally, in the layout details, we point to the fields from the
// variants they are assigned to. It is possible for some fields to be
// included in multiple variants. No field ever "moves around" in the
// layout; its offset is always the same.
//
// Also included in the layout are the upvars and the discriminant.
// These are included as fields on the "outer" layout; they are not part
// of any variant.

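// Editor's illustration (not part of the original source): for a generator
// with a local `a` that is live across two different suspension points and a
// local `b` saved at exactly one of them, `a` is ineligible and is promoted
// into the shared prefix (after the upvars and the tag), while `b` is laid
// out inside its single variant and may overlap with eligible locals of
// other variants.
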
impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
    /// Compute the eligibility and assignment of each local.
    fn generator_saved_local_eligibility(
        &self,
        info: &GeneratorLayout<'tcx>,
    ) -> (BitSet<GeneratorSavedLocal>, IndexVec<GeneratorSavedLocal, SavedLocalEligibility>) {
        use SavedLocalEligibility::*;

        let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> =
            IndexVec::from_elem_n(Unassigned, info.field_tys.len());

        // The saved locals not eligible for overlap. These will get
        // "promoted" to the prefix of our generator.
        let mut ineligible_locals = BitSet::new_empty(info.field_tys.len());

        // Figure out which of our saved locals are fields in only
        // one variant. The rest are deemed ineligible for overlap.
        for (variant_index, fields) in info.variant_fields.iter_enumerated() {
            for local in fields {
                match assignments[*local] {
                    Unassigned => {
                        assignments[*local] = Assigned(variant_index);
                    }
                    Assigned(idx) => {
                        // We've already seen this local at another suspension
                        // point, so it is no longer a candidate.
                        trace!(
                            "removing local {:?} in >1 variant ({:?}, {:?})",
                            local,
                            variant_index,
                            idx
                        );
                        ineligible_locals.insert(*local);
                        assignments[*local] = Ineligible(None);
                    }
                    Ineligible(_) => {}
                }
            }
        }

        // Next, check every pair of eligible locals to see if they
        // conflict.
        for local_a in info.storage_conflicts.rows() {
            let conflicts_a = info.storage_conflicts.count(local_a);
            if ineligible_locals.contains(local_a) {
                continue;
            }

            for local_b in info.storage_conflicts.iter(local_a) {
                // local_a and local_b are storage live at the same time, therefore they
                // cannot overlap in the generator layout. The only way to guarantee
                // this is if they are in the same variant, or one is ineligible
                // (which means it is stored in every variant).
                if ineligible_locals.contains(local_b)
                    || assignments[local_a] == assignments[local_b]
                {
                    continue;
                }

                // If they conflict, we will choose one to make ineligible.
                // This is not always optimal; it's just a greedy heuristic that
                // seems to produce good results most of the time.
                let conflicts_b = info.storage_conflicts.count(local_b);
                let (remove, other) =
                    if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
                ineligible_locals.insert(remove);
                assignments[remove] = Ineligible(None);
                trace!("removing local {:?} due to conflict with {:?}", remove, other);
            }
        }

        // Count the number of variants in use. If only one of them, then it is
        // impossible to overlap any locals in our layout. In this case it's
        // always better to make the remaining locals ineligible, so we can
        // lay them out with the other locals in the prefix and eliminate
        // unnecessary padding bytes.
        {
            let mut used_variants = BitSet::new_empty(info.variant_fields.len());
            for assignment in &assignments {
                if let Assigned(idx) = assignment {
                    used_variants.insert(*idx);
                }
            }
            if used_variants.count() < 2 {
                for assignment in assignments.iter_mut() {
                    *assignment = Ineligible(None);
                }
                ineligible_locals.insert_all();
            }
        }

        // Write down the order of our locals that will be promoted to the prefix.
        {
            for (idx, local) in ineligible_locals.iter().enumerate() {
                assignments[local] = Ineligible(Some(idx as u32));
            }
        }
        debug!("generator saved local assignments: {:?}", assignments);

        (ineligible_locals, assignments)
    }

    /// Compute the full generator layout.
    fn generator_layout(
        &self,
        ty: Ty<'tcx>,
        def_id: hir::def_id::DefId,
        substs: SubstsRef<'tcx>,
    ) -> Result<&'tcx Layout, LayoutError<'tcx>> {
        use SavedLocalEligibility::*;
        let tcx = self.tcx;

        let subst_field = |ty: Ty<'tcx>| ty.subst(tcx, substs);

        let info = match tcx.generator_layout(def_id) {
            None => return Err(LayoutError::Unknown(ty)),
            Some(info) => info,
        };
        let (ineligible_locals, assignments) = self.generator_saved_local_eligibility(&info);

        // Build a prefix layout, including "promoting" all ineligible
        // locals as part of the prefix. We compute the layout of all of
        // these fields at once to get optimal packing.
        let tag_index = substs.as_generator().prefix_tys().count();

        // `info.variant_fields` already accounts for the reserved variants, so no need to add them.
        let max_discr = (info.variant_fields.len() - 1) as u128;
        let discr_int = Integer::fit_unsigned(max_discr);
        let discr_int_ty = discr_int.to_ty(tcx, false);
        let tag = Scalar { value: Primitive::Int(discr_int, false), valid_range: 0..=max_discr };
        let tag_layout = self.tcx.intern_layout(Layout::scalar(self, tag.clone()));
        let tag_layout = TyAndLayout { ty: discr_int_ty, layout: tag_layout };

        let promoted_layouts = ineligible_locals
            .iter()
            .map(|local| subst_field(info.field_tys[local]))
            .map(|ty| tcx.mk_maybe_uninit(ty))
            .map(|ty| self.layout_of(ty));
        let prefix_layouts = substs
            .as_generator()
            .prefix_tys()
            .map(|ty| self.layout_of(ty))
            .chain(iter::once(Ok(tag_layout)))
            .chain(promoted_layouts)
            .collect::<Result<Vec<_>, _>>()?;
        let prefix = self.univariant_uninterned(
            ty,
            &prefix_layouts,
            &ReprOptions::default(),
            StructKind::AlwaysSized,
        )?;

        let (prefix_size, prefix_align) = (prefix.size, prefix.align);

        // Split the prefix layout into the "outer" fields (upvars and
        // discriminant) and the "promoted" fields. Promoted fields will
        // get included in each variant that requested them in
        // `GeneratorLayout`.
        debug!("prefix = {:#?}", prefix);
        let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
            FieldsShape::Arbitrary { mut offsets, memory_index } => {
                let mut inverse_memory_index = invert_mapping(&memory_index);

                // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
                // "outer" and "promoted" fields respectively.
                let b_start = (tag_index + 1) as u32;
                let offsets_b = offsets.split_off(b_start as usize);
                let offsets_a = offsets;

                // Disentangle the "a" and "b" components of `inverse_memory_index`
                // by preserving the order but keeping only one disjoint "half" each.
                // FIXME(eddyb) build a better abstraction for permutations, if possible.
                let inverse_memory_index_b: Vec<_> =
                    inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
                inverse_memory_index.retain(|&i| i < b_start);
                let inverse_memory_index_a = inverse_memory_index;

                // Since `inverse_memory_index_{a,b}` each only refer to their
                // respective fields, they can be safely inverted.
                let memory_index_a = invert_mapping(&inverse_memory_index_a);
                let memory_index_b = invert_mapping(&inverse_memory_index_b);

                let outer_fields =
                    FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
                (outer_fields, offsets_b, memory_index_b)
            }
            _ => bug!(),
        };

        let mut size = prefix.size;
        let mut align = prefix.align;
        let variants = info
            .variant_fields
            .iter_enumerated()
            .map(|(index, variant_fields)| {
                // Only include overlap-eligible fields when we compute our variant layout.
                let variant_only_tys = variant_fields
                    .iter()
                    .filter(|local| match assignments[**local] {
                        Unassigned => bug!(),
                        Assigned(v) if v == index => true,
                        Assigned(_) => bug!("assignment does not match variant"),
                        Ineligible(_) => false,
                    })
                    .map(|local| subst_field(info.field_tys[*local]));

                let mut variant = self.univariant_uninterned(
                    ty,
                    &variant_only_tys
                        .map(|ty| self.layout_of(ty))
                        .collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    StructKind::Prefixed(prefix_size, prefix_align.abi),
                )?;
                variant.variants = Variants::Single { index };

                let (offsets, memory_index) = match variant.fields {
                    FieldsShape::Arbitrary { offsets, memory_index } => (offsets, memory_index),
                    _ => bug!(),
                };

                // Now, stitch the promoted and variant-only fields back together in
                // the order they are mentioned by our GeneratorLayout.
                // Because we only use some subset (that can differ between variants)
                // of the promoted fields, we can't just pick those elements of the
                // `promoted_memory_index` (as we'd end up with gaps).
                // So instead, we build an "inverse memory_index", as if all of the
                // promoted fields were being used, but leave the elements not in the
                // subset as `INVALID_FIELD_IDX`, which we can filter out later to
                // obtain a valid (bijective) mapping.
                const INVALID_FIELD_IDX: u32 = !0;
                let mut combined_inverse_memory_index =
                    vec![INVALID_FIELD_IDX; promoted_memory_index.len() + memory_index.len()];
                let mut offsets_and_memory_index = offsets.into_iter().zip(memory_index);
                let combined_offsets = variant_fields
                    .iter()
                    .enumerate()
                    .map(|(i, local)| {
                        let (offset, memory_index) = match assignments[*local] {
                            Unassigned => bug!(),
                            Assigned(_) => {
                                let (offset, memory_index) =
                                    offsets_and_memory_index.next().unwrap();
                                (offset, promoted_memory_index.len() as u32 + memory_index)
                            }
                            Ineligible(field_idx) => {
                                let field_idx = field_idx.unwrap() as usize;
                                (promoted_offsets[field_idx], promoted_memory_index[field_idx])
                            }
                        };
                        combined_inverse_memory_index[memory_index as usize] = i as u32;
                        offset
                    })
                    .collect();

                // Remove the unused slots and invert the mapping to obtain the
                // combined `memory_index` (also see previous comment).
                combined_inverse_memory_index.retain(|&i| i != INVALID_FIELD_IDX);
                let combined_memory_index = invert_mapping(&combined_inverse_memory_index);

                variant.fields = FieldsShape::Arbitrary {
                    offsets: combined_offsets,
                    memory_index: combined_memory_index,
                };

                size = size.max(variant.size);
                align = align.max(variant.align);
                Ok(variant)
            })
            .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

        size = size.align_to(align.abi);

        let abi = if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi.is_uninhabited())
        {
            Abi::Uninhabited
        } else {
            Abi::Aggregate { sized: true }
        };

        let layout = tcx.intern_layout(Layout {
            variants: Variants::Multiple {
                tag,
                tag_encoding: TagEncoding::Direct,
                tag_field: tag_index,
                variants,
            },
            fields: outer_fields,
            abi,
            largest_niche: prefix.largest_niche,
            size,
            align,
        });
        debug!("generator layout ({:?}): {:#?}", ty, layout);
        Ok(layout)
    }

    /// This is invoked by the `layout_raw` query to record the final
    /// layout of each type.
    #[inline]
    fn record_layout_for_printing(&self, layout: TyAndLayout<'tcx>) {
        // If we are running with `-Zprint-type-sizes`, maybe record layouts
        // for dumping later.
        if self.tcx.sess.opts.debugging_opts.print_type_sizes {
            self.record_layout_for_printing_outlined(layout)
        }
    }

    fn record_layout_for_printing_outlined(&self, layout: TyAndLayout<'tcx>) {
        // Ignore layouts that are done with non-empty environments or
        // non-monomorphic layouts, as the user only wants to see the stuff
        // resulting from the final codegen session.
        if layout.ty.has_param_types_or_consts() || !self.param_env.caller_bounds().is_empty() {
            return;
        }

        // (delay format until we actually need it)
        let record = |kind, packed, opt_discr_size, variants| {
            let type_desc = format!("{:?}", layout.ty);
            self.tcx.sess.code_stats.record_type_size(
                kind,
                type_desc,
                layout.align.abi,
                layout.size,
                packed,
                opt_discr_size,
                variants,
            );
        };

        let adt_def = match *layout.ty.kind() {
            ty::Adt(ref adt_def, _) => {
                debug!("print-type-size t: `{:?}` process adt", layout.ty);
                adt_def
            }

            ty::Closure(..) => {
                debug!("print-type-size t: `{:?}` record closure", layout.ty);
                record(DataTypeKind::Closure, false, None, vec![]);
                return;
            }

            _ => {
                debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
                return;
            }
        };

        let adt_kind = adt_def.adt_kind();
        let adt_packed = adt_def.repr.pack.is_some();

        let build_variant_info = |n: Option<Ident>, flds: &[Symbol], layout: TyAndLayout<'tcx>| {
            let mut min_size = Size::ZERO;
            let field_info: Vec<_> = flds
                .iter()
                .enumerate()
                .map(|(i, &name)| match layout.field(self, i) {
                    Err(err) => {
                        bug!("no layout found for field {}: `{:?}`", name, err);
                    }
                    Ok(field_layout) => {
                        let offset = layout.fields.offset(i);
                        let field_end = offset + field_layout.size;
                        if min_size < field_end {
                            min_size = field_end;
                        }
                        FieldInfo {
                            name: name.to_string(),
                            offset: offset.bytes(),
                            size: field_layout.size.bytes(),
                            align: field_layout.align.abi.bytes(),
                        }
                    }
                })
                .collect();

            VariantInfo {
                name: n.map(|n| n.to_string()),
                kind: if layout.is_unsized() { SizeKind::Min } else { SizeKind::Exact },
                align: layout.align.abi.bytes(),
                size: if min_size.bytes() == 0 { layout.size.bytes() } else { min_size.bytes() },
                fields: field_info,
            }
        };

        match layout.variants {
            Variants::Single { index } => {
                debug!("print-type-size `{:#?}` variant {}", layout, adt_def.variants[index].ident);
                if !adt_def.variants.is_empty() {
                    let variant_def = &adt_def.variants[index];
                    let fields: Vec<_> = variant_def.fields.iter().map(|f| f.ident.name).collect();
                    record(
                        adt_kind.into(),
                        adt_packed,
                        None,
                        vec![build_variant_info(Some(variant_def.ident), &fields, layout)],
                    );
                } else {
                    // (This case arises for *empty* enums; so give it
                    // zero variants.)
                    record(adt_kind.into(), adt_packed, None, vec![]);
                }
            }

            Variants::Multiple { ref tag, ref tag_encoding, .. } => {
                debug!(
                    "print-type-size `{:#?}` adt general variants def {}",
                    layout.ty,
                    adt_def.variants.len()
                );
                let variant_infos: Vec<_> = adt_def
                    .variants
                    .iter_enumerated()
                    .map(|(i, variant_def)| {
                        let fields: Vec<_> =
                            variant_def.fields.iter().map(|f| f.ident.name).collect();
                        build_variant_info(
                            Some(variant_def.ident),
                            &fields,
                            layout.for_variant(self, i),
                        )
                    })
                    .collect();
                record(
                    adt_kind.into(),
                    adt_packed,
                    match tag_encoding {
                        TagEncoding::Direct => Some(tag.value.size(self)),
                        _ => None,
                    },
                    variant_infos,
                );
            }
        }
    }
}

/// Type size "skeleton", i.e., the only information determining a type's size.
/// While this is conservative (aside from constant sizes, only pointers,
/// newtypes thereof, and null-pointer-optimized enums are allowed), it is
/// enough to statically check common use cases of transmute.
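// Editor's illustration (not part of the original source): for a type
// parameter `T`, both `&T` and `Option<&T>` compute to
// `SizeSkeleton::Pointer` with the same `tail`, so `same_size` can verify a
// transmute between them even though neither has a concrete `Layout` yet.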
#[derive(Copy, Clone, Debug)]
pub enum SizeSkeleton<'tcx> {
    /// Any statically computable Layout.
    Known(Size),

    /// A potentially-fat pointer.
    Pointer {
        /// If true, this pointer is never null.
        non_zero: bool,
        /// The type which determines the unsized metadata, if any,
        /// of this pointer. Either a type parameter or a projection
        /// depending on one, with regions erased.
        tail: Ty<'tcx>,
    },
}

impl<'tcx> SizeSkeleton<'tcx> {
    pub fn compute(
        ty: Ty<'tcx>,
        tcx: TyCtxt<'tcx>,
        param_env: ty::ParamEnv<'tcx>,
    ) -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
        debug_assert!(!ty.has_infer_types_or_consts());

        // First try computing a static layout.
        let err = match tcx.layout_of(param_env.and(ty)) {
            Ok(layout) => {
                return Ok(SizeSkeleton::Known(layout.size));
            }
            Err(err) => err,
        };

        match *ty.kind() {
            ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                let non_zero = !ty.is_unsafe_ptr();
                let tail = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
                match tail.kind() {
                    ty::Param(_) | ty::Projection(_) => {
                        debug_assert!(tail.has_param_types_or_consts());
                        Ok(SizeSkeleton::Pointer { non_zero, tail: tcx.erase_regions(tail) })
                    }
                    _ => bug!(
                        "SizeSkeleton::compute({}): layout errored ({}), yet \
                         tail `{}` is not a type parameter or a projection",
                        ty,
                        err,
                        tail
                    ),
                }
            }

            ty::Adt(def, substs) => {
                // Only newtypes and enums with the nullable-pointer optimization.
                if def.is_union() || def.variants.is_empty() || def.variants.len() > 2 {
                    return Err(err);
                }

                // Get a zero-sized variant or a pointer newtype.
                let zero_or_ptr_variant = |i| {
                    let i = VariantIdx::new(i);
                    let fields = def.variants[i]
                        .fields
                        .iter()
                        .map(|field| SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env));
                    let mut ptr = None;
                    for field in fields {
                        let field = field?;
                        match field {
                            SizeSkeleton::Known(size) => {
                                if size.bytes() > 0 {
                                    return Err(err);
                                }
                            }
                            SizeSkeleton::Pointer { .. } => {
                                if ptr.is_some() {
                                    return Err(err);
                                }
                                ptr = Some(field);
                            }
                        }
                    }
                    Ok(ptr)
                };

                let v0 = zero_or_ptr_variant(0)?;
                // Newtype.
                if def.variants.len() == 1 {
                    if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
                        return Ok(SizeSkeleton::Pointer {
                            non_zero: non_zero
                                || match tcx.layout_scalar_valid_range(def.did) {
                                    (Bound::Included(start), Bound::Unbounded) => start > 0,
                                    (Bound::Included(start), Bound::Included(end)) => {
                                        0 < start && start < end
                                    }
                                    _ => false,
                                },
                            tail,
                        });
                    } else {
                        return Err(err);
                    }
                }

                let v1 = zero_or_ptr_variant(1)?;
                // Nullable pointer enum optimization.
                match (v0, v1) {
                    (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None)
                    | (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
                        Ok(SizeSkeleton::Pointer { non_zero: false, tail })
                    }
                    _ => Err(err),
                }
            }

            ty::Projection(_) | ty::Opaque(..) => {
                let normalized = tcx.normalize_erasing_regions(param_env, ty);
                if ty == normalized {
                    Err(err)
                } else {
                    SizeSkeleton::compute(normalized, tcx, param_env)
                }
            }

            _ => Err(err),
        }
    }

    pub fn same_size(self, other: SizeSkeleton<'_>) -> bool {
        match (self, other) {
            (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
            (SizeSkeleton::Pointer { tail: a, .. }, SizeSkeleton::Pointer { tail: b, .. }) => {
                a == b
            }
            _ => false,
        }
    }
}

pub trait HasTyCtxt<'tcx>: HasDataLayout {
    fn tcx(&self) -> TyCtxt<'tcx>;
}

pub trait HasParamEnv<'tcx> {
    fn param_env(&self) -> ty::ParamEnv<'tcx>;
}

impl<'tcx> HasDataLayout for TyCtxt<'tcx> {
    fn data_layout(&self) -> &TargetDataLayout {
        &self.data_layout
    }
}

impl<'tcx> HasTyCtxt<'tcx> for TyCtxt<'tcx> {
    fn tcx(&self) -> TyCtxt<'tcx> {
        *self
    }
}

impl<'tcx, C> HasParamEnv<'tcx> for LayoutCx<'tcx, C> {
    fn param_env(&self) -> ty::ParamEnv<'tcx> {
        self.param_env
    }
}

impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
    fn data_layout(&self) -> &TargetDataLayout {
        self.tcx.data_layout()
    }
}

impl<'tcx, T: HasTyCtxt<'tcx>> HasTyCtxt<'tcx> for LayoutCx<'tcx, T> {
    fn tcx(&self) -> TyCtxt<'tcx> {
        self.tcx.tcx()
    }
}

pub type TyAndLayout<'tcx> = rustc_target::abi::TyAndLayout<'tcx, Ty<'tcx>>;
impl<'tcx> LayoutOf for LayoutCx<'tcx, TyCtxt<'tcx>> {
    type Ty = Ty<'tcx>;
    type TyAndLayout = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;

    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout {
        let param_env = self.param_env.with_reveal_all_normalized(self.tcx);
        let ty = self.tcx.normalize_erasing_regions(param_env, ty);
        let layout = self.tcx.layout_raw(param_env.and(ty))?;
        let layout = TyAndLayout { ty, layout };

        // N.B., this recording is normally disabled; when enabled, it
        // can however trigger recursive invocations of `layout_of`.
        // Therefore, we execute it *after* the main query has
        // completed, to avoid problems around recursive structures
        // and the like. (Admittedly, I wasn't able to reproduce a problem
        // here, but it seems like the right thing to do. -nmatsakis)
        self.record_layout_for_printing(layout);

        Ok(layout)
    }
}
impl LayoutOf for LayoutCx<'tcx, ty::query::TyCtxtAt<'tcx>> {
    type Ty = Ty<'tcx>;
    type TyAndLayout = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;

    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout {
        let param_env = self.param_env.with_reveal_all_normalized(*self.tcx);
        let ty = self.tcx.normalize_erasing_regions(param_env, ty);
        let layout = self.tcx.layout_raw(param_env.and(ty))?;
        let layout = TyAndLayout { ty, layout };

        // N.B., this recording is normally disabled; when enabled, it
        // can however trigger recursive invocations of `layout_of`.
        // Therefore, we execute it *after* the main query has
        // completed, to avoid problems around recursive structures
        // and the like. (Admittedly, I wasn't able to reproduce a problem
        // here, but it seems like the right thing to do. -nmatsakis)
        let cx = LayoutCx { tcx: *self.tcx, param_env: self.param_env };
        cx.record_layout_for_printing(layout);

        Ok(layout)
    }
}
// Helper (inherent) `layout_of` methods to avoid pushing `LayoutCx` to users.
impl TyCtxt<'tcx> {
    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    #[inline]
    pub fn layout_of(
        self,
        param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
    ) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
        let cx = LayoutCx { tcx: self, param_env: param_env_and_ty.param_env };
        cx.layout_of(param_env_and_ty.value)
    }
}
impl ty::query::TyCtxtAt<'tcx> {
    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    #[inline]
    pub fn layout_of(
        self,
        param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
    ) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
        let cx = LayoutCx { tcx: self.at(self.span), param_env: param_env_and_ty.param_env };
        cx.layout_of(param_env_and_ty.value)
    }
}
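// A minimal sketch of the helpers above in use (the function name
// `size_and_align_sketch` is hypothetical, shown for illustration only):
// compute a monomorphic type's size and ABI alignment in "reveal all" mode.
#[allow(dead_code)]
fn size_and_align_sketch<'tcx>(
    tcx: TyCtxt<'tcx>,
    ty: Ty<'tcx>,
) -> Result<(Size, Align), LayoutError<'tcx>> {
    // `ParamEnv::reveal_all()` is appropriate post-typeck, e.g. in codegen.
    let layout = tcx.layout_of(ty::ParamEnv::reveal_all().and(ty))?;
    Ok((layout.size, layout.align.abi))
}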
impl<'tcx, C> TyAndLayoutMethods<'tcx, C> for Ty<'tcx>
where
    C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout: MaybeResult<TyAndLayout<'tcx>>>
        + HasTyCtxt<'tcx>
        + HasParamEnv<'tcx>,
{
    fn for_variant(
        this: TyAndLayout<'tcx>,
        cx: &C,
        variant_index: VariantIdx,
    ) -> TyAndLayout<'tcx> {
        let layout = match this.variants {
            Variants::Single { index }
                // If all variants but one are uninhabited, the variant layout is the enum layout.
                if index == variant_index &&
                // Don't confuse variants of uninhabited enums with the enum itself.
                // For more details see https://github.com/rust-lang/rust/issues/69763.
                this.fields != FieldsShape::Primitive =>
            {
                this.layout
            }

            Variants::Single { index } => {
                // Deny calling for_variant more than once for non-Single enums.
                if let Ok(original_layout) = cx.layout_of(this.ty).to_result() {
                    assert_eq!(original_layout.variants, Variants::Single { index });
                }

                let fields = match this.ty.kind() {
                    ty::Adt(def, _) if def.variants.is_empty() =>
                        bug!("for_variant called on zero-variant enum"),
                    ty::Adt(def, _) => def.variants[variant_index].fields.len(),
                    _ => bug!(),
                };
                let tcx = cx.tcx();
                tcx.intern_layout(Layout {
                    variants: Variants::Single { index: variant_index },
                    fields: match NonZeroUsize::new(fields) {
                        Some(fields) => FieldsShape::Union(fields),
                        None => FieldsShape::Arbitrary { offsets: vec![], memory_index: vec![] },
                    },
                    abi: Abi::Uninhabited,
                    largest_niche: None,
                    align: tcx.data_layout.i8_align,
                    size: Size::ZERO,
                })
            }

            Variants::Multiple { ref variants, .. } => &variants[variant_index],
        };

        assert_eq!(layout.variants, Variants::Single { index: variant_index });

        TyAndLayout { ty: this.ty, layout }
    }
    fn field(this: TyAndLayout<'tcx>, cx: &C, i: usize) -> C::TyAndLayout {
        enum TyMaybeWithLayout<C: LayoutOf> {
            Ty(C::Ty),
            TyAndLayout(C::TyAndLayout),
        }

        fn ty_and_layout_kind<
            C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout: MaybeResult<TyAndLayout<'tcx>>>
                + HasTyCtxt<'tcx>
                + HasParamEnv<'tcx>,
        >(
            this: TyAndLayout<'tcx>,
            cx: &C,
            i: usize,
            ty: Ty<'tcx>,
        ) -> TyMaybeWithLayout<C> {
            let tcx = cx.tcx();
            let tag_layout = |tag: &Scalar| -> C::TyAndLayout {
                let layout = Layout::scalar(cx, tag.clone());
                MaybeResult::from(Ok(TyAndLayout {
                    layout: tcx.intern_layout(layout),
                    ty: tag.value.to_ty(tcx),
                }))
            };
            match *ty.kind() {
                ty::Bool
                | ty::Char
                | ty::Int(_)
                | ty::Uint(_)
                | ty::Float(_)
                | ty::FnPtr(_)
                | ty::Never
                | ty::FnDef(..)
                | ty::GeneratorWitness(..)
                | ty::Foreign(..)
                | ty::Dynamic(..) => bug!("TyAndLayout::field_type({:?}): not applicable", this),
                // Potentially-fat pointers.
                ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                    assert!(i < this.fields.count());

                    // Reuse the fat `*T` type as its own thin pointer data field.
                    // This provides information about, e.g., DST struct pointees
                    // (which may have no non-DST form), and will work as long
                    // as the `Abi` or `FieldsShape` is checked by users.
                    if i == 0 {
                        let nil = tcx.mk_unit();
                        let ptr_ty = if ty.is_unsafe_ptr() {
                            tcx.mk_mut_ptr(nil)
                        } else {
                            tcx.mk_mut_ref(tcx.lifetimes.re_static, nil)
                        };
                        return TyMaybeWithLayout::TyAndLayout(MaybeResult::from(
                            cx.layout_of(ptr_ty).to_result().map(|mut ptr_layout| {
                                ptr_layout.ty = ty;
                                ptr_layout
                            }),
                        ));
                    }

                    match tcx.struct_tail_erasing_lifetimes(pointee, cx.param_env()).kind() {
                        ty::Slice(_) | ty::Str => TyMaybeWithLayout::Ty(tcx.types.usize),
                        ty::Dynamic(_, _) => {
                            TyMaybeWithLayout::Ty(tcx.mk_imm_ref(
                                tcx.lifetimes.re_static,
                                tcx.mk_array(tcx.types.usize, 3),
                            ))
                            /* FIXME: use actual fn pointers
                            Warning: naively computing the number of entries in the
                            vtable by counting the methods on the trait + methods on
                            all parent traits does not work, because some methods can
                            be not object safe and thus excluded from the vtable.
                            Increase this counter if you tried to implement this but
                            failed to do it without duplicating a lot of code from
                            other places in the compiler: 2
                            tcx.mk_tup(&[
                                tcx.mk_array(tcx.types.usize, 3),
                                tcx.mk_array(Option<fn()>),
                            ])
                            */
                        }
                        _ => bug!("TyAndLayout::field_type({:?}): not applicable", this),
                    }
                }
                // Arrays and slices.
                ty::Array(element, _) | ty::Slice(element) => TyMaybeWithLayout::Ty(element),
                ty::Str => TyMaybeWithLayout::Ty(tcx.types.u8),
                // Tuples, generators and closures.
                ty::Closure(_, ref substs) => {
                    ty_and_layout_kind(this, cx, i, substs.as_closure().tupled_upvars_ty())
                }

                ty::Generator(def_id, ref substs, _) => match this.variants {
                    Variants::Single { index } => TyMaybeWithLayout::Ty(
                        substs
                            .as_generator()
                            .state_tys(def_id, tcx)
                            .nth(index.as_usize())
                            .unwrap()
                            .nth(i)
                            .unwrap(),
                    ),
                    Variants::Multiple { ref tag, tag_field, .. } => {
                        if i == tag_field {
                            return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
                        }
                        TyMaybeWithLayout::Ty(substs.as_generator().prefix_tys().nth(i).unwrap())
                    }
                },

                ty::Tuple(tys) => TyMaybeWithLayout::Ty(tys[i].expect_ty()),
                // ADTs.
                ty::Adt(def, substs) => {
                    match this.variants {
                        Variants::Single { index } => {
                            TyMaybeWithLayout::Ty(def.variants[index].fields[i].ty(tcx, substs))
                        }

                        // Discriminant field for enums (where applicable).
                        Variants::Multiple { ref tag, .. } => {
                            assert_eq!(i, 0);
                            return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
                        }
                    }
                }

                ty::Projection(_)
                | ty::Bound(..)
                | ty::Placeholder(..)
                | ty::Opaque(..)
                | ty::Param(_)
                | ty::Infer(_)
                | ty::Error(_) => bug!("TyAndLayout::field_type: unexpected type `{}`", this.ty),
            }
        }
        cx.layout_of(match ty_and_layout_kind(this, cx, i, this.ty) {
            TyMaybeWithLayout::Ty(result) => result,
            TyMaybeWithLayout::TyAndLayout(result) => return result,
        })
    }
    fn pointee_info_at(this: TyAndLayout<'tcx>, cx: &C, offset: Size) -> Option<PointeeInfo> {
        let addr_space_of_ty = |ty: Ty<'tcx>| {
            if ty.is_fn() { cx.data_layout().instruction_address_space } else { AddressSpace::DATA }
        };

        let pointee_info = match *this.ty.kind() {
            ty::RawPtr(mt) if offset.bytes() == 0 => {
                cx.layout_of(mt.ty).to_result().ok().map(|layout| PointeeInfo {
                    size: layout.size,
                    align: layout.align.abi,
                    safe: None,
                    address_space: addr_space_of_ty(mt.ty),
                })
            }
            ty::FnPtr(fn_sig) if offset.bytes() == 0 => {
                cx.layout_of(cx.tcx().mk_fn_ptr(fn_sig)).to_result().ok().map(|layout| {
                    PointeeInfo {
                        size: layout.size,
                        align: layout.align.abi,
                        safe: None,
                        address_space: cx.data_layout().instruction_address_space,
                    }
                })
            }
            ty::Ref(_, ty, mt) if offset.bytes() == 0 => {
                let address_space = addr_space_of_ty(ty);
                let tcx = cx.tcx();
                let is_freeze = ty.is_freeze(tcx.at(DUMMY_SP), cx.param_env());
                let kind = match mt {
                    hir::Mutability::Not => {
                        if is_freeze {
                            PointerKind::Frozen
                        } else {
                            PointerKind::Shared
                        }
                    }
                    hir::Mutability::Mut => {
                        // Previously we would only emit noalias annotations for LLVM >= 6 or in
                        // panic=abort mode. That was deemed right, as prior versions had many bugs
                        // in conjunction with unwinding, but later versions didn't seem to have
                        // said issues. See issue #31681.
                        //
                        // Alas, later on we encountered a case where noalias would generate wrong
                        // code altogether even with recent versions of LLVM in *safe* code with no
                        // unwinding involved. See #54462.
                        //
                        // For now, do not enable mutable_noalias by default at all, while the
                        // issue is being figured out.
                        if tcx.sess.opts.debugging_opts.mutable_noalias {
                            PointerKind::UniqueBorrowed
                        } else {
                            PointerKind::Shared
                        }
                    }
                };
                cx.layout_of(ty).to_result().ok().map(|layout| PointeeInfo {
                    size: layout.size,
                    align: layout.align.abi,
                    safe: Some(kind),
                    address_space,
                })
            }

            _ => {
                let mut data_variant = match this.variants {
                    // Within the discriminant field, only the niche itself is
                    // always initialized, so we only check for a pointer at its
                    // offset.
                    //
                    // If the niche is a pointer, it's either valid (according
                    // to its type), or null (which the niche field's scalar
                    // validity range encodes). This allows using
                    // `dereferenceable_or_null` for e.g., `Option<&T>`, and
                    // this will continue to work as long as we don't start
                    // using more niches than just null (e.g., the first page of
                    // the address space, or unaligned pointers).
                    Variants::Multiple {
                        tag_encoding: TagEncoding::Niche { dataful_variant, .. },
                        tag_field,
                        ..
                    } if this.fields.offset(tag_field) == offset => {
                        Some(this.for_variant(cx, dataful_variant))
                    }
                    _ => Some(this),
                };
                if let Some(variant) = data_variant {
                    // We're not interested in any unions.
                    if let FieldsShape::Union(_) = variant.fields {
                        data_variant = None;
                    }
                }
                let mut result = None;

                if let Some(variant) = data_variant {
                    let ptr_end = offset + Pointer.size(cx);
                    for i in 0..variant.fields.count() {
                        let field_start = variant.fields.offset(i);
                        if field_start <= offset {
                            let field = variant.field(cx, i);
                            result = field.to_result().ok().and_then(|field| {
                                if ptr_end <= field_start + field.size {
                                    // We found the right field, look inside it.
                                    field.pointee_info_at(cx, offset - field_start)
                                } else {
                                    None
                                }
                            });
                            if result.is_some() {
                                break;
                            }
                        }
                    }
                }
                // FIXME(eddyb) This should be for `ptr::Unique<T>`, not `Box<T>`.
                if let Some(ref mut pointee) = result {
                    if let ty::Adt(def, _) = this.ty.kind() {
                        if def.is_box() && offset.bytes() == 0 {
                            pointee.safe = Some(PointerKind::UniqueOwned);
                        }
                    }
                }

                result
            }
        };

        debug!(
            "pointee_info_at (offset={:?}, type kind: {:?}) => {:?}",
            offset,
            this.ty.kind(),
            pointee_info
        );

        pointee_info
    }
}
impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for LayoutError<'tcx> {
    fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
        use crate::ty::layout::LayoutError::*;
        mem::discriminant(self).hash_stable(hcx, hasher);

        match *self {
            Unknown(t) | SizeOverflow(t) => t.hash_stable(hcx, hasher),
        }
    }
}
impl<'tcx> ty::Instance<'tcx> {
    // NOTE(eddyb) this is private to avoid using it from outside of
    // `FnAbi::of_instance` - any other uses are either too high-level
    // for `Instance` (e.g. typeck would use `Ty::fn_sig` instead),
    // or should go through `FnAbi` instead, to avoid losing any
    // adjustments `FnAbi::of_instance` might be performing.
    fn fn_sig_for_fn_abi(&self, tcx: TyCtxt<'tcx>) -> ty::PolyFnSig<'tcx> {
        // FIXME(davidtwco,eddyb): A `ParamEnv` should be passed through to this function.
        let ty = self.ty(tcx, ty::ParamEnv::reveal_all());
        match *ty.kind() {
            ty::FnDef(..) => {
                // HACK(davidtwco,eddyb): This is a workaround for polymorphization considering
                // parameters unused if they show up in the signature, but not in the `mir::Body`
                // (i.e. due to being inside a projection that got normalized, see
                // `src/test/ui/polymorphization/normalized_sig_types.rs`), and codegen not keeping
                // track of a polymorphization `ParamEnv` to allow normalizing later.
                let mut sig = match *ty.kind() {
                    ty::FnDef(def_id, substs) => tcx
                        .normalize_erasing_regions(tcx.param_env(def_id), tcx.fn_sig(def_id))
                        .subst(tcx, substs),
                    _ => unreachable!(),
                };

                if let ty::InstanceDef::VtableShim(..) = self.def {
                    // Modify `fn(self, ...)` to `fn(self: *mut Self, ...)`.
                    sig = sig.map_bound(|mut sig| {
                        let mut inputs_and_output = sig.inputs_and_output.to_vec();
                        inputs_and_output[0] = tcx.mk_mut_ptr(inputs_and_output[0]);
                        sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);
                        sig
                    });
                }
                sig
            }
            ty::Closure(def_id, substs) => {
                let sig = substs.as_closure().sig();

                let env_ty = tcx.closure_env_ty(def_id, substs).unwrap();
                sig.map_bound(|sig| {
                    tcx.mk_fn_sig(
                        iter::once(env_ty.skip_binder()).chain(sig.inputs().iter().cloned()),
                        sig.output(),
                        sig.c_variadic,
                        sig.unsafety,
                        sig.abi,
                    )
                })
            }
            ty::Generator(_, substs, _) => {
                let sig = substs.as_generator().poly_sig();

                let br = ty::BoundRegion { kind: ty::BrEnv };
                let env_region = ty::ReLateBound(ty::INNERMOST, br);
                let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty);

                let pin_did = tcx.require_lang_item(LangItem::Pin, None);
                let pin_adt_ref = tcx.adt_def(pin_did);
                let pin_substs = tcx.intern_substs(&[env_ty.into()]);
                let env_ty = tcx.mk_adt(pin_adt_ref, pin_substs);

                sig.map_bound(|sig| {
                    let state_did = tcx.require_lang_item(LangItem::GeneratorState, None);
                    let state_adt_ref = tcx.adt_def(state_did);
                    let state_substs =
                        tcx.intern_substs(&[sig.yield_ty.into(), sig.return_ty.into()]);
                    let ret_ty = tcx.mk_adt(state_adt_ref, state_substs);

                    tcx.mk_fn_sig(
                        [env_ty, sig.resume_ty].iter(),
                        &ret_ty,
                        false,
                        hir::Unsafety::Normal,
                        rustc_target::spec::abi::Abi::Rust,
                    )
                })
            }
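            // Note: the arm above effectively builds the resume signature
            // `fn(Pin<&mut Self>, ResumeTy) -> GeneratorState<YieldTy, ReturnTy>`
            // (an informal paraphrase, not code in this module).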
            _ => bug!("unexpected type {:?} in Instance::fn_sig", ty),
        }
    }
}
pub trait FnAbiExt<'tcx, C>
where
    C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout = TyAndLayout<'tcx>>
        + HasDataLayout
        + HasTargetSpec
        + HasTyCtxt<'tcx>
        + HasParamEnv<'tcx>,
{
    /// Compute a `FnAbi` suitable for indirect calls, i.e. to `fn` pointers.
    ///
    /// NB: this doesn't handle virtual calls - those should use `FnAbi::of_instance`
    /// instead, where the instance is an `InstanceDef::Virtual`.
    fn of_fn_ptr(cx: &C, sig: ty::PolyFnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self;
    /// Compute a `FnAbi` suitable for declaring/defining an `fn` instance, and for
    /// direct calls to an `fn`.
    ///
    /// NB: that includes virtual calls, which are represented by "direct calls"
    /// to an `InstanceDef::Virtual` instance (of `<dyn Trait as Trait>::fn`).
    fn of_instance(cx: &C, instance: ty::Instance<'tcx>, extra_args: &[Ty<'tcx>]) -> Self;

    fn new_internal(
        cx: &C,
        sig: ty::PolyFnSig<'tcx>,
        extra_args: &[Ty<'tcx>],
        caller_location: Option<Ty<'tcx>>,
        codegen_fn_attr_flags: CodegenFnAttrFlags,
        make_self_ptr_thin: bool,
    ) -> Self;
    fn adjust_for_abi(&mut self, cx: &C, abi: SpecAbi);
}
pub fn fn_can_unwind(
    panic_strategy: PanicStrategy,
    codegen_fn_attr_flags: CodegenFnAttrFlags,
    call_conv: Conv,
) -> bool {
    if panic_strategy != PanicStrategy::Unwind {
        // In panic=abort mode we assume nothing can unwind anywhere, so
        // optimize based on this!
        false
    } else if codegen_fn_attr_flags.contains(CodegenFnAttrFlags::UNWIND) {
        // If a specific #[unwind] attribute is present, use that.
        true
    } else if codegen_fn_attr_flags.contains(CodegenFnAttrFlags::RUSTC_ALLOCATOR_NOUNWIND) {
        // Special attribute for allocator functions, which can't unwind.
        false
    } else {
        if call_conv == Conv::Rust {
            // Any Rust method (or `extern "Rust" fn` or `extern
            // "rust-call" fn`) is explicitly allowed to unwind
            // (unless it has a no-unwind attribute, handled above).
            true
        } else {
            // Anything else is either:
            //
            // 1. A foreign item using a non-Rust ABI (like `extern "C" { fn foo(); }`), or
            //
            // 2. A Rust item using a non-Rust ABI (like `extern "C" fn foo() { ... }`).
            //
            // Foreign items (case 1) are assumed to not unwind; it is
            // UB otherwise. (At least for now; see also
            // rust-lang/rust#63909 and Rust RFC 2753.)
            //
            // Items defined in Rust with non-Rust ABIs (case 2) are also
            // not supposed to unwind. Whether this should be enforced
            // (versus stating it is UB) and *how* it would be enforced
            // is currently under discussion; see rust-lang/rust#58794.
            //
            // In either case, we mark the item as explicitly nounwind.
            false
        }
    }
}
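// A hedged illustration of the policy above (the function name is
// hypothetical; this is not a test in this file):
#[allow(dead_code)]
fn fn_can_unwind_examples() {
    let no_flags = CodegenFnAttrFlags::empty();
    // A Rust-ABI function under panic=unwind may unwind.
    assert!(fn_can_unwind(PanicStrategy::Unwind, no_flags, Conv::Rust));
    // Under panic=abort, nothing is considered able to unwind.
    assert!(!fn_can_unwind(PanicStrategy::Abort, no_flags, Conv::Rust));
    // Non-Rust ABIs (e.g. `extern "C"`) are treated as nounwind.
    assert!(!fn_can_unwind(PanicStrategy::Unwind, no_flags, Conv::C));
}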
impl<'tcx, C> FnAbiExt<'tcx, C> for call::FnAbi<'tcx, Ty<'tcx>>
where
    C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout = TyAndLayout<'tcx>>
        + HasDataLayout
        + HasTargetSpec
        + HasTyCtxt<'tcx>
        + HasParamEnv<'tcx>,
{
    fn of_fn_ptr(cx: &C, sig: ty::PolyFnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
        // Assume that fn pointers may always unwind.
        let codegen_fn_attr_flags = CodegenFnAttrFlags::UNWIND;

        call::FnAbi::new_internal(cx, sig, extra_args, None, codegen_fn_attr_flags, false)
    }
    fn of_instance(cx: &C, instance: ty::Instance<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
        let sig = instance.fn_sig_for_fn_abi(cx.tcx());

        let caller_location = if instance.def.requires_caller_location(cx.tcx()) {
            Some(cx.tcx().caller_location_ty())
        } else {
            None
        };

        let attrs = cx.tcx().codegen_fn_attrs(instance.def_id()).flags;

        call::FnAbi::new_internal(
            cx,
            sig,
            extra_args,
            caller_location,
            attrs,
            matches!(instance.def, ty::InstanceDef::Virtual(..)),
        )
    }
    fn new_internal(
        cx: &C,
        sig: ty::PolyFnSig<'tcx>,
        extra_args: &[Ty<'tcx>],
        caller_location: Option<Ty<'tcx>>,
        codegen_fn_attr_flags: CodegenFnAttrFlags,
        force_thin_self_ptr: bool,
    ) -> Self {
        debug!("FnAbi::new_internal({:?}, {:?})", sig, extra_args);

        let sig = cx.tcx().normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), sig);
        use rustc_target::spec::abi::Abi::*;
        let conv = match cx.tcx().sess.target.adjust_abi(sig.abi) {
            RustIntrinsic | PlatformIntrinsic | Rust | RustCall => Conv::Rust,

            // It's the ABI's job to select this, not ours.
            System => bug!("system abi should be selected elsewhere"),
            EfiApi => bug!("eficall abi should be selected elsewhere"),

            Stdcall => Conv::X86Stdcall,
            Fastcall => Conv::X86Fastcall,
            Vectorcall => Conv::X86VectorCall,
            Thiscall => Conv::X86ThisCall,
            C => Conv::C,
            Unadjusted => Conv::C,
            Win64 => Conv::X86_64Win64,
            SysV64 => Conv::X86_64SysV,
            Aapcs => Conv::ArmAapcs,
            CCmseNonSecureCall => Conv::CCmseNonSecureCall,
            PtxKernel => Conv::PtxKernel,
            Msp430Interrupt => Conv::Msp430Intr,
            X86Interrupt => Conv::X86Intr,
            AmdGpuKernel => Conv::AmdGpuKernel,
            AvrInterrupt => Conv::AvrInterrupt,
            AvrNonBlockingInterrupt => Conv::AvrNonBlockingInterrupt,

            // These API constants ought to be more specific...
            Cdecl => Conv::C,
        };
        let mut inputs = sig.inputs();
        let extra_args = if sig.abi == RustCall {
            assert!(!sig.c_variadic && extra_args.is_empty());

            if let Some(input) = sig.inputs().last() {
                if let ty::Tuple(tupled_arguments) = input.kind() {
                    inputs = &sig.inputs()[0..sig.inputs().len() - 1];
                    tupled_arguments.iter().map(|k| k.expect_ty()).collect()
                } else {
                    bug!(
                        "argument to function with \"rust-call\" ABI \
                         is not a tuple"
                    );
                }
            } else {
                bug!(
                    "argument to function with \"rust-call\" ABI \
                     is not a tuple"
                );
            }
        } else {
            assert!(sig.c_variadic || extra_args.is_empty());
            extra_args.to_vec()
        };
        let target = &cx.tcx().sess.target;
        let target_env_gnu_like = matches!(&target.env[..], "gnu" | "musl");
        let win_x64_gnu = target.os == "windows" && target.arch == "x86_64" && target.env == "gnu";
        let linux_s390x_gnu_like =
            target.os == "linux" && target.arch == "s390x" && target_env_gnu_like;
        let linux_sparc64_gnu_like =
            target.os == "linux" && target.arch == "sparc64" && target_env_gnu_like;
        let linux_powerpc_gnu_like =
            target.os == "linux" && target.arch == "powerpc" && target_env_gnu_like;
        let rust_abi = matches!(sig.abi, RustIntrinsic | PlatformIntrinsic | Rust | RustCall);
        // Handle safe Rust thin and fat pointers.
        let adjust_for_rust_scalar = |attrs: &mut ArgAttributes,
                                      scalar: &Scalar,
                                      layout: TyAndLayout<'tcx>,
                                      offset: Size,
                                      is_return: bool| {
            // Booleans are always an i1 that needs to be zero-extended.
            if scalar.is_bool() {
                attrs.ext(ArgExtension::Zext);
                return;
            }

            // Only pointer types are handled below.
            if scalar.value != Pointer {
                return;
            }

            if scalar.valid_range.start() < scalar.valid_range.end() {
                if *scalar.valid_range.start() > 0 {
                    attrs.set(ArgAttribute::NonNull);
                }
            }

            if let Some(pointee) = layout.pointee_info_at(cx, offset) {
                if let Some(kind) = pointee.safe {
                    attrs.pointee_align = Some(pointee.align);

                    // `Box` pointers (`UniqueOwned`) are not necessarily dereferenceable
                    // for the entire duration of the function as they can be deallocated
                    // at any time. Set their valid size to 0.
                    attrs.pointee_size = match kind {
                        PointerKind::UniqueOwned => Size::ZERO,
                        _ => pointee.size,
                    };

                    // `Box` pointer parameters never alias because ownership is transferred;
                    // `&mut` pointer parameters never alias other parameters
                    // or mutable global data.
                    //
                    // `&T` where `T` contains no `UnsafeCell<U>` is immutable,
                    // and can be marked as both `readonly` and `noalias`, as
                    // LLVM's definition of `noalias` is based solely on memory
                    // dependencies rather than pointer equality.
                    let no_alias = match kind {
                        PointerKind::Shared => false,
                        PointerKind::UniqueOwned => true,
                        PointerKind::Frozen | PointerKind::UniqueBorrowed => !is_return,
                    };
                    if no_alias {
                        attrs.set(ArgAttribute::NoAlias);
                    }

                    if kind == PointerKind::Frozen && !is_return {
                        attrs.set(ArgAttribute::ReadOnly);
                    }
                }
            }
        };
        let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| {
            let is_return = arg_idx.is_none();

            let layout = cx.layout_of(ty);
            let layout = if force_thin_self_ptr && arg_idx == Some(0) {
                // Don't pass the vtable, it's not an argument of the virtual fn.
                // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
                // or `&/&mut dyn Trait` because this is special-cased elsewhere in codegen.
                make_thin_self_ptr(cx, layout)
            } else {
                layout
            };

            let mut arg = ArgAbi::new(cx, layout, |layout, scalar, offset| {
                let mut attrs = ArgAttributes::new();
                adjust_for_rust_scalar(&mut attrs, scalar, *layout, offset, is_return);
                attrs
            });

            if arg.layout.is_zst() {
                // For some forsaken reason, x86_64-pc-windows-gnu
                // doesn't ignore zero-sized struct arguments.
                // The same is true for {s390x,sparc64,powerpc}-unknown-linux-{gnu,musl}.
                if is_return
                    || rust_abi
                    || (!win_x64_gnu
                        && !linux_s390x_gnu_like
                        && !linux_sparc64_gnu_like
                        && !linux_powerpc_gnu_like)
                {
                    arg.mode = PassMode::Ignore;
                }
            }

            arg
        };
        let mut fn_abi = FnAbi {
            ret: arg_of(sig.output(), None),
            args: inputs
                .iter()
                .cloned()
                .chain(extra_args)
                .chain(caller_location)
                .enumerate()
                .map(|(i, ty)| arg_of(ty, Some(i)))
                .collect(),
            c_variadic: sig.c_variadic,
            fixed_count: inputs.len(),
            conv,
            can_unwind: fn_can_unwind(cx.tcx().sess.panic_strategy(), codegen_fn_attr_flags, conv),
        };
        fn_abi.adjust_for_abi(cx, sig.abi);
        debug!("FnAbi::new_internal = {:?}", fn_abi);
        fn_abi
    }
    fn adjust_for_abi(&mut self, cx: &C, abi: SpecAbi) {
        if abi == SpecAbi::Unadjusted {
            return;
        }

        if abi == SpecAbi::Rust
            || abi == SpecAbi::RustCall
            || abi == SpecAbi::RustIntrinsic
            || abi == SpecAbi::PlatformIntrinsic
        {
            let fixup = |arg: &mut ArgAbi<'tcx, Ty<'tcx>>| {
                if arg.is_ignore() {
                    return;
                }

                match arg.layout.abi {
                    Abi::Aggregate { .. } => {}

                    // This is a fun case! The gist of what this is doing is
                    // that we want callers and callees to always agree on the
                    // ABI of how they pass SIMD arguments. If we were to *not*
                    // make these arguments indirect then they'd be immediates
                    // in LLVM, which means that they'd use whatever the
                    // appropriate ABI is for the callee and the caller. That
                    // means, for example, if the caller doesn't have AVX
                    // enabled but the callee does, then passing an AVX argument
                    // across this boundary would cause corrupt data to show up.
                    //
                    // This problem is fixed by unconditionally passing SIMD
                    // arguments through memory between callers and callees,
                    // which should get them all to agree on ABI regardless of
                    // target feature sets. Some more information about this
                    // issue can be found in #44367.
                    //
                    // Note that the platform intrinsic ABI is exempt here as
                    // that's how we connect up to LLVM and it's unstable
                    // anyway, we control all calls to it in libstd.
                    Abi::Vector { .. }
                        if abi != SpecAbi::PlatformIntrinsic
                            && cx.tcx().sess.target.simd_types_indirect =>
                    {
                        arg.make_indirect();
                        return;
                    }

                    _ => return,
                }
                // Pass and return structures up to 2 pointers in size by value, matching `ScalarPair`.
                // LLVM will usually pass these in 2 registers, which is more efficient than by-ref.
                let max_by_val_size = Pointer.size(cx) * 2;
                let size = arg.layout.size;

                if arg.layout.is_unsized() || size > max_by_val_size {
                    arg.make_indirect();
                } else {
                    // We want to pass small aggregates as immediates, but using
                    // an LLVM aggregate type for this leads to bad optimizations,
                    // so we pick an appropriately sized integer type instead.
                    arg.cast_to(Reg { kind: RegKind::Integer, size });
                }
            };
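            // For example (a hedged illustration): on a 64-bit target, an
            // 8-byte `Abi::Aggregate` argument such as `[u8; 8]` is cast to a
            // single `i64` register, while a 24-byte struct (larger than two
            // pointers) is passed indirectly instead.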
            fixup(&mut self.ret);
            for arg in &mut self.args {
                fixup(arg);
            }
            return;
        }

        if let Err(msg) = self.adjust_for_cabi(cx, abi) {
            cx.tcx().sess.fatal(&msg);
        }
    }
}
fn make_thin_self_ptr<'tcx, C>(cx: &C, mut layout: TyAndLayout<'tcx>) -> TyAndLayout<'tcx>
where
    C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout = TyAndLayout<'tcx>>
        + HasTyCtxt<'tcx>
        + HasParamEnv<'tcx>,
{
    let fat_pointer_ty = if layout.is_unsized() {
        // unsized `self` is passed as a pointer to `self`
        // FIXME (mikeyhew) change this to use &own if it is ever added to the language
        cx.tcx().mk_mut_ptr(layout.ty)
    } else {
        match layout.abi {
            Abi::ScalarPair(..) => (),
            _ => bug!("receiver type has unsupported layout: {:?}", layout),
        }
        // In the case of Rc<Self>, we need to explicitly pass a *mut RcBox<Self>
        // with a Scalar (not ScalarPair) ABI. This is a hack that is understood
        // elsewhere in the compiler as a method on a `dyn Trait`.
        // To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes until
        // we get a built-in pointer type.
        let mut fat_pointer_layout = layout;
        'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr()
            && !fat_pointer_layout.ty.is_region_ptr()
        {
            for i in 0..fat_pointer_layout.fields.count() {
                let field_layout = fat_pointer_layout.field(cx, i);

                if !field_layout.is_zst() {
                    fat_pointer_layout = field_layout;
                    continue 'descend_newtypes;
                }
            }

            bug!("receiver has no non-zero-sized fields {:?}", fat_pointer_layout);
        }

        fat_pointer_layout.ty
    };
    // We now have a type like `*mut RcBox<dyn Trait>`. Change its layout to
    // that of `*mut ()`, a thin pointer, but keep the same type; this pairing
    // is understood as a special case elsewhere in the compiler.
    let unit_pointer_ty = cx.tcx().mk_mut_ptr(cx.tcx().mk_unit());
    layout = cx.layout_of(unit_pointer_ty);
    layout.ty = fat_pointer_ty;
    layout
}
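// Summary of the special case above (an informal note): a `dyn Trait` method
// taken through a smart pointer receiver like `Rc<Self>` ends up with a
// receiver whose layout is that of the thin pointer `*mut ()` while its type
// remains the original receiver type; codegen recognizes this type/layout
// mismatch and treats the receiver as the data pointer of the fat pointer.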