1 use crate::ich::StableHashingContext;
2 use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
3 use crate::mir::{GeneratorLayout, GeneratorSavedLocal};
4 use crate::ty::subst::Subst;
5 use crate::ty::{self, subst::SubstsRef, ReprOptions, Ty, TyCtxt, TypeFoldable};
8 use rustc_attr as attr;
9 use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
11 use rustc_hir::lang_items::LangItem;
12 use rustc_index::bit_set::BitSet;
13 use rustc_index::vec::{Idx, IndexVec};
14 use rustc_session::{DataTypeKind, FieldInfo, SizeKind, VariantInfo};
15 use rustc_span::symbol::{Ident, Symbol};
16 use rustc_span::DUMMY_SP;
17 use rustc_target::abi::call::{
18 ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, Conv, FnAbi, PassMode, Reg, RegKind,
20 use rustc_target::abi::*;
21 use rustc_target::spec::{abi::Abi as SpecAbi, HasTargetSpec, PanicStrategy};
27 use std::num::NonZeroUsize;
30 pub trait IntegerExt {
// Converts this abi `Integer` into the matching Rust primitive integer type,
// picking the signed (`i*`) or unsigned (`u*`) family per `signed`.
31 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx>;
// Maps a `#[repr(...)]` integer hint (`attr::IntType`) to an abi `Integer`.
// `HasDataLayout` is needed so `isize`/`usize` can resolve to the target's
// pointer-sized integer (see the impl below).
32 fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
// Maps a Rust signed integer type (`ty::IntTy`) to an abi `Integer`.
33 fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer;
// Maps a Rust unsigned integer type (`ty::UintTy`) to an abi `Integer`.
34 fn from_uint_ty<C: HasDataLayout>(cx: &C, uty: ty::UintTy) -> Integer;
// NOTE(review): the remaining trait items (e.g. `repr_discr`, whose body
// appears further down) and the closing brace are elided from this listing —
// confirm against the full source.
44 impl IntegerExt for Integer {
// Exhaustive (Integer, signedness) -> primitive-type table; the interned
// primitive types live on `tcx.types`.
45 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx> {
46 match (*self, signed) {
47 (I8, false) => tcx.types.u8,
48 (I16, false) => tcx.types.u16,
49 (I32, false) => tcx.types.u32,
50 (I64, false) => tcx.types.u64,
51 (I128, false) => tcx.types.u128,
52 (I8, true) => tcx.types.i8,
53 (I16, true) => tcx.types.i16,
54 (I32, true) => tcx.types.i32,
55 (I64, true) => tcx.types.i64,
56 (I128, true) => tcx.types.i128,
// NOTE(review): the closing braces of the match and of `to_ty` are elided here.
60 /// Gets the Integer type from an attr::IntType.
61 fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
62 let dl = cx.data_layout();
// NOTE(review): the `match ity {` header line appears elided from this listing.
// Signedness does not affect the abi Integer chosen — only the bit width does.
65 attr::SignedInt(ast::IntTy::I8) | attr::UnsignedInt(ast::UintTy::U8) => I8,
66 attr::SignedInt(ast::IntTy::I16) | attr::UnsignedInt(ast::UintTy::U16) => I16,
67 attr::SignedInt(ast::IntTy::I32) | attr::UnsignedInt(ast::UintTy::U32) => I32,
68 attr::SignedInt(ast::IntTy::I64) | attr::UnsignedInt(ast::UintTy::U64) => I64,
69 attr::SignedInt(ast::IntTy::I128) | attr::UnsignedInt(ast::UintTy::U128) => I128,
// `isize`/`usize` depend on the target, so they resolve via the data layout.
70 attr::SignedInt(ast::IntTy::Isize) | attr::UnsignedInt(ast::UintTy::Usize) => {
71 dl.ptr_sized_integer()
76 fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer {
// NOTE(review): the `match ity {` header and the `ty::IntTy::I8 => I8` arm
// appear elided from this listing — confirm against the full source.
79 ty::IntTy::I16 => I16,
80 ty::IntTy::I32 => I32,
81 ty::IntTy::I64 => I64,
82 ty::IntTy::I128 => I128,
83 ty::IntTy::Isize => cx.data_layout().ptr_sized_integer(),
86 fn from_uint_ty<C: HasDataLayout>(cx: &C, ity: ty::UintTy) -> Integer {
// NOTE(review): the `match ity {` header and the `ty::UintTy::U8 => I8` arm
// appear elided from this listing — confirm against the full source.
89 ty::UintTy::U16 => I16,
90 ty::UintTy::U32 => I32,
91 ty::UintTy::U64 => I64,
92 ty::UintTy::U128 => I128,
93 ty::UintTy::Usize => cx.data_layout().ptr_sized_integer(),
97 /// Finds the appropriate Integer type and signedness for the given
98 /// signed discriminant range and `#[repr]` attribute.
99 /// N.B.: `u128` values above `i128::MAX` will be treated as signed, but
100 /// that shouldn't affect anything, other than maybe debuginfo.
// NOTE(review): the `fn repr_discr(...)` signature head (taking at least
// `tcx`, `repr`, `min`, `max` per the uses below) is elided from this listing.
107 ) -> (Integer, bool) {
108 // Theoretically, negative values could be larger in unsigned representation
109 // than the unsigned representation of the signed minimum. However, if there
110 // are any negative values, the only valid unsigned representation is u128
111 // which can fit all i128 values, so the result remains unaffected.
112 let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
113 let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));
115 let mut min_from_extern = None;
116 let min_default = I8;
// An explicit `#[repr(intN)]` hint wins outright, provided it can actually
// hold the discriminant range.
118 if let Some(ity) = repr.int {
119 let discr = Integer::from_attr(&tcx, ity);
120 let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
// NOTE(review): the `if fit > discr { bug!(...) }` guard surrounding this
// message appears partially elided from this listing.
123 "Integer::repr_discr: `#[repr]` hint too small for \
124 discriminant range of enum `{}",
128 return (discr, ity.is_signed());
// NOTE(review): the guard selecting this path (a `repr.c()` check in the full
// source) appears elided just above this match.
132 match &tcx.sess.target.arch[..] {
133 // WARNING: the ARM EABI has two variants; the one corresponding
134 // to `at_least == I32` appears to be used on Linux and NetBSD,
135 // but some systems may use the variant corresponding to no
136 // lower bound. However, we don't run on those yet...?
137 "arm" => min_from_extern = Some(I32),
138 _ => min_from_extern = Some(I32),
142 let at_least = min_from_extern.unwrap_or(min_default);
144 // If there are no negative values, we can use the unsigned fit.
// NOTE(review): the `if min >= 0 { ... } else { ... }` condition separating
// these two return values is elided from this listing.
146 (cmp::max(unsigned_fit, at_least), false)
148 (cmp::max(signed_fit, at_least), true)
153 pub trait PrimitiveExt {
// Converts this abi `Primitive` to the Rust type it represents
// (ints, floats, or a pointer type — see the impl below).
154 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
// Like `to_ty`, but always yields an *integer* type (pointers become `usize`;
// floats are a bug — see the impl below).
155 fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
// NOTE(review): the trait's closing brace is elided from this listing.
158 impl PrimitiveExt for Primitive {
159 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
// NOTE(review): the `match *self {` header appears elided from this listing.
161 Int(i, signed) => i.to_ty(tcx, signed),
162 F32 => tcx.types.f32,
163 F64 => tcx.types.f64,
// An abi-level `Pointer` carries no pointee type, so `*mut ()` stands in.
164 Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
168 /// Return an *integer* type matching this primitive.
169 /// Useful in particular when dealing with enum discriminants.
// NOTE(review): this signature lacks the `<'tcx>` lifetime parameter that the
// trait declaration (line 155) has — likely lost in this listing's elision;
// confirm against the full source.
170 fn to_int_ty(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
171 match *self {
172 Int(i, signed) => i.to_ty(tcx, signed),
173 Pointer => tcx.types.usize,
174 F32 | F64 => bug!("floats do not have an int type"),
179 /// The first half of a fat pointer.
181 /// - For a trait object, this is the address of the box.
182 /// - For a slice, this is the base address.
// Presumably used as a field index into fat-pointer layouts elsewhere in the
// compiler — the callers are not visible in this chunk.
183 pub const FAT_PTR_ADDR: usize = 0;
185 /// The second half of a fat pointer.
187 /// - For a trait object, this is the address of the vtable.
188 /// - For a slice, this is the length.
189 pub const FAT_PTR_EXTRA: usize = 1;
// Errors that layout computation can produce.
191 #[derive(Copy, Clone, Debug, TyEncodable, TyDecodable)]
192 pub enum LayoutError<'tcx> {
// NOTE(review): an `Unknown(Ty<'tcx>)` variant (referenced by the `Display`
// impl below) appears elided from this listing.
194 SizeOverflow(Ty<'tcx>),
197 impl<'tcx> fmt::Display for LayoutError<'tcx> {
198 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// NOTE(review): the `match *self {` header appears elided from this listing.
200 LayoutError::Unknown(ty) => write!(f, "the type `{}` has an unknown layout", ty),
201 LayoutError::SizeOverflow(ty) => {
202 write!(f, "values of the type `{}` are too big for the current architecture", ty)
// NOTE(review): the `fn layout_raw(tcx: TyCtxt<'tcx>, ...)` header preceding
// this parameter is elided from this listing. This is the provider for the
// `layout_raw` query registered in `provide` below.
210 query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
211 ) -> Result<&'tcx Layout, LayoutError<'tcx>> {
212 ty::tls::with_related_context(tcx, move |icx| {
213 let (param_env, ty) = query.into_parts();
// Guard against unbounded recursion through deeply nested types by tracking
// the current layout-computation depth in the thread-local ImplicitCtxt.
215 if !tcx.sess.recursion_limit().value_within_limit(icx.layout_depth) {
216 tcx.sess.fatal(&format!("overflow representing the type `{}`", ty));
219 // Update the ImplicitCtxt to increase the layout_depth
220 let icx = ty::tls::ImplicitCtxt { layout_depth: icx.layout_depth + 1, ..icx.clone() };
222 ty::tls::enter_context(&icx, |_| {
223 let cx = LayoutCx { tcx, param_env };
224 let layout = cx.layout_raw_uncached(ty);
225 // Type-level uninhabitedness should always imply ABI uninhabitedness.
226 if let Ok(layout) = layout {
227 if ty.conservative_is_privately_uninhabited(tcx) {
228 assert!(layout.abi.is_uninhabited());
// Registers this module's query providers (here, `layout_raw`) with the
// global provider table, preserving all other existing providers.
236 pub fn provide(providers: &mut ty::query::Providers) {
237 *providers = ty::query::Providers { layout_raw, ..*providers };
// NOTE(review): the function's closing brace is elided from this listing.
// Context carried through layout computation: the (generic) compiler context
// plus the parameter environment used for trait/obligation resolution.
240 pub struct LayoutCx<'tcx, C> {
// NOTE(review): the `tcx: C` field (used as `LayoutCx { tcx, param_env }` in
// `layout_raw` above) appears elided from this listing.
242 pub param_env: ty::ParamEnv<'tcx>,
245 #[derive(Copy, Clone, Debug)]
// NOTE(review): the `enum StructKind {` header and the `AlwaysSized` /
// `MaybeUnsized` variant names (both referenced by code below) appear elided
// from this listing; only their doc comments and `Prefixed` remain.
247 /// A tuple, closure, or univariant which cannot be coerced to unsized.
249 /// A univariant, the last field of which may be coerced to unsized.
251 /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
252 Prefixed(Size, Align),
255 // Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
256 // This is used to go between `memory_index` (source field order to memory order)
257 // and `inverse_memory_index` (memory order to source field order).
258 // See also `FieldsShape::Arbitrary::memory_index` for more details.
259 // FIXME(eddyb) build a better abstraction for permutations, if possible.
260 fn invert_mapping(map: &[u32]) -> Vec<u32> {
261 let mut inverse = vec![0; map.len()];
262 for i in 0..map.len() {
263 inverse[map[i] as usize] = i as u32;
// NOTE(review): the loop's closing brace, the trailing `inverse` return
// expression, and the function's closing brace are elided from this listing.
268 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
// Builds the `ScalarPair` layout for two scalars `a` (at offset 0) and `b`
// (at the next offset aligned for `b`), e.g. for wide pointers.
269 fn scalar_pair(&self, a: Scalar, b: Scalar) -> Layout {
270 let dl = self.data_layout();
271 let b_align = b.value.align(dl);
// Overall alignment: the max of both scalars' alignment and the target's
// aggregate alignment.
272 let align = a.value.align(dl).max(b_align).max(dl.aggregate_align);
273 let b_offset = a.value.size(dl).align_to(b_align.abi);
274 let size = (b_offset + b.value.size(dl)).align_to(align.abi);
276 // HACK(nox): We iter on `b` and then `a` because `max_by_key`
277 // returns the last maximum.
278 let largest_niche = Niche::from_scalar(dl, b_offset, b.clone())
// NOTE(review): an `.into_iter()` adapter between these lines appears elided
// from this listing.
280 .chain(Niche::from_scalar(dl, Size::ZERO, a.clone()))
281 .max_by_key(|niche| niche.available(dl));
// NOTE(review): the `Layout {` constructor line opening this initializer is
// elided from this listing.
284 variants: Variants::Single { index: VariantIdx::new(0) },
285 fields: FieldsShape::Arbitrary {
286 offsets: vec![Size::ZERO, b_offset],
287 memory_index: vec![0, 1],
289 abi: Abi::ScalarPair(a, b),
296 fn univariant_uninterned(
299 fields: &[TyAndLayout<'_>],
302 ) -> Result<Layout, LayoutError<'tcx>> {
303 let dl = self.data_layout();
304 let pack = repr.pack;
305 if pack.is_some() && repr.align.is_some() {
306 bug!("struct cannot be packed and aligned");
309 let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };
311 let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();
313 let optimize = !repr.inhibit_struct_field_reordering_opt();
316 if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
317 let optimizing = &mut inverse_memory_index[..end];
318 let field_align = |f: &TyAndLayout<'_>| {
319 if let Some(pack) = pack { f.align.abi.min(pack) } else { f.align.abi }
322 StructKind::AlwaysSized | StructKind::MaybeUnsized => {
323 optimizing.sort_by_key(|&x| {
324 // Place ZSTs first to avoid "interesting offsets",
325 // especially with only one or two non-ZST fields.
326 let f = &fields[x as usize];
327 (!f.is_zst(), cmp::Reverse(field_align(f)))
330 StructKind::Prefixed(..) => {
331 // Sort in ascending alignment so that the layout stay optimal
332 // regardless of the prefix
333 optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
338 // inverse_memory_index holds field indices by increasing memory offset.
339 // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
340 // We now write field offsets to the corresponding offset slot;
341 // field 5 with offset 0 puts 0 in offsets[5].
342 // At the bottom of this function, we invert `inverse_memory_index` to
343 // produce `memory_index` (see `invert_mapping`).
345 let mut sized = true;
346 let mut offsets = vec![Size::ZERO; fields.len()];
347 let mut offset = Size::ZERO;
348 let mut largest_niche = None;
349 let mut largest_niche_available = 0;
351 if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
353 if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
354 align = align.max(AbiAndPrefAlign::new(prefix_align));
355 offset = prefix_size.align_to(prefix_align);
358 for &i in &inverse_memory_index {
359 let field = fields[i as usize];
361 bug!("univariant: field #{} of `{}` comes after unsized field", offsets.len(), ty);
364 if field.is_unsized() {
368 // Invariant: offset < dl.obj_size_bound() <= 1<<61
369 let field_align = if let Some(pack) = pack {
370 field.align.min(AbiAndPrefAlign::new(pack))
374 offset = offset.align_to(field_align.abi);
375 align = align.max(field_align);
377 debug!("univariant offset: {:?} field: {:#?}", offset, field);
378 offsets[i as usize] = offset;
380 if !repr.hide_niche() {
381 if let Some(mut niche) = field.largest_niche.clone() {
382 let available = niche.available(dl);
383 if available > largest_niche_available {
384 largest_niche_available = available;
385 niche.offset += offset;
386 largest_niche = Some(niche);
391 offset = offset.checked_add(field.size, dl).ok_or(LayoutError::SizeOverflow(ty))?;
394 if let Some(repr_align) = repr.align {
395 align = align.max(AbiAndPrefAlign::new(repr_align));
398 debug!("univariant min_size: {:?}", offset);
399 let min_size = offset;
401 // As stated above, inverse_memory_index holds field indices by increasing offset.
402 // This makes it an already-sorted view of the offsets vec.
403 // To invert it, consider:
404 // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
405 // Field 5 would be the first element, so memory_index is i:
406 // Note: if we didn't optimize, it's already right.
409 if optimize { invert_mapping(&inverse_memory_index) } else { inverse_memory_index };
411 let size = min_size.align_to(align.abi);
412 let mut abi = Abi::Aggregate { sized };
414 // Unpack newtype ABIs and find scalar pairs.
415 if sized && size.bytes() > 0 {
416 // All other fields must be ZSTs.
417 let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());
419 match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
420 // We have exactly one non-ZST field.
421 (Some((i, field)), None, None) => {
422 // Field fills the struct and it has a scalar or scalar pair ABI.
423 if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size
426 // For plain scalars, or vectors of them, we can't unpack
427 // newtypes for `#[repr(C)]`, as that affects C ABIs.
428 Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
429 abi = field.abi.clone();
431 // But scalar pairs are Rust-specific and get
432 // treated as aggregates by C ABIs anyway.
433 Abi::ScalarPair(..) => {
434 abi = field.abi.clone();
441 // Two non-ZST fields, and they're both scalars.
443 Some((i, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(ref a), .. }, .. })),
444 Some((j, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(ref b), .. }, .. })),
447 // Order by the memory placement, not source order.
448 let ((i, a), (j, b)) =
449 if offsets[i] < offsets[j] { ((i, a), (j, b)) } else { ((j, b), (i, a)) };
450 let pair = self.scalar_pair(a.clone(), b.clone());
451 let pair_offsets = match pair.fields {
452 FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
453 assert_eq!(memory_index, &[0, 1]);
458 if offsets[i] == pair_offsets[0]
459 && offsets[j] == pair_offsets[1]
460 && align == pair.align
463 // We can use `ScalarPair` only when it matches our
464 // already computed layout (including `#[repr(C)]`).
473 if sized && fields.iter().any(|f| f.abi.is_uninhabited()) {
474 abi = Abi::Uninhabited;
478 variants: Variants::Single { index: VariantIdx::new(0) },
479 fields: FieldsShape::Arbitrary { offsets, memory_index },
487 fn layout_raw_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx Layout, LayoutError<'tcx>> {
489 let param_env = self.param_env;
490 let dl = self.data_layout();
491 let scalar_unit = |value: Primitive| {
492 let bits = value.size(dl).bits();
493 assert!(bits <= 128);
494 Scalar { value, valid_range: 0..=(!0 >> (128 - bits)) }
496 let scalar = |value: Primitive| tcx.intern_layout(Layout::scalar(self, scalar_unit(value)));
498 let univariant = |fields: &[TyAndLayout<'_>], repr: &ReprOptions, kind| {
499 Ok(tcx.intern_layout(self.univariant_uninterned(ty, fields, repr, kind)?))
501 debug_assert!(!ty.has_infer_types_or_consts());
503 Ok(match *ty.kind() {
505 ty::Bool => tcx.intern_layout(Layout::scalar(
507 Scalar { value: Int(I8, false), valid_range: 0..=1 },
509 ty::Char => tcx.intern_layout(Layout::scalar(
511 Scalar { value: Int(I32, false), valid_range: 0..=0x10FFFF },
513 ty::Int(ity) => scalar(Int(Integer::from_int_ty(dl, ity), true)),
514 ty::Uint(ity) => scalar(Int(Integer::from_uint_ty(dl, ity), false)),
515 ty::Float(fty) => scalar(match fty {
516 ty::FloatTy::F32 => F32,
517 ty::FloatTy::F64 => F64,
520 let mut ptr = scalar_unit(Pointer);
521 ptr.valid_range = 1..=*ptr.valid_range.end();
522 tcx.intern_layout(Layout::scalar(self, ptr))
526 ty::Never => tcx.intern_layout(Layout {
527 variants: Variants::Single { index: VariantIdx::new(0) },
528 fields: FieldsShape::Primitive,
529 abi: Abi::Uninhabited,
535 // Potentially-wide pointers.
536 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
537 let mut data_ptr = scalar_unit(Pointer);
538 if !ty.is_unsafe_ptr() {
539 data_ptr.valid_range = 1..=*data_ptr.valid_range.end();
542 let pointee = tcx.normalize_erasing_regions(param_env, pointee);
543 if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
544 return Ok(tcx.intern_layout(Layout::scalar(self, data_ptr)));
547 let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
548 let metadata = match unsized_part.kind() {
550 return Ok(tcx.intern_layout(Layout::scalar(self, data_ptr)));
552 ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)),
554 let mut vtable = scalar_unit(Pointer);
555 vtable.valid_range = 1..=*vtable.valid_range.end();
558 _ => return Err(LayoutError::Unknown(unsized_part)),
561 // Effectively a (ptr, meta) tuple.
562 tcx.intern_layout(self.scalar_pair(data_ptr, metadata))
565 // Arrays and slices.
566 ty::Array(element, mut count) => {
567 if count.has_projections() {
568 count = tcx.normalize_erasing_regions(param_env, count);
569 if count.has_projections() {
570 return Err(LayoutError::Unknown(ty));
574 let count = count.try_eval_usize(tcx, param_env).ok_or(LayoutError::Unknown(ty))?;
575 let element = self.layout_of(element)?;
577 element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;
579 let abi = if count != 0 && ty.conservative_is_privately_uninhabited(tcx) {
582 Abi::Aggregate { sized: true }
585 let largest_niche = if count != 0 { element.largest_niche.clone() } else { None };
587 tcx.intern_layout(Layout {
588 variants: Variants::Single { index: VariantIdx::new(0) },
589 fields: FieldsShape::Array { stride: element.size, count },
592 align: element.align,
596 ty::Slice(element) => {
597 let element = self.layout_of(element)?;
598 tcx.intern_layout(Layout {
599 variants: Variants::Single { index: VariantIdx::new(0) },
600 fields: FieldsShape::Array { stride: element.size, count: 0 },
601 abi: Abi::Aggregate { sized: false },
603 align: element.align,
607 ty::Str => tcx.intern_layout(Layout {
608 variants: Variants::Single { index: VariantIdx::new(0) },
609 fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
610 abi: Abi::Aggregate { sized: false },
617 ty::FnDef(..) => univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?,
618 ty::Dynamic(..) | ty::Foreign(..) => {
619 let mut unit = self.univariant_uninterned(
622 &ReprOptions::default(),
623 StructKind::AlwaysSized,
626 Abi::Aggregate { ref mut sized } => *sized = false,
629 tcx.intern_layout(unit)
632 ty::Generator(def_id, substs, _) => self.generator_layout(ty, def_id, substs)?,
634 ty::Closure(_, ref substs) => {
635 let tys = substs.as_closure().upvar_tys();
637 &tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
638 &ReprOptions::default(),
639 StructKind::AlwaysSized,
645 if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };
649 .map(|k| self.layout_of(k.expect_ty()))
650 .collect::<Result<Vec<_>, _>>()?,
651 &ReprOptions::default(),
656 // SIMD vector types.
657 ty::Adt(def, substs) if def.repr.simd() => {
658 // Supported SIMD vectors are homogeneous ADTs with at least one field:
660 // * #[repr(simd)] struct S(T, T, T, T);
661 // * #[repr(simd)] struct S { x: T, y: T, z: T, w: T }
662 // * #[repr(simd)] struct S([T; 4])
664 // where T is a primitive scalar (integer/float/pointer).
666 // SIMD vectors with zero fields are not supported.
667 // (should be caught by typeck)
668 if def.non_enum_variant().fields.is_empty() {
669 tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
672 // Type of the first ADT field:
673 let f0_ty = def.non_enum_variant().fields[0].ty(tcx, substs);
675 // Heterogeneous SIMD vectors are not supported:
676 // (should be caught by typeck)
677 for fi in &def.non_enum_variant().fields {
678 if fi.ty(tcx, substs) != f0_ty {
679 tcx.sess.fatal(&format!("monomorphising heterogeneous SIMD type `{}`", ty));
683 // The element type and number of elements of the SIMD vector
684 // are obtained from:
686 // * the element type and length of the single array field, if
687 // the first field is of array type, or
689 // * the homogenous field type and the number of fields.
690 let (e_ty, e_len, is_array) = if let ty::Array(e_ty, _) = f0_ty.kind() {
691 // First ADT field is an array:
693 // SIMD vectors with multiple array fields are not supported:
694 // (should be caught by typeck)
695 if def.non_enum_variant().fields.len() != 1 {
696 tcx.sess.fatal(&format!(
697 "monomorphising SIMD type `{}` with more than one array field",
702 // Extract the number of elements from the layout of the array field:
703 let len = if let Ok(TyAndLayout {
704 layout: Layout { fields: FieldsShape::Array { count, .. }, .. },
706 }) = self.layout_of(f0_ty)
710 return Err(LayoutError::Unknown(ty));
715 // First ADT field is not an array:
716 (f0_ty, def.non_enum_variant().fields.len() as _, false)
719 // SIMD vectors of zero length are not supported.
721 // Can't be caught in typeck if the array length is generic.
723 tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
726 // Compute the ABI of the element type:
727 let e_ly = self.layout_of(e_ty)?;
728 let e_abi = if let Abi::Scalar(ref scalar) = e_ly.abi {
731 // This error isn't caught in typeck, e.g., if
732 // the element type of the vector is generic.
733 tcx.sess.fatal(&format!(
734 "monomorphising SIMD type `{}` with a non-primitive-scalar \
735 (integer/float/pointer) element type `{}`",
740 // Compute the size and alignment of the vector:
741 let size = e_ly.size.checked_mul(e_len, dl).ok_or(LayoutError::SizeOverflow(ty))?;
742 let align = dl.vector_align(size);
743 let size = size.align_to(align.abi);
745 // Compute the placement of the vector fields:
746 let fields = if is_array {
747 FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] }
749 FieldsShape::Array { stride: e_ly.size, count: e_len }
752 tcx.intern_layout(Layout {
753 variants: Variants::Single { index: VariantIdx::new(0) },
755 abi: Abi::Vector { element: e_abi, count: e_len },
756 largest_niche: e_ly.largest_niche.clone(),
763 ty::Adt(def, substs) => {
764 // Cache the field layouts.
771 .map(|field| self.layout_of(field.ty(tcx, substs)))
772 .collect::<Result<Vec<_>, _>>()
774 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
777 if def.repr.pack.is_some() && def.repr.align.is_some() {
778 bug!("union cannot be packed and aligned");
782 if def.repr.pack.is_some() { dl.i8_align } else { dl.aggregate_align };
784 if let Some(repr_align) = def.repr.align {
785 align = align.max(AbiAndPrefAlign::new(repr_align));
788 let optimize = !def.repr.inhibit_union_abi_opt();
789 let mut size = Size::ZERO;
790 let mut abi = Abi::Aggregate { sized: true };
791 let index = VariantIdx::new(0);
792 for field in &variants[index] {
793 assert!(!field.is_unsized());
794 align = align.max(field.align);
796 // If all non-ZST fields have the same ABI, forward this ABI
797 if optimize && !field.is_zst() {
798 // Normalize scalar_unit to the maximal valid range
799 let field_abi = match &field.abi {
800 Abi::Scalar(x) => Abi::Scalar(scalar_unit(x.value)),
801 Abi::ScalarPair(x, y) => {
802 Abi::ScalarPair(scalar_unit(x.value), scalar_unit(y.value))
804 Abi::Vector { element: x, count } => {
805 Abi::Vector { element: scalar_unit(x.value), count: *count }
807 Abi::Uninhabited | Abi::Aggregate { .. } => {
808 Abi::Aggregate { sized: true }
812 if size == Size::ZERO {
813 // first non ZST: initialize 'abi'
815 } else if abi != field_abi {
816 // different fields have different ABI: reset to Aggregate
817 abi = Abi::Aggregate { sized: true };
821 size = cmp::max(size, field.size);
824 if let Some(pack) = def.repr.pack {
825 align = align.min(AbiAndPrefAlign::new(pack));
828 return Ok(tcx.intern_layout(Layout {
829 variants: Variants::Single { index },
830 fields: FieldsShape::Union(
831 NonZeroUsize::new(variants[index].len())
832 .ok_or(LayoutError::Unknown(ty))?,
837 size: size.align_to(align.abi),
841 // A variant is absent if it's uninhabited and only has ZST fields.
842 // Present uninhabited variants only require space for their fields,
843 // but *not* an encoding of the discriminant (e.g., a tag value).
844 // See issue #49298 for more details on the need to leave space
845 // for non-ZST uninhabited data (mostly partial initialization).
846 let absent = |fields: &[TyAndLayout<'_>]| {
847 let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
848 let is_zst = fields.iter().all(|f| f.is_zst());
849 uninhabited && is_zst
851 let (present_first, present_second) = {
852 let mut present_variants = variants
854 .filter_map(|(i, v)| if absent(v) { None } else { Some(i) });
855 (present_variants.next(), present_variants.next())
857 let present_first = match present_first {
858 Some(present_first) => present_first,
859 // Uninhabited because it has no variants, or only absent ones.
860 None if def.is_enum() => return tcx.layout_raw(param_env.and(tcx.types.never)),
861 // If it's a struct, still compute a layout so that we can still compute the
863 None => VariantIdx::new(0),
866 let is_struct = !def.is_enum() ||
867 // Only one variant is present.
868 (present_second.is_none() &&
869 // Representation optimizations are allowed.
870 !def.repr.inhibit_enum_layout_opt());
872 // Struct, or univariant enum equivalent to a struct.
873 // (Typechecking will reject discriminant-sizing attrs.)
875 let v = present_first;
876 let kind = if def.is_enum() || variants[v].is_empty() {
877 StructKind::AlwaysSized
879 let param_env = tcx.param_env(def.did);
880 let last_field = def.variants[v].fields.last().unwrap();
882 tcx.type_of(last_field.did).is_sized(tcx.at(DUMMY_SP), param_env);
884 StructKind::MaybeUnsized
886 StructKind::AlwaysSized
890 let mut st = self.univariant_uninterned(ty, &variants[v], &def.repr, kind)?;
891 st.variants = Variants::Single { index: v };
892 let (start, end) = self.tcx.layout_scalar_valid_range(def.did);
894 Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
895 // the asserts ensure that we are not using the
896 // `#[rustc_layout_scalar_valid_range(n)]`
897 // attribute to widen the range of anything as that would probably
898 // result in UB somewhere
899 // FIXME(eddyb) the asserts are probably not needed,
900 // as larger validity ranges would result in missed
901 // optimizations, *not* wrongly assuming the inner
902 // value is valid. e.g. unions enlarge validity ranges,
903 // because the values may be uninitialized.
904 if let Bound::Included(start) = start {
905 // FIXME(eddyb) this might be incorrect - it doesn't
906 // account for wrap-around (end < start) ranges.
907 assert!(*scalar.valid_range.start() <= start);
908 scalar.valid_range = start..=*scalar.valid_range.end();
910 if let Bound::Included(end) = end {
911 // FIXME(eddyb) this might be incorrect - it doesn't
912 // account for wrap-around (end < start) ranges.
913 assert!(*scalar.valid_range.end() >= end);
914 scalar.valid_range = *scalar.valid_range.start()..=end;
917 // Update `largest_niche` if we have introduced a larger niche.
918 let niche = if def.repr.hide_niche() {
921 Niche::from_scalar(dl, Size::ZERO, scalar.clone())
923 if let Some(niche) = niche {
924 match &st.largest_niche {
925 Some(largest_niche) => {
926 // Replace the existing niche even if they're equal,
927 // because this one is at a lower offset.
928 if largest_niche.available(dl) <= niche.available(dl) {
929 st.largest_niche = Some(niche);
932 None => st.largest_niche = Some(niche),
937 start == Bound::Unbounded && end == Bound::Unbounded,
938 "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
944 return Ok(tcx.intern_layout(st));
947 // At this point, we have handled all unions and
948 // structs. (We have also handled univariant enums
949 // that allow representation optimization.)
950 assert!(def.is_enum());
952 // The current code for niche-filling relies on variant indices
953 // instead of actual discriminants, so dataful enums with
954 // explicit discriminants (RFC #2363) would misbehave.
955 let no_explicit_discriminants = def
958 .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i.as_u32()));
960 let mut niche_filling_layout = None;
962 // Niche-filling enum optimization.
963 if !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants {
964 let mut dataful_variant = None;
965 let mut niche_variants = VariantIdx::MAX..=VariantIdx::new(0);
967 // Find one non-ZST variant.
968 'variants: for (v, fields) in variants.iter_enumerated() {
974 if dataful_variant.is_none() {
975 dataful_variant = Some(v);
978 dataful_variant = None;
983 niche_variants = *niche_variants.start().min(&v)..=v;
986 if niche_variants.start() > niche_variants.end() {
987 dataful_variant = None;
990 if let Some(i) = dataful_variant {
991 let count = (niche_variants.end().as_u32()
992 - niche_variants.start().as_u32()
995 // Find the field with the largest niche
996 let niche_candidate = variants[i]
999 .filter_map(|(j, &field)| Some((j, field.largest_niche.as_ref()?)))
1000 .max_by_key(|(_, niche)| niche.available(dl));
1002 if let Some((field_index, niche, (niche_start, niche_scalar))) =
1003 niche_candidate.and_then(|(field_index, niche)| {
1004 Some((field_index, niche, niche.reserve(self, count)?))
1007 let mut align = dl.aggregate_align;
1011 let mut st = self.univariant_uninterned(
1015 StructKind::AlwaysSized,
1017 st.variants = Variants::Single { index: j };
1019 align = align.max(st.align);
1023 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1025 let offset = st[i].fields.offset(field_index) + niche.offset;
1026 let size = st[i].size;
1028 let abi = if st.iter().all(|v| v.abi.is_uninhabited()) {
1032 Abi::Scalar(_) => Abi::Scalar(niche_scalar.clone()),
1033 Abi::ScalarPair(ref first, ref second) => {
1034 // We need to use scalar_unit to reset the
1035 // valid range to the maximal one for that
1036 // primitive, because only the niche is
1037 // guaranteed to be initialised, not the
1039 if offset.bytes() == 0 {
1041 niche_scalar.clone(),
1042 scalar_unit(second.value),
1046 scalar_unit(first.value),
1047 niche_scalar.clone(),
1051 _ => Abi::Aggregate { sized: true },
1056 Niche::from_scalar(dl, offset, niche_scalar.clone());
1058 niche_filling_layout = Some(Layout {
1059 variants: Variants::Multiple {
1061 tag_encoding: TagEncoding::Niche {
1069 fields: FieldsShape::Arbitrary {
1070 offsets: vec![offset],
1071 memory_index: vec![0],
1082 let (mut min, mut max) = (i128::MAX, i128::MIN);
1083 let discr_type = def.repr.discr_type();
1084 let bits = Integer::from_attr(self, discr_type).size().bits();
1085 for (i, discr) in def.discriminants(tcx) {
1086 if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
1089 let mut x = discr.val as i128;
1090 if discr_type.is_signed() {
1091 // sign extend the raw representation to be an i128
1092 x = (x << (128 - bits)) >> (128 - bits);
1101 // We might have no inhabited variants, so pretend there's at least one.
1102 if (min, max) == (i128::MAX, i128::MIN) {
1106 assert!(min <= max, "discriminant range is {}...{}", min, max);
1107 let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max);
1109 let mut align = dl.aggregate_align;
1110 let mut size = Size::ZERO;
1112 // We're interested in the smallest alignment, so start large.
1113 let mut start_align = Align::from_bytes(256).unwrap();
1114 assert_eq!(Integer::for_align(dl, start_align), None);
1116 // repr(C) on an enum tells us to make a (tag, union) layout,
1117 // so we need to grow the prefix alignment to be at least
1118 // the alignment of the union. (This value is used both for
1119 // determining the alignment of the overall enum, and the
1120 // determining the alignment of the payload after the tag.)
1121 let mut prefix_align = min_ity.align(dl).abi;
1123 for fields in &variants {
1124 for field in fields {
1125 prefix_align = prefix_align.max(field.align.abi);
1130 // Create the set of structs that represent each variant.
1131 let mut layout_variants = variants
1133 .map(|(i, field_layouts)| {
1134 let mut st = self.univariant_uninterned(
1138 StructKind::Prefixed(min_ity.size(), prefix_align),
1140 st.variants = Variants::Single { index: i };
1141 // Find the first field we can't move later
1142 // to make room for a larger discriminant.
1144 st.fields.index_by_increasing_offset().map(|j| field_layouts[j])
1146 if !field.is_zst() || field.align.abi.bytes() != 1 {
1147 start_align = start_align.min(field.align.abi);
1151 size = cmp::max(size, st.size);
1152 align = align.max(st.align);
1155 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1157 // Align the maximum variant size to the largest alignment.
1158 size = size.align_to(align.abi);
1160 if size.bytes() >= dl.obj_size_bound() {
1161 return Err(LayoutError::SizeOverflow(ty));
1164 let typeck_ity = Integer::from_attr(dl, def.repr.discr_type());
1165 if typeck_ity < min_ity {
1166 // It is a bug if Layout decided on a greater discriminant size than typeck for
1167 // some reason at this point (based on values discriminant can take on). Mostly
1168 // because this discriminant will be loaded, and then stored into variable of
1169 // type calculated by typeck. Consider such case (a bug): typeck decided on
1170 // byte-sized discriminant, but layout thinks we need a 16-bit to store all
1171 // discriminant values. That would be a bug, because then, in codegen, in order
1172 // to store this 16-bit discriminant into 8-bit sized temporary some of the
1173 // space necessary to represent would have to be discarded (or layout is wrong
1174 // on thinking it needs 16 bits)
1176 "layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
1180 // However, it is fine to make discr type however large (as an optimisation)
1181 // after this point – we’ll just truncate the value we load in codegen.
1184 // Check to see if we should use a different type for the
1185 // discriminant. We can safely use a type with the same size
1186 // as the alignment of the first field of each variant.
1187 // We increase the size of the discriminant to avoid LLVM copying
1188 // padding when it doesn't need to. This normally causes unaligned
1189 // load/stores and excessive memcpy/memset operations. By using a
1190 // bigger integer size, LLVM can be sure about its contents and
1191 // won't be so conservative.
1193 // Use the initial field alignment
1194 let mut ity = if def.repr.c() || def.repr.int.is_some() {
1197 Integer::for_align(dl, start_align).unwrap_or(min_ity)
1200 // If the alignment is not larger than the chosen discriminant size,
1201 // don't use the alignment as the final size.
1205 // Patch up the variants' first few fields.
1206 let old_ity_size = min_ity.size();
1207 let new_ity_size = ity.size();
1208 for variant in &mut layout_variants {
1209 match variant.fields {
1210 FieldsShape::Arbitrary { ref mut offsets, .. } => {
1212 if *i <= old_ity_size {
1213 assert_eq!(*i, old_ity_size);
1217 // We might be making the struct larger.
1218 if variant.size <= old_ity_size {
1219 variant.size = new_ity_size;
1227 let tag_mask = !0u128 >> (128 - ity.size().bits());
1229 value: Int(ity, signed),
1230 valid_range: (min as u128 & tag_mask)..=(max as u128 & tag_mask),
1232 let mut abi = Abi::Aggregate { sized: true };
1233 if tag.value.size(dl) == size {
1234 abi = Abi::Scalar(tag.clone());
1236 // Try to use a ScalarPair for all tagged enums.
1237 let mut common_prim = None;
1238 for (field_layouts, layout_variant) in variants.iter().zip(&layout_variants) {
1239 let offsets = match layout_variant.fields {
1240 FieldsShape::Arbitrary { ref offsets, .. } => offsets,
1244 field_layouts.iter().zip(offsets).filter(|p| !p.0.is_zst());
1245 let (field, offset) = match (fields.next(), fields.next()) {
1246 (None, None) => continue,
1247 (Some(pair), None) => pair,
1253 let prim = match field.abi {
1254 Abi::Scalar(ref scalar) => scalar.value,
1260 if let Some(pair) = common_prim {
1261 // This is pretty conservative. We could go fancier
1262 // by conflating things like i32 and u32, or even
1263 // realising that (u8, u8) could just cohabit with
1265 if pair != (prim, offset) {
1270 common_prim = Some((prim, offset));
1273 if let Some((prim, offset)) = common_prim {
1274 let pair = self.scalar_pair(tag.clone(), scalar_unit(prim));
1275 let pair_offsets = match pair.fields {
1276 FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
1277 assert_eq!(memory_index, &[0, 1]);
1282 if pair_offsets[0] == Size::ZERO
1283 && pair_offsets[1] == *offset
1284 && align == pair.align
1285 && size == pair.size
1287 // We can use `ScalarPair` only when it matches our
1288 // already computed layout (including `#[repr(C)]`).
1294 if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
1295 abi = Abi::Uninhabited;
1298 let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag.clone());
1300 let tagged_layout = Layout {
1301 variants: Variants::Multiple {
1303 tag_encoding: TagEncoding::Direct,
1305 variants: layout_variants,
1307 fields: FieldsShape::Arbitrary {
1308 offsets: vec![Size::ZERO],
1309 memory_index: vec![0],
1317 let best_layout = match (tagged_layout, niche_filling_layout) {
1318 (tagged_layout, Some(niche_filling_layout)) => {
1319 // Pick the smaller layout; otherwise,
1320 // pick the layout with the larger niche; otherwise,
1321 // pick tagged as it has simpler codegen.
1322 cmp::min_by_key(tagged_layout, niche_filling_layout, |layout| {
1324 layout.largest_niche.as_ref().map_or(0, |n| n.available(dl));
1325 (layout.size, cmp::Reverse(niche_size))
1328 (tagged_layout, None) => tagged_layout,
1331 tcx.intern_layout(best_layout)
1334 // Types with no meaningful known layout.
1335 ty::Projection(_) | ty::Opaque(..) => {
1336 let normalized = tcx.normalize_erasing_regions(param_env, ty);
1337 if ty == normalized {
1338 return Err(LayoutError::Unknown(ty));
1340 tcx.layout_raw(param_env.and(normalized))?
1343 ty::Placeholder(..) | ty::GeneratorWitness(..) | ty::Infer(_) => {
1344 bug!("Layout::compute: unexpected type `{}`", ty)
1347 ty::Bound(..) | ty::Param(_) | ty::Error(_) => {
1348 return Err(LayoutError::Unknown(ty));
1354 /// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
1355 #[derive(Clone, Debug, PartialEq)]
// NOTE(review): this excerpt is elided; the initial "not yet assigned" variant
// (matched as `Unassigned` elsewhere in this file) is not visible here.
1356 enum SavedLocalEligibility {
// Seen in exactly one variant so far; still a candidate for overlapping
// storage with other locals that never conflict with it.
1358 Assigned(VariantIdx),
1359 // FIXME: Use newtype_index so we aren't wasting bytes
// Not eligible for overlap; `Some(idx)` is the local's slot in the promoted
// generator prefix once the prefix ordering has been fixed.
1360 Ineligible(Option<u32>),
1363 // When laying out generators, we divide our saved local fields into two
1364 // categories: overlap-eligible and overlap-ineligible.
1366 // Those fields which are ineligible for overlap go in a "prefix" at the
1367 // beginning of the layout, and always have space reserved for them.
1369 // Overlap-eligible fields are only assigned to one variant, so we lay
1370 // those fields out for each variant and put them right after the
1373 // Finally, in the layout details, we point to the fields from the
1374 // variants they are assigned to. It is possible for some fields to be
1375 // included in multiple variants. No field ever "moves around" in the
1376 // layout; its offset is always the same.
1378 // Also included in the layout are the upvars and the discriminant.
1379 // These are included as fields on the "outer" layout; they are not part
1381 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
1382 /// Compute the eligibility and assignment of each local.
// Returns the set of overlap-ineligible locals (these get "promoted" into the
// generator's shared prefix) together with the per-local assignment table.
1383 fn generator_saved_local_eligibility(
1385 info: &GeneratorLayout<'tcx>,
1386 ) -> (BitSet<GeneratorSavedLocal>, IndexVec<GeneratorSavedLocal, SavedLocalEligibility>) {
1387 use SavedLocalEligibility::*;
// Every saved local starts out unassigned; the passes below refine this.
1389 let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> =
1390 IndexVec::from_elem_n(Unassigned, info.field_tys.len());
1392 // The saved locals not eligible for overlap. These will get
1393 // "promoted" to the prefix of our generator.
1394 let mut ineligible_locals = BitSet::new_empty(info.field_tys.len());
1396 // Figure out which of our saved locals are fields in only
1397 // one variant. The rest are deemed ineligible for overlap.
1398 for (variant_index, fields) in info.variant_fields.iter_enumerated() {
1399 for local in fields {
1400 match assignments[*local] {
1402 assignments[*local] = Assigned(variant_index);
1405 // We've already seen this local at another suspension
1406 // point, so it is no longer a candidate.
1408 "removing local {:?} in >1 variant ({:?}, {:?})",
1413 ineligible_locals.insert(*local);
1414 assignments[*local] = Ineligible(None);
1421 // Next, check every pair of eligible locals to see if they
// conflict (are storage-live at the same time); if so, demote one of them.
1423 for local_a in info.storage_conflicts.rows() {
1424 let conflicts_a = info.storage_conflicts.count(local_a);
// Already-ineligible locals live in the prefix and need no pairwise check.
1425 if ineligible_locals.contains(local_a) {
1429 for local_b in info.storage_conflicts.iter(local_a) {
1430 // local_a and local_b are storage live at the same time, therefore they
1431 // cannot overlap in the generator layout. The only way to guarantee
1432 // this is if they are in the same variant, or one is ineligible
1433 // (which means it is stored in every variant).
1434 if ineligible_locals.contains(local_b)
1435 || assignments[local_a] == assignments[local_b]
1440 // If they conflict, we will choose one to make ineligible.
1441 // This is not always optimal; it's just a greedy heuristic that
1442 // seems to produce good results most of the time.
1443 let conflicts_b = info.storage_conflicts.count(local_b);
// Demote whichever local participates in more conflicts overall.
1444 let (remove, other) =
1445 if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
1446 ineligible_locals.insert(remove);
1447 assignments[remove] = Ineligible(None);
1448 trace!("removing local {:?} due to conflict with {:?}", remove, other);
1452 // Count the number of variants in use. If only one of them, then it is
1453 // impossible to overlap any locals in our layout. In this case it's
1454 // always better to make the remaining locals ineligible, so we can
1455 // lay them out with the other locals in the prefix and eliminate
1456 // unnecessary padding bytes.
1458 let mut used_variants = BitSet::new_empty(info.variant_fields.len());
1459 for assignment in &assignments {
1460 if let Assigned(idx) = assignment {
1461 used_variants.insert(*idx);
// With fewer than two live variants, overlap buys nothing: push everything
// into the prefix instead.
1464 if used_variants.count() < 2 {
1465 for assignment in assignments.iter_mut() {
1466 *assignment = Ineligible(None);
1468 ineligible_locals.insert_all();
1472 // Write down the order of our locals that will be promoted to the prefix.
// The index recorded here is the local's field position within the prefix;
// the bitset's iteration order fixes that ordering deterministically.
1474 for (idx, local) in ineligible_locals.iter().enumerate() {
1475 assignments[local] = Ineligible(Some(idx as u32));
1478 debug!("generator saved local assignments: {:?}", assignments);
1480 (ineligible_locals, assignments)
1483 /// Compute the full generator layout.
// Lays out a generator as: a common prefix (upvars, tag, promoted locals)
// followed by per-variant suffixes holding the overlap-eligible locals.
// NOTE(review): the `ty` parameter used below is declared on an elided
// signature line of this excerpt.
1484 fn generator_layout(
1487 def_id: hir::def_id::DefId,
1488 substs: SubstsRef<'tcx>,
1489 ) -> Result<&'tcx Layout, LayoutError<'tcx>> {
1490 use SavedLocalEligibility::*;
// Instantiate a saved local's type with this generator's substitutions.
1492 let subst_field = |ty: Ty<'tcx>| ty.subst(tcx, substs);
1494 let info = match tcx.generator_layout(def_id) {
1495 None => return Err(LayoutError::Unknown(ty)),
1498 let (ineligible_locals, assignments) = self.generator_saved_local_eligibility(&info);
1500 // Build a prefix layout, including "promoting" all ineligible
1501 // locals as part of the prefix. We compute the layout of all of
1502 // these fields at once to get optimal packing.
// The tag (discriminant) sits right after the upvar ("prefix") types.
1503 let tag_index = substs.as_generator().prefix_tys().count();
1505 // `info.variant_fields` already accounts for the reserved variants, so no need to add them.
1506 let max_discr = (info.variant_fields.len() - 1) as u128;
1507 let discr_int = Integer::fit_unsigned(max_discr);
1508 let discr_int_ty = discr_int.to_ty(tcx, false);
1509 let tag = Scalar { value: Primitive::Int(discr_int, false), valid_range: 0..=max_discr };
1510 let tag_layout = self.tcx.intern_layout(Layout::scalar(self, tag.clone()));
1511 let tag_layout = TyAndLayout { ty: discr_int_ty, layout: tag_layout };
// Promoted locals are wrapped in `MaybeUninit`: they may be dead in some
// variants, so their bytes must not be assumed initialized.
1513 let promoted_layouts = ineligible_locals
1515 .map(|local| subst_field(info.field_tys[local]))
1516 .map(|ty| tcx.mk_maybe_uninit(ty))
1517 .map(|ty| self.layout_of(ty));
1518 let prefix_layouts = substs
1521 .map(|ty| self.layout_of(ty))
1522 .chain(iter::once(Ok(tag_layout)))
1523 .chain(promoted_layouts)
1524 .collect::<Result<Vec<_>, _>>()?;
1525 let prefix = self.univariant_uninterned(
1528 &ReprOptions::default(),
1529 StructKind::AlwaysSized,
1532 let (prefix_size, prefix_align) = (prefix.size, prefix.align);
1534 // Split the prefix layout into the "outer" fields (upvars and
1535 // discriminant) and the "promoted" fields. Promoted fields will
1536 // get included in each variant that requested them in
1538 debug!("prefix = {:#?}", prefix);
1539 let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
1540 FieldsShape::Arbitrary { mut offsets, memory_index } => {
1541 let mut inverse_memory_index = invert_mapping(&memory_index);
1543 // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
1544 // "outer" and "promoted" fields respectively.
1545 let b_start = (tag_index + 1) as u32;
1546 let offsets_b = offsets.split_off(b_start as usize);
1547 let offsets_a = offsets;
1549 // Disentangle the "a" and "b" components of `inverse_memory_index`
1550 // by preserving the order but keeping only one disjoint "half" each.
1551 // FIXME(eddyb) build a better abstraction for permutations, if possible.
1552 let inverse_memory_index_b: Vec<_> =
1553 inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
1554 inverse_memory_index.retain(|&i| i < b_start);
1555 let inverse_memory_index_a = inverse_memory_index;
1557 // Since `inverse_memory_index_{a,b}` each only refer to their
1558 // respective fields, they can be safely inverted
1559 let memory_index_a = invert_mapping(&inverse_memory_index_a);
1560 let memory_index_b = invert_mapping(&inverse_memory_index_b);
1563 FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
1564 (outer_fields, offsets_b, memory_index_b)
// Running totals: start from the prefix, then grow over each variant.
1569 let mut size = prefix.size;
1570 let mut align = prefix.align;
1574 .map(|(index, variant_fields)| {
1575 // Only include overlap-eligible fields when we compute our variant layout.
1576 let variant_only_tys = variant_fields
1578 .filter(|local| match assignments[**local] {
1579 Unassigned => bug!(),
1580 Assigned(v) if v == index => true,
1581 Assigned(_) => bug!("assignment does not match variant"),
1582 Ineligible(_) => false,
1584 .map(|local| subst_field(info.field_tys[*local]));
// Each variant is laid out as a struct whose fields start after the
// shared prefix (hence `StructKind::Prefixed`).
1586 let mut variant = self.univariant_uninterned(
1589 .map(|ty| self.layout_of(ty))
1590 .collect::<Result<Vec<_>, _>>()?,
1591 &ReprOptions::default(),
1592 StructKind::Prefixed(prefix_size, prefix_align.abi),
1594 variant.variants = Variants::Single { index };
1596 let (offsets, memory_index) = match variant.fields {
1597 FieldsShape::Arbitrary { offsets, memory_index } => (offsets, memory_index),
1601 // Now, stitch the promoted and variant-only fields back together in
1602 // the order they are mentioned by our GeneratorLayout.
1603 // Because we only use some subset (that can differ between variants)
1604 // of the promoted fields, we can't just pick those elements of the
1605 // `promoted_memory_index` (as we'd end up with gaps).
1606 // So instead, we build an "inverse memory_index", as if all of the
1607 // promoted fields were being used, but leave the elements not in the
1608 // subset as `INVALID_FIELD_IDX`, which we can filter out later to
1609 // obtain a valid (bijective) mapping.
1610 const INVALID_FIELD_IDX: u32 = !0;
1611 let mut combined_inverse_memory_index =
1612 vec![INVALID_FIELD_IDX; promoted_memory_index.len() + memory_index.len()];
1613 let mut offsets_and_memory_index = offsets.into_iter().zip(memory_index);
1614 let combined_offsets = variant_fields
1618 let (offset, memory_index) = match assignments[*local] {
1619 Unassigned => bug!(),
// Variant-only fields consume the variant layout's own
// offsets; their memory indices follow all promoted fields.
1621 let (offset, memory_index) =
1622 offsets_and_memory_index.next().unwrap();
1623 (offset, promoted_memory_index.len() as u32 + memory_index)
1625 Ineligible(field_idx) => {
1626 let field_idx = field_idx.unwrap() as usize;
1627 (promoted_offsets[field_idx], promoted_memory_index[field_idx])
1630 combined_inverse_memory_index[memory_index as usize] = i as u32;
1635 // Remove the unused slots and invert the mapping to obtain the
1636 // combined `memory_index` (also see previous comment).
1637 combined_inverse_memory_index.retain(|&i| i != INVALID_FIELD_IDX);
1638 let combined_memory_index = invert_mapping(&combined_inverse_memory_index);
1640 variant.fields = FieldsShape::Arbitrary {
1641 offsets: combined_offsets,
1642 memory_index: combined_memory_index,
1645 size = size.max(variant.size);
1646 align = align.max(variant.align);
1649 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1651 size = size.align_to(align.abi);
// The generator is only a plain aggregate at the ABI level; uninhabitedness
// propagates if the prefix or every variant is uninhabited.
1653 let abi = if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi.is_uninhabited())
1657 Abi::Aggregate { sized: true }
1660 let layout = tcx.intern_layout(Layout {
1661 variants: Variants::Multiple {
1663 tag_encoding: TagEncoding::Direct,
1664 tag_field: tag_index,
1667 fields: outer_fields,
1669 largest_niche: prefix.largest_niche,
1673 debug!("generator layout ({:?}): {:#?}", ty, layout);
1677 /// This is invoked by the `layout_raw` query to record the final
1678 /// layout of each type.
1680 fn record_layout_for_printing(&self, layout: TyAndLayout<'tcx>) {
1681 // If we are running with `-Zprint-type-sizes`, maybe record layouts
1682 // for dumping later.
// Cheap flag check up front so the common (flag-off) path does no work;
// the expensive formatting lives in the outlined helper.
1683 if self.tcx.sess.opts.debugging_opts.print_type_sizes {
1684 self.record_layout_for_printing_outlined(layout)
// Collects per-variant, per-field size/offset/align data for the current
// layout and feeds it into `code_stats` for `-Zprint-type-sizes` output.
1688 fn record_layout_for_printing_outlined(&self, layout: TyAndLayout<'tcx>) {
1689 // Ignore layouts that are done with non-empty environments or
1690 // non-monomorphic layouts, as the user only wants to see the stuff
1691 // resulting from the final codegen session.
// Early-out guard; its body is elided in this excerpt.
1692 if layout.ty.has_param_types_or_consts() || !self.param_env.caller_bounds().is_empty() {
1696 // (delay format until we actually need it)
// Shared recording closure: stringifies the type and forwards one entry to
// the session's `code_stats`.
1697 let record = |kind, packed, opt_discr_size, variants| {
1698 let type_desc = format!("{:?}", layout.ty);
1699 self.tcx.sess.code_stats.record_type_size(
// Only nominal ADTs and closures get recorded; everything else is skipped.
1710 let adt_def = match *layout.ty.kind() {
1711 ty::Adt(ref adt_def, _) => {
1712 debug!("print-type-size t: `{:?}` process adt", layout.ty);
1716 ty::Closure(..) => {
1717 debug!("print-type-size t: `{:?}` record closure", layout.ty);
1718 record(DataTypeKind::Closure, false, None, vec![]);
1723 debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
1728 let adt_kind = adt_def.adt_kind();
1729 let adt_packed = adt_def.repr.pack.is_some();
// Builds a `VariantInfo` for one variant: per-field name/offset/size/align,
// plus a "min size" derived from the furthest field end.
1731 let build_variant_info = |n: Option<Ident>, flds: &[Symbol], layout: TyAndLayout<'tcx>| {
1732 let mut min_size = Size::ZERO;
1733 let field_info: Vec<_> = flds
1736 .map(|(i, &name)| match layout.field(self, i) {
1738 bug!("no layout found for field {}: `{:?}`", name, err);
1740 Ok(field_layout) => {
1741 let offset = layout.fields.offset(i);
1742 let field_end = offset + field_layout.size;
// Track the furthest field end as the variant's minimum size.
1743 if min_size < field_end {
1744 min_size = field_end;
1747 name: name.to_string(),
1748 offset: offset.bytes(),
1749 size: field_layout.size.bytes(),
1750 align: field_layout.align.abi.bytes(),
1757 name: n.map(|n| n.to_string()),
1758 kind: if layout.is_unsized() { SizeKind::Min } else { SizeKind::Exact },
1759 align: layout.align.abi.bytes(),
// Zero `min_size` means no fields advanced it; fall back to the layout size.
1760 size: if min_size.bytes() == 0 { layout.size.bytes() } else { min_size.bytes() },
1765 match layout.variants {
1766 Variants::Single { index } => {
1767 debug!("print-type-size `{:#?}` variant {}", layout, adt_def.variants[index].ident);
1768 if !adt_def.variants.is_empty() {
1769 let variant_def = &adt_def.variants[index];
1770 let fields: Vec<_> = variant_def.fields.iter().map(|f| f.ident.name).collect();
1775 vec![build_variant_info(Some(variant_def.ident), &fields, layout)],
1778 // (This case arises for *empty* enums; so give it
1780 record(adt_kind.into(), adt_packed, None, vec![]);
1784 Variants::Multiple { ref tag, ref tag_encoding, .. } => {
1786 "print-type-size `{:#?}` adt general variants def {}",
1788 adt_def.variants.len()
1790 let variant_infos: Vec<_> = adt_def
1793 .map(|(i, variant_def)| {
1794 let fields: Vec<_> =
1795 variant_def.fields.iter().map(|f| f.ident.name).collect();
1797 Some(variant_def.ident),
// Project the enum layout down to this variant before measuring.
1799 layout.for_variant(self, i),
// Only directly-tagged enums report a discriminant size; niche-encoded
// enums borrow their tag bytes from a payload field.
1806 match tag_encoding {
1807 TagEncoding::Direct => Some(tag.value.size(self)),
1817 /// Type size "skeleton", i.e., the only information determining a type's size.
1818 /// While this is conservative, (aside from constant sizes, only pointers,
1819 /// newtypes thereof and null pointer optimized enums are allowed), it is
1820 /// enough to statically check common use cases of transmute.
1821 #[derive(Copy, Clone, Debug)]
// NOTE(review): the variant payload declarations (a known `Size`, and the
// `non_zero`/`tail` fields documented below) are elided from this excerpt.
1822 pub enum SizeSkeleton<'tcx> {
1823 /// Any statically computable Layout.
1826 /// A potentially-fat pointer.
1828 /// If true, this pointer is never null.
1830 /// The type which determines the unsized metadata, if any,
1831 /// of this pointer. Either a type parameter or a projection
1832 /// depending on one, with regions erased.
1837 impl<'tcx> SizeSkeleton<'tcx> {
1841 param_env: ty::ParamEnv<'tcx>,
1842 ) -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
1843 debug_assert!(!ty.has_infer_types_or_consts());
1845 // First try computing a static layout.
1846 let err = match tcx.layout_of(param_env.and(ty)) {
1848 return Ok(SizeSkeleton::Known(layout.size));
1854 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
1855 let non_zero = !ty.is_unsafe_ptr();
1856 let tail = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
1858 ty::Param(_) | ty::Projection(_) => {
1859 debug_assert!(tail.has_param_types_or_consts());
1860 Ok(SizeSkeleton::Pointer { non_zero, tail: tcx.erase_regions(tail) })
1863 "SizeSkeleton::compute({}): layout errored ({}), yet \
1864 tail `{}` is not a type parameter or a projection",
1872 ty::Adt(def, substs) => {
1873 // Only newtypes and enums w/ nullable pointer optimization.
1874 if def.is_union() || def.variants.is_empty() || def.variants.len() > 2 {
1878 // Get a zero-sized variant or a pointer newtype.
1879 let zero_or_ptr_variant = |i| {
1880 let i = VariantIdx::new(i);
1881 let fields = def.variants[i]
1884 .map(|field| SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env));
1886 for field in fields {
1889 SizeSkeleton::Known(size) => {
1890 if size.bytes() > 0 {
1894 SizeSkeleton::Pointer { .. } => {
1905 let v0 = zero_or_ptr_variant(0)?;
1907 if def.variants.len() == 1 {
1908 if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
1909 return Ok(SizeSkeleton::Pointer {
1911 || match tcx.layout_scalar_valid_range(def.did) {
1912 (Bound::Included(start), Bound::Unbounded) => start > 0,
1913 (Bound::Included(start), Bound::Included(end)) => {
1914 0 < start && start < end
1925 let v1 = zero_or_ptr_variant(1)?;
1926 // Nullable pointer enum optimization.
1928 (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None)
1929 | (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
1930 Ok(SizeSkeleton::Pointer { non_zero: false, tail })
1936 ty::Projection(_) | ty::Opaque(..) => {
1937 let normalized = tcx.normalize_erasing_regions(param_env, ty);
1938 if ty == normalized {
1941 SizeSkeleton::compute(normalized, tcx, param_env)
1949 pub fn same_size(self, other: SizeSkeleton<'_>) -> bool {
1950 match (self, other) {
1951 (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
1952 (SizeSkeleton::Pointer { tail: a, .. }, SizeSkeleton::Pointer { tail: b, .. }) => {
1960 pub trait HasTyCtxt<'tcx>: HasDataLayout {
1961 fn tcx(&self) -> TyCtxt<'tcx>;
1964 pub trait HasParamEnv<'tcx> {
1965 fn param_env(&self) -> ty::ParamEnv<'tcx>;
1968 impl<'tcx> HasDataLayout for TyCtxt<'tcx> {
1969 fn data_layout(&self) -> &TargetDataLayout {
1974 impl<'tcx> HasTyCtxt<'tcx> for TyCtxt<'tcx> {
1975 fn tcx(&self) -> TyCtxt<'tcx> {
1980 impl<'tcx, C> HasParamEnv<'tcx> for LayoutCx<'tcx, C> {
1981 fn param_env(&self) -> ty::ParamEnv<'tcx> {
1986 impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
1987 fn data_layout(&self) -> &TargetDataLayout {
1988 self.tcx.data_layout()
1992 impl<'tcx, T: HasTyCtxt<'tcx>> HasTyCtxt<'tcx> for LayoutCx<'tcx, T> {
1993 fn tcx(&self) -> TyCtxt<'tcx> {
// Crate-local alias binding the target-agnostic `TyAndLayout` wrapper from
// `rustc_target` to this crate's `Ty<'tcx>` as its type representation.
1998 pub type TyAndLayout<'tcx> = rustc_target::abi::TyAndLayout<'tcx, Ty<'tcx>>;
2000 impl<'tcx> LayoutOf for LayoutCx<'tcx, TyCtxt<'tcx>> {
2002 type TyAndLayout = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
2004 /// Computes the layout of a type. Note that this implicitly
2005 /// executes in "reveal all" mode.
2006 fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout {
// Normalize under Reveal::All first so the layout query sees a fully
// normalized, region-erased type.
2007 let param_env = self.param_env.with_reveal_all_normalized(self.tcx);
2008 let ty = self.tcx.normalize_erasing_regions(param_env, ty);
// `layout_raw` is the cached query; `?` propagates any `LayoutError`.
2009 let layout = self.tcx.layout_raw(param_env.and(ty))?;
2010 let layout = TyAndLayout { ty, layout };
2012 // N.B., this recording is normally disabled; when enabled, it
2013 // can however trigger recursive invocations of `layout_of`.
2014 // Therefore, we execute it *after* the main query has
2015 // completed, to avoid problems around recursive structures
2016 // and the like. (Admittedly, I wasn't able to reproduce a problem
2017 // here, but it seems like the right thing to do. -nmatsakis)
2018 self.record_layout_for_printing(layout);
2024 impl LayoutOf for LayoutCx<'tcx, ty::query::TyCtxtAt<'tcx>> {
2026 type TyAndLayout = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
2028 /// Computes the layout of a type. Note that this implicitly
2029 /// executes in "reveal all" mode.
// Same flow as the `LayoutCx<'tcx, TyCtxt<'tcx>>` impl, but here `self.tcx`
// is a `TyCtxtAt`, hence the `*self.tcx` derefs to reach the plain `TyCtxt`.
2030 fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout {
2031 let param_env = self.param_env.with_reveal_all_normalized(*self.tcx);
2032 let ty = self.tcx.normalize_erasing_regions(param_env, ty);
2033 let layout = self.tcx.layout_raw(param_env.and(ty))?;
2034 let layout = TyAndLayout { ty, layout };
2036 // N.B., this recording is normally disabled; when enabled, it
2037 // can however trigger recursive invocations of `layout_of`.
2038 // Therefore, we execute it *after* the main query has
2039 // completed, to avoid problems around recursive structures
2040 // and the like. (Admittedly, I wasn't able to reproduce a problem
2041 // here, but it seems like the right thing to do. -nmatsakis)
// Recording needs a `LayoutCx` over a plain `TyCtxt`, so rebuild one here.
2042 let cx = LayoutCx { tcx: *self.tcx, param_env: self.param_env };
2043 cx.record_layout_for_printing(layout);
2049 // Helper (inherent) `layout_of` methods to avoid pushing `LayoutCx` to users.
2051 /// Computes the layout of a type. Note that this implicitly
2052 /// executes in "reveal all" mode.
2056 param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
2057 ) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
2058 let cx = LayoutCx { tcx: self, param_env: param_env_and_ty.param_env };
2059 cx.layout_of(param_env_and_ty.value)
2063 impl ty::query::TyCtxtAt<'tcx> {
2064 /// Computes the layout of a type. Note that this implicitly
2065 /// executes in "reveal all" mode.
2069 param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
2070 ) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
2071 let cx = LayoutCx { tcx: self.at(self.span), param_env: param_env_and_ty.param_env };
2072 cx.layout_of(param_env_and_ty.value)
2076 impl<'tcx, C> TyAndLayoutMethods<'tcx, C> for Ty<'tcx>
2078 C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout: MaybeResult<TyAndLayout<'tcx>>>
2080 + HasParamEnv<'tcx>,
2083 this: TyAndLayout<'tcx>,
2085 variant_index: VariantIdx,
2086 ) -> TyAndLayout<'tcx> {
2087 let layout = match this.variants {
2088 Variants::Single { index }
2089 // If all variants but one are uninhabited, the variant layout is the enum layout.
2090 if index == variant_index &&
2091 // Don't confuse variants of uninhabited enums with the enum itself.
2092 // For more details see https://github.com/rust-lang/rust/issues/69763.
2093 this.fields != FieldsShape::Primitive =>
2098 Variants::Single { index } => {
2099 // Deny calling for_variant more than once for non-Single enums.
2100 if let Ok(original_layout) = cx.layout_of(this.ty).to_result() {
2101 assert_eq!(original_layout.variants, Variants::Single { index });
2104 let fields = match this.ty.kind() {
2105 ty::Adt(def, _) if def.variants.is_empty() =>
2106 bug!("for_variant called on zero-variant enum"),
2107 ty::Adt(def, _) => def.variants[variant_index].fields.len(),
2111 tcx.intern_layout(Layout {
2112 variants: Variants::Single { index: variant_index },
2113 fields: match NonZeroUsize::new(fields) {
2114 Some(fields) => FieldsShape::Union(fields),
2115 None => FieldsShape::Arbitrary { offsets: vec![], memory_index: vec![] },
2117 abi: Abi::Uninhabited,
2118 largest_niche: None,
2119 align: tcx.data_layout.i8_align,
2124 Variants::Multiple { ref variants, .. } => &variants[variant_index],
2127 assert_eq!(layout.variants, Variants::Single { index: variant_index });
2129 TyAndLayout { ty: this.ty, layout }
2132 fn field(this: TyAndLayout<'tcx>, cx: &C, i: usize) -> C::TyAndLayout {
2133 enum TyMaybeWithLayout<C: LayoutOf> {
2135 TyAndLayout(C::TyAndLayout),
2138 fn ty_and_layout_kind<
2139 C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout: MaybeResult<TyAndLayout<'tcx>>>
2141 + HasParamEnv<'tcx>,
2143 this: TyAndLayout<'tcx>,
2147 ) -> TyMaybeWithLayout<C> {
2149 let tag_layout = |tag: &Scalar| -> C::TyAndLayout {
2150 let layout = Layout::scalar(cx, tag.clone());
2151 MaybeResult::from(Ok(TyAndLayout {
2152 layout: tcx.intern_layout(layout),
2153 ty: tag.value.to_ty(tcx),
2166 | ty::GeneratorWitness(..)
2168 | ty::Dynamic(..) => bug!("TyAndLayout::field_type({:?}): not applicable", this),
2170 // Potentially-fat pointers.
2171 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
2172 assert!(i < this.fields.count());
2174 // Reuse the fat `*T` type as its own thin pointer data field.
2175 // This provides information about, e.g., DST struct pointees
2176 // (which may have no non-DST form), and will work as long
2177 // as the `Abi` or `FieldsShape` is checked by users.
2179 let nil = tcx.mk_unit();
2180 let ptr_ty = if ty.is_unsafe_ptr() {
2183 tcx.mk_mut_ref(tcx.lifetimes.re_static, nil)
2185 return TyMaybeWithLayout::TyAndLayout(MaybeResult::from(
2186 cx.layout_of(ptr_ty).to_result().map(|mut ptr_layout| {
2193 match tcx.struct_tail_erasing_lifetimes(pointee, cx.param_env()).kind() {
2194 ty::Slice(_) | ty::Str => TyMaybeWithLayout::Ty(tcx.types.usize),
2195 ty::Dynamic(_, _) => {
2196 TyMaybeWithLayout::Ty(tcx.mk_imm_ref(
2197 tcx.lifetimes.re_static,
2198 tcx.mk_array(tcx.types.usize, 3),
2200 /* FIXME: use actual fn pointers
2201 Warning: naively computing the number of entries in the
2202 vtable by counting the methods on the trait + methods on
2203 all parent traits does not work, because some methods can
2204 be not object safe and thus excluded from the vtable.
2205 Increase this counter if you tried to implement this but
2206 failed to do it without duplicating a lot of code from
2207 other places in the compiler: 2
2209 tcx.mk_array(tcx.types.usize, 3),
2210 tcx.mk_array(Option<fn()>),
2214 _ => bug!("TyAndLayout::field_type({:?}): not applicable", this),
2218 // Arrays and slices.
2219 ty::Array(element, _) | ty::Slice(element) => TyMaybeWithLayout::Ty(element),
2220 ty::Str => TyMaybeWithLayout::Ty(tcx.types.u8),
2222 // Tuples, generators and closures.
2223 ty::Closure(_, ref substs) => {
2224 ty_and_layout_kind(this, cx, i, substs.as_closure().tupled_upvars_ty())
2227 ty::Generator(def_id, ref substs, _) => match this.variants {
2228 Variants::Single { index } => TyMaybeWithLayout::Ty(
2231 .state_tys(def_id, tcx)
2232 .nth(index.as_usize())
2237 Variants::Multiple { ref tag, tag_field, .. } => {
2239 return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
2241 TyMaybeWithLayout::Ty(substs.as_generator().prefix_tys().nth(i).unwrap())
2245 ty::Tuple(tys) => TyMaybeWithLayout::Ty(tys[i].expect_ty()),
2248 ty::Adt(def, substs) => {
2249 match this.variants {
2250 Variants::Single { index } => {
2251 TyMaybeWithLayout::Ty(def.variants[index].fields[i].ty(tcx, substs))
2254 // Discriminant field for enums (where applicable).
2255 Variants::Multiple { ref tag, .. } => {
2257 return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
2264 | ty::Placeholder(..)
2268 | ty::Error(_) => bug!("TyAndLayout::field_type: unexpected type `{}`", this.ty),
2272 cx.layout_of(match ty_and_layout_kind(this, cx, i, this.ty) {
2273 TyMaybeWithLayout::Ty(result) => result,
2274 TyMaybeWithLayout::TyAndLayout(result) => return result,
// Finds information about a pointer (size/alignment, aliasing kind, address
// space) located at byte `offset` within this layout, recursing into fields
// until the exact pointer-typed value at that offset is found. Returns `None`
// when nothing pointer-like lives at `offset`.
// NOTE(review): this extract is elided (the embedded original line numbers
// jump), so several statements and closing braces are missing from view.
2278 fn pointee_info_at(this: TyAndLayout<'tcx>, cx: &C, offset: Size) -> Option<PointeeInfo> {
// Function pointers live in the target's instruction address space; all
// other pointers are in the data address space.
2279 let addr_space_of_ty = |ty: Ty<'tcx>| {
2280 if ty.is_fn() { cx.data_layout().instruction_address_space } else { AddressSpace::DATA }
// Fast path: the type itself is a pointer sitting at offset 0.
2283 let pointee_info = match *this.ty.kind() {
// Raw pointers: report size/align of the pointee, but no `safe` kind
// (raw pointers carry no aliasing guarantees).
2284 ty::RawPtr(mt) if offset.bytes() == 0 => {
2285 cx.layout_of(mt.ty).to_result().ok().map(|layout| PointeeInfo {
2287 align: layout.align.abi,
2289 address_space: addr_space_of_ty(mt.ty),
2292 ty::FnPtr(fn_sig) if offset.bytes() == 0 => {
2293 cx.layout_of(cx.tcx().mk_fn_ptr(fn_sig)).to_result().ok().map(|layout| {
2296 align: layout.align.abi,
2298 address_space: cx.data_layout().instruction_address_space,
// References: classify the aliasing kind from mutability and freezeness
// so callers can emit LLVM `noalias`/`readonly`-style attributes.
2302 ty::Ref(_, ty, mt) if offset.bytes() == 0 => {
2303 let address_space = addr_space_of_ty(ty);
// `is_freeze` = contains no `UnsafeCell`, i.e. truly immutable via `&T`.
2305 let is_freeze = ty.is_freeze(tcx.at(DUMMY_SP), cx.param_env());
2306 let kind = match mt {
2307 hir::Mutability::Not => {
2314 hir::Mutability::Mut => {
2315 // Previously we would only emit noalias annotations for LLVM >= 6 or in
2316 // panic=abort mode. That was deemed right, as prior versions had many bugs
2317 // in conjunction with unwinding, but later versions didn’t seem to have
2318 // said issues. See issue #31681.
2320 // Alas, later on we encountered a case where noalias would generate wrong
2321 // code altogether even with recent versions of LLVM in *safe* code with no
2322 // unwinding involved. See #54462.
2324 // For now, do not enable mutable_noalias by default at all, while the
2325 // issue is being figured out.
2326 if tcx.sess.opts.debugging_opts.mutable_noalias {
2327 PointerKind::UniqueBorrowed
2334 cx.layout_of(ty).to_result().ok().map(|layout| PointeeInfo {
2336 align: layout.align.abi,
// Not a pointer at the top level: pick the variant whose fields we may
// descend into. For niche-encoded enums the niche itself is the only
// always-initialized data, so only the dataful variant is inspected,
// and only when `offset` is exactly the niche's offset.
2343 let mut data_variant = match this.variants {
2344 // Within the discriminant field, only the niche itself is
2345 // always initialized, so we only check for a pointer at its
2348 // If the niche is a pointer, it's either valid (according
2349 // to its type), or null (which the niche field's scalar
2350 // validity range encodes). This allows using
2351 // `dereferenceable_or_null` for e.g., `Option<&T>`, and
2352 // this will continue to work as long as we don't start
2353 // using more niches than just null (e.g., the first page of
2354 // the address space, or unaligned pointers).
2355 Variants::Multiple {
2356 tag_encoding: TagEncoding::Niche { dataful_variant, .. },
2359 } if this.fields.offset(tag_field) == offset => {
2360 Some(this.for_variant(cx, dataful_variant))
2365 if let Some(variant) = data_variant {
2366 // We're not interested in any unions.
2367 if let FieldsShape::Union(_) = variant.fields {
2368 data_variant = None;
2372 let mut result = None;
// Scan the variant's fields for the one that fully contains the pointer
// span [offset, offset + pointer size), then recurse into it with the
// offset rebased to that field.
2374 if let Some(variant) = data_variant {
2375 let ptr_end = offset + Pointer.size(cx);
2376 for i in 0..variant.fields.count() {
2377 let field_start = variant.fields.offset(i);
2378 if field_start <= offset {
2379 let field = variant.field(cx, i);
2380 result = field.to_result().ok().and_then(|field| {
2381 if ptr_end <= field_start + field.size {
2382 // We found the right field, look inside it.
2384 field.pointee_info_at(cx, offset - field_start);
2390 if result.is_some() {
// FIXME(eddyb) This should be for `ptr::Unique<T>`, not `Box<T>`.
// `Box`'s interior pointer is upgraded to `UniqueOwned` here because
// ownership is transferred with the box.
2397 // FIXME(eddyb) This should be for `ptr::Unique<T>`, not `Box<T>`.
2398 if let Some(ref mut pointee) = result {
2399 if let ty::Adt(def, _) = this.ty.kind() {
2400 if def.is_box() && offset.bytes() == 0 {
2401 pointee.safe = Some(PointerKind::UniqueOwned);
2411 "pointee_info_at (offset={:?}, type kind: {:?}) => {:?}",
// Stable (cross-session) hashing for `LayoutError`, used by incremental
// compilation fingerprinting.
// NOTE(review): the `match` header and closing braces are elided in this
// extract.
2421 impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for LayoutError<'tcx> {
2422 fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
2423 use crate::ty::layout::LayoutError::*;
// Hash the variant discriminant first so `Unknown(t)` and
// `SizeOverflow(t)` with the same payload hash differently.
2424 mem::discriminant(self).hash_stable(hcx, hasher);
// Both visible variants carry a `Ty` payload, which is hashed next.
2427 Unknown(t) | SizeOverflow(t) => t.hash_stable(hcx, hasher),
2432 impl<'tcx> ty::Instance<'tcx> {
2433 // NOTE(eddyb) this is private to avoid using it from outside of
2434 // `FnAbi::of_instance` - any other uses are either too high-level
2435 // for `Instance` (e.g. typeck would use `Ty::fn_sig` instead),
2436 // or should go through `FnAbi` instead, to avoid losing any
2437 // adjustments `FnAbi::of_instance` might be performing.
// Builds the `PolyFnSig` that ABI computation should see for this instance,
// including the synthetic `self`/environment parameters of closures and
// generators that do not appear in their surface signature.
// NOTE(review): interior lines are elided in this extract (original line
// numbers jump), including the closure/generator `match` header and several
// closing braces.
2438 fn fn_sig_for_fn_abi(&self, tcx: TyCtxt<'tcx>) -> ty::PolyFnSig<'tcx> {
2439 // FIXME(davidtwco,eddyb): A `ParamEnv` should be passed through to this function.
2440 let ty = self.ty(tcx, ty::ParamEnv::reveal_all());
2443 // HACK(davidtwco,eddyb): This is a workaround for polymorphization considering
2444 // parameters unused if they show up in the signature, but not in the `mir::Body`
2445 // (i.e. due to being inside a projection that got normalized, see
2446 // `src/test/ui/polymorphization/normalized_sig_types.rs`), and codegen not keeping
2447 // track of a polymorphization `ParamEnv` to allow normalizing later.
2448 let mut sig = match *ty.kind() {
// Normalize under the item's own param-env, then substitute, so projections
// in the signature are resolved before ABI computation.
2449 ty::FnDef(def_id, substs) => tcx
2450 .normalize_erasing_regions(tcx.param_env(def_id), tcx.fn_sig(def_id))
2451 .subst(tcx, substs),
2452 _ => unreachable!(),
2455 if let ty::InstanceDef::VtableShim(..) = self.def {
2456 // Modify `fn(self, ...)` to `fn(self: *mut Self, ...)`.
2457 sig = sig.map_bound(|mut sig| {
2458 let mut inputs_and_output = sig.inputs_and_output.to_vec();
2459 inputs_and_output[0] = tcx.mk_mut_ptr(inputs_and_output[0]);
2460 sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);
// Closures: prepend the captured-environment parameter to the signature.
2466 ty::Closure(def_id, substs) => {
2467 let sig = substs.as_closure().sig();
2469 let env_ty = tcx.closure_env_ty(def_id, substs).unwrap();
2470 sig.map_bound(|sig| {
2472 iter::once(env_ty.skip_binder()).chain(sig.inputs().iter().cloned()),
// Generators: the resume function takes `Pin<&mut Self>` plus the resume
// argument, and returns `GeneratorState<Yield, Return>`.
2480 ty::Generator(_, substs, _) => {
2481 let sig = substs.as_generator().poly_sig();
// Fresh late-bound environment region for the `&mut self` borrow.
2483 let br = ty::BoundRegion { kind: ty::BrEnv };
2484 let env_region = ty::ReLateBound(ty::INNERMOST, br);
2485 let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty);
// Wrap the receiver in the `Pin` lang item: `Pin<&mut Self>`.
2487 let pin_did = tcx.require_lang_item(LangItem::Pin, None);
2488 let pin_adt_ref = tcx.adt_def(pin_did);
2489 let pin_substs = tcx.intern_substs(&[env_ty.into()]);
2490 let env_ty = tcx.mk_adt(pin_adt_ref, pin_substs);
2492 sig.map_bound(|sig| {
// Return type is `GeneratorState<yield_ty, return_ty>`.
2493 let state_did = tcx.require_lang_item(LangItem::GeneratorState, None);
2494 let state_adt_ref = tcx.adt_def(state_did);
2496 tcx.intern_substs(&[sig.yield_ty.into(), sig.return_ty.into()]);
2497 let ret_ty = tcx.mk_adt(state_adt_ref, state_substs);
2500 [env_ty, sig.resume_ty].iter(),
2503 hir::Unsafety::Normal,
2504 rustc_target::spec::abi::Abi::Rust,
2508 _ => bug!("unexpected type {:?} in Instance::fn_sig", ty),
// Extension trait providing `FnAbi` constructors for layout-capable contexts.
// NOTE(review): the trait's bound list and the `new_internal` declaration
// header are partially elided in this extract.
2513 pub trait FnAbiExt<'tcx, C>
2515 C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout = TyAndLayout<'tcx>>
2519 + HasParamEnv<'tcx>,
2521 /// Compute a `FnAbi` suitable for indirect calls, i.e. to `fn` pointers.
2523 /// NB: this doesn't handle virtual calls - those should use `FnAbi::of_instance`
2524 /// instead, where the instance is a `InstanceDef::Virtual`.
2525 fn of_fn_ptr(cx: &C, sig: ty::PolyFnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self;
2527 /// Compute a `FnAbi` suitable for declaring/defining an `fn` instance, and for
2528 /// direct calls to an `fn`.
2530 /// NB: that includes virtual calls, which are represented by "direct calls"
2531 /// to a `InstanceDef::Virtual` instance (of `<dyn Trait as Trait>::fn`).
2532 fn of_instance(cx: &C, instance: ty::Instance<'tcx>, extra_args: &[Ty<'tcx>]) -> Self;
// Shared worker for the two constructors above; `make_self_ptr_thin` is set
// for virtual calls, where the receiver is passed as a thin data pointer.
2536 sig: ty::PolyFnSig<'tcx>,
2537 extra_args: &[Ty<'tcx>],
2538 caller_location: Option<Ty<'tcx>>,
2539 codegen_fn_attr_flags: CodegenFnAttrFlags,
2540 make_self_ptr_thin: bool,
// Applies per-ABI argument fixups (indirect passing, casts) after construction.
2542 fn adjust_for_abi(&mut self, cx: &C, abi: SpecAbi);
// Decides whether a function with the given panic strategy, codegen attribute
// flags, and calling convention may unwind (drives LLVM `nounwind`).
// NOTE(review): the opening `fn fn_can_unwind(` line and the early-return
// bodies of the first branches are elided in this extract.
2546 panic_strategy: PanicStrategy,
2547 codegen_fn_attr_flags: CodegenFnAttrFlags,
2550 if panic_strategy != PanicStrategy::Unwind {
2551 // In panic=abort mode we assume nothing can unwind anywhere, so
2552 // optimize based on this!
// An explicit `#[unwind]` attribute overrides the default for this ABI.
2554 } else if codegen_fn_attr_flags.contains(CodegenFnAttrFlags::UNWIND) {
2555 // If a specific #[unwind] attribute is present, use that.
2557 } else if codegen_fn_attr_flags.contains(CodegenFnAttrFlags::RUSTC_ALLOCATOR_NOUNWIND) {
2558 // Special attribute for allocator functions, which can't unwind.
// No attribute applies: fall back on the calling convention.
2561 if call_conv == Conv::Rust {
2562 // Any Rust method (or `extern "Rust" fn` or `extern
2563 // "rust-call" fn`) is explicitly allowed to unwind
2564 // (unless it has no-unwind attribute, handled above).
2567 // Anything else is either:
2569 // 1. A foreign item using a non-Rust ABI (like `extern "C" { fn foo(); }`), or
2571 // 2. A Rust item using a non-Rust ABI (like `extern "C" fn foo() { ... }`).
2573 // Foreign items (case 1) are assumed to not unwind; it is
2574 // UB otherwise. (At least for now; see also
2575 // rust-lang/rust#63909 and Rust RFC 2753.)
2577 // Items defined in Rust with non-Rust ABIs (case 2) are also
2578 // not supposed to unwind. Whether this should be enforced
2579 // (versus stating it is UB) and *how* it would be enforced
2580 // is currently under discussion; see rust-lang/rust#58794.
2582 // In either case, we mark item as explicitly nounwind.
// `FnAbiExt` implementation for the target-level `call::FnAbi`.
// NOTE(review): part of the bound list is elided in this extract.
2588 impl<'tcx, C> FnAbiExt<'tcx, C> for call::FnAbi<'tcx, Ty<'tcx>>
2590 C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout = TyAndLayout<'tcx>>
2594 + HasParamEnv<'tcx>,
// ABI for indirect calls through an `fn` pointer: no caller-location,
// no thin-self adjustment, and conservatively assumed to unwind.
2596 fn of_fn_ptr(cx: &C, sig: ty::PolyFnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
2597 // Assume that fn pointers may always unwind
2598 let codegen_fn_attr_flags = CodegenFnAttrFlags::UNWIND;
2600 call::FnAbi::new_internal(cx, sig, extra_args, None, codegen_fn_attr_flags, false)
// ABI for declaring/defining (or directly calling) a concrete instance:
// derives the signature from the instance, appends the hidden
// `#[track_caller]` location argument when required, and forwards the
// instance's real codegen attribute flags.
// NOTE(review): the `else { None }` arm of the caller-location binding and
// most of the `new_internal` argument list are elided in this extract.
2603 fn of_instance(cx: &C, instance: ty::Instance<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
2604 let sig = instance.fn_sig_for_fn_abi(cx.tcx());
2606 let caller_location = if instance.def.requires_caller_location(cx.tcx()) {
2607 Some(cx.tcx().caller_location_ty())
2612 let attrs = cx.tcx().codegen_fn_attrs(instance.def_id()).flags;
2614 call::FnAbi::new_internal(
// Virtual instances get the thin-self-pointer treatment (the vtable is
// not a real argument of the virtual fn).
2620 matches!(instance.def, ty::InstanceDef::Virtual(..)),
// Worker that actually builds the `FnAbi`: normalizes the signature, maps the
// Rust-level ABI to a target calling convention, untuples "rust-call"
// arguments, applies per-target quirks, computes per-argument attributes, and
// finally runs `adjust_for_abi`.
// NOTE(review): the opening `fn new_internal(` line is elided in this
// extract, as are many interior lines (the original line numbers jump).
2626 sig: ty::PolyFnSig<'tcx>,
2627 extra_args: &[Ty<'tcx>],
2628 caller_location: Option<Ty<'tcx>>,
2629 codegen_fn_attr_flags: CodegenFnAttrFlags,
2630 force_thin_self_ptr: bool,
2632 debug!("FnAbi::new_internal({:?}, {:?})", sig, extra_args);
// Erase late-bound regions; ABI computation never cares about lifetimes.
2634 let sig = cx.tcx().normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), sig);
// Map the declared (target-adjusted) ABI onto a backend calling convention.
2636 use rustc_target::spec::abi::Abi::*;
2637 let conv = match cx.tcx().sess.target.adjust_abi(sig.abi) {
2638 RustIntrinsic | PlatformIntrinsic | Rust | RustCall => Conv::Rust,
2640 // It's the ABI's job to select this, not ours.
2641 System => bug!("system abi should be selected elsewhere"),
2642 EfiApi => bug!("eficall abi should be selected elsewhere"),
2644 Stdcall => Conv::X86Stdcall,
2645 Fastcall => Conv::X86Fastcall,
2646 Vectorcall => Conv::X86VectorCall,
2647 Thiscall => Conv::X86ThisCall,
2649 Unadjusted => Conv::C,
2650 Win64 => Conv::X86_64Win64,
2651 SysV64 => Conv::X86_64SysV,
2652 Aapcs => Conv::ArmAapcs,
2653 PtxKernel => Conv::PtxKernel,
2654 Msp430Interrupt => Conv::Msp430Intr,
2655 X86Interrupt => Conv::X86Intr,
2656 AmdGpuKernel => Conv::AmdGpuKernel,
2657 AvrInterrupt => Conv::AvrInterrupt,
2658 AvrNonBlockingInterrupt => Conv::AvrNonBlockingInterrupt,
2660 // These API constants ought to be more specific...
// "rust-call" functions receive their trailing tuple argument untupled:
// strip the tuple from `inputs` and expose its elements as `extra_args`.
2664 let mut inputs = sig.inputs();
2665 let extra_args = if sig.abi == RustCall {
2666 assert!(!sig.c_variadic && extra_args.is_empty());
2668 if let Some(input) = sig.inputs().last() {
2669 if let ty::Tuple(tupled_arguments) = input.kind() {
2670 inputs = &sig.inputs()[0..sig.inputs().len() - 1];
2671 tupled_arguments.iter().map(|k| k.expect_ty()).collect()
2674 "argument to function with \"rust-call\" ABI \
2680 "argument to function with \"rust-call\" ABI \
// Non-rust-call: extra args are only legal for C-variadic signatures.
2685 assert!(sig.c_variadic || extra_args.is_empty());
// Per-target quirk flags; these targets do NOT ignore zero-sized
// arguments, so ZST-skipping must be suppressed for them below.
2689 let target = &cx.tcx().sess.target;
2690 let target_env_gnu_like = matches!(&target.env[..], "gnu" | "musl");
2691 let win_x64_gnu = target.os == "windows" && target.arch == "x86_64" && target.env == "gnu";
2692 let linux_s390x_gnu_like =
2693 target.os == "linux" && target.arch == "s390x" && target_env_gnu_like;
2694 let linux_sparc64_gnu_like =
2695 target.os == "linux" && target.arch == "sparc64" && target_env_gnu_like;
2696 let linux_powerpc_gnu_like =
2697 target.os == "linux" && target.arch == "powerpc" && target_env_gnu_like;
2698 let rust_abi = matches!(sig.abi, RustIntrinsic | PlatformIntrinsic | Rust | RustCall);
2700 // Handle safe Rust thin and fat pointers.
// Per-scalar attribute computation: zero-extend bools, then (for pointer
// scalars) derive NonNull/NoAlias/ReadOnly/dereferenceable-style
// attributes from the scalar's valid range and `pointee_info_at`.
2701 let adjust_for_rust_scalar = |attrs: &mut ArgAttributes,
2703 layout: TyAndLayout<'tcx>,
2706 // Booleans are always an i1 that needs to be zero-extended.
2707 if scalar.is_bool() {
2708 attrs.ext(ArgExtension::Zext);
2712 // Only pointer types handled below.
2713 if scalar.value != Pointer {
// A valid range excluding 0 means the pointer is provably non-null.
2717 if scalar.valid_range.start() < scalar.valid_range.end() {
2718 if *scalar.valid_range.start() > 0 {
2719 attrs.set(ArgAttribute::NonNull);
2723 if let Some(pointee) = layout.pointee_info_at(cx, offset) {
2724 if let Some(kind) = pointee.safe {
2725 attrs.pointee_align = Some(pointee.align);
2727 // `Box` (`UniqueBorrowed`) are not necessarily dereferenceable
2728 // for the entire duration of the function as they can be deallocated
2729 // at any time. Set their valid size to 0.
2730 attrs.pointee_size = match kind {
2731 PointerKind::UniqueOwned => Size::ZERO,
2735 // `Box` pointer parameters never alias because ownership is transferred
2736 // `&mut` pointer parameters never alias other parameters,
2737 // or mutable global data
2739 // `&T` where `T` contains no `UnsafeCell<U>` is immutable,
2740 // and can be marked as both `readonly` and `noalias`, as
2741 // LLVM's definition of `noalias` is based solely on memory
2742 // dependencies rather than pointer equality
2743 let no_alias = match kind {
2744 PointerKind::Shared => false,
2745 PointerKind::UniqueOwned => true,
// Returned borrows may alias, so noalias only applies to parameters.
2746 PointerKind::Frozen | PointerKind::UniqueBorrowed => !is_return,
2749 attrs.set(ArgAttribute::NoAlias);
2752 if kind == PointerKind::Frozen && !is_return {
2753 attrs.set(ArgAttribute::ReadOnly);
// Builds the ArgAbi for one argument (or the return place, when
// `arg_idx` is `None`), applying the thin-self and ZST-skip rules.
2759 let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| {
2760 let is_return = arg_idx.is_none();
2762 let layout = cx.layout_of(ty);
2763 let layout = if force_thin_self_ptr && arg_idx == Some(0) {
2764 // Don't pass the vtable, it's not an argument of the virtual fn.
2765 // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
2766 // or `&/&mut dyn Trait` because this is special-cased elsewhere in codegen
2767 make_thin_self_ptr(cx, layout)
2772 let mut arg = ArgAbi::new(cx, layout, |layout, scalar, offset| {
2773 let mut attrs = ArgAttributes::new();
2774 adjust_for_rust_scalar(&mut attrs, scalar, *layout, offset, is_return);
// ZST arguments are normally ignored for Rust ABIs, except on the
// targets flagged above, whose C ABIs still pass them.
2778 if arg.layout.is_zst() {
2779 // For some forsaken reason, x86_64-pc-windows-gnu
2780 // doesn't ignore zero-sized struct arguments.
2781 // The same is true for {s390x,sparc64,powerpc}-unknown-linux-{gnu,musl}.
2785 && !linux_s390x_gnu_like
2786 && !linux_sparc64_gnu_like
2787 && !linux_powerpc_gnu_like)
2789 arg.mode = PassMode::Ignore;
// Assemble the FnAbi: return place, declared inputs, untupled extra
// args, plus the optional hidden caller-location argument.
2796 let mut fn_abi = FnAbi {
2797 ret: arg_of(sig.output(), None),
2802 .chain(caller_location)
2804 .map(|(i, ty)| arg_of(ty, Some(i)))
2806 c_variadic: sig.c_variadic,
// `fixed_count` excludes the untupled/variadic extras.
2807 fixed_count: inputs.len(),
2809 can_unwind: fn_can_unwind(cx.tcx().sess.panic_strategy(), codegen_fn_attr_flags, conv),
2811 fn_abi.adjust_for_abi(cx, sig.abi);
2812 debug!("FnAbi::new_internal = {:?}", fn_abi);
// Applies per-ABI argument fixups: for Rust ABIs, decides value-vs-indirect
// passing and integer casts in-house; all other ABIs defer to the
// target-specific C ABI adjustment.
// NOTE(review): interior lines (several match arms, early returns, closing
// braces) are elided in this extract.
2816 fn adjust_for_abi(&mut self, cx: &C, abi: SpecAbi) {
// `extern "unadjusted"` means: leave everything exactly as computed.
2817 if abi == SpecAbi::Unadjusted {
2821 if abi == SpecAbi::Rust
2822 || abi == SpecAbi::RustCall
2823 || abi == SpecAbi::RustIntrinsic
2824 || abi == SpecAbi::PlatformIntrinsic
2826 let fixup = |arg: &mut ArgAbi<'tcx, Ty<'tcx>>| {
2827 if arg.is_ignore() {
2831 match arg.layout.abi {
2832 Abi::Aggregate { .. } => {}
2834 // This is a fun case! The gist of what this is doing is
2835 // that we want callers and callees to always agree on the
2836 // ABI of how they pass SIMD arguments. If we were to *not*
2837 // make these arguments indirect then they'd be immediates
2838 // in LLVM, which means that they'd used whatever the
2839 // appropriate ABI is for the callee and the caller. That
2840 // means, for example, if the caller doesn't have AVX
2841 // enabled but the callee does, then passing an AVX argument
2842 // across this boundary would cause corrupt data to show up.
2844 // This problem is fixed by unconditionally passing SIMD
2845 // arguments through memory between callers and callees
2846 // which should get them all to agree on ABI regardless of
2847 // target feature sets. Some more information about this
2848 // issue can be found in #44367.
2850 // Note that the platform intrinsic ABI is exempt here as
2851 // that's how we connect up to LLVM and it's unstable
2852 // anyway, we control all calls to it in libstd.
2854 if abi != SpecAbi::PlatformIntrinsic
2855 && cx.tcx().sess.target.simd_types_indirect =>
2857 arg.make_indirect();
2864 // Pass and return structures up to 2 pointers in size by value, matching `ScalarPair`.
2865 // LLVM will usually pass these in 2 registers, which is more efficient than by-ref.
2866 let max_by_val_size = Pointer.size(cx) * 2;
2867 let size = arg.layout.size;
2869 if arg.layout.is_unsized() || size > max_by_val_size {
2870 arg.make_indirect();
2872 // We want to pass small aggregates as immediates, but using
2873 // a LLVM aggregate type for this leads to bad optimizations,
2874 // so we pick an appropriately sized integer type instead.
2875 arg.cast_to(Reg { kind: RegKind::Integer, size });
// Apply the fixup to the return place and every argument.
2878 fixup(&mut self.ret);
2879 for arg in &mut self.args {
// Non-Rust ABI: delegate to the per-target C ABI code; failure there
// is unrecoverable.
2885 if let Err(msg) = self.adjust_for_cabi(cx, abi) {
2886 cx.tcx().sess.fatal(&msg);
// For virtual calls: converts the layout of a fat `self` pointer (or a
// pointer-like wrapper such as `Rc<Self>`/`Box<Self>`) into the layout of a
// thin pointer (`*mut ()`), while keeping the original fat-pointer TYPE so
// the rest of codegen can still recognize it as a `dyn Trait` receiver.
// NOTE(review): interior lines are elided in this extract, including the
// `else` branch that pattern-matches non-unsized receivers.
2891 fn make_thin_self_ptr<'tcx, C>(cx: &C, mut layout: TyAndLayout<'tcx>) -> TyAndLayout<'tcx>
2893 C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout = TyAndLayout<'tcx>>
2895 + HasParamEnv<'tcx>,
2897 let fat_pointer_ty = if layout.is_unsized() {
2898 // unsized `self` is passed as a pointer to `self`
2899 // FIXME (mikeyhew) change this to use &own if it is ever added to the language
2900 cx.tcx().mk_mut_ptr(layout.ty)
// Sized receivers must themselves be fat-pointer-shaped (ScalarPair).
2903 Abi::ScalarPair(..) => (),
2904 _ => bug!("receiver type has unsupported layout: {:?}", layout),
2907 // In the case of Rc<Self>, we need to explicitly pass a *mut RcBox<Self>
2908 // with a Scalar (not ScalarPair) ABI. This is a hack that is understood
2909 // elsewhere in the compiler as a method on a `dyn Trait`.
2910 // To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes until we
2911 // get a built-in pointer type
2912 let mut fat_pointer_layout = layout;
// Descend through newtype wrappers: follow the single non-ZST field at
// each level until a raw or reference pointer is reached.
2913 'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr()
2914 && !fat_pointer_layout.ty.is_region_ptr()
2916 for i in 0..fat_pointer_layout.fields.count() {
2917 let field_layout = fat_pointer_layout.field(cx, i);
2919 if !field_layout.is_zst() {
2920 fat_pointer_layout = field_layout;
2921 continue 'descend_newtypes;
// A pointer-like receiver must contain a pointer somewhere; anything
// else is a compiler bug.
2925 bug!("receiver has no non-zero-sized fields {:?}", fat_pointer_layout);
2928 fat_pointer_layout.ty
2931 // we now have a type like `*mut RcBox<dyn Trait>`
2932 // change its layout to that of `*mut ()`, a thin pointer, but keep the same type
2933 // this is understood as a special case elsewhere in the compiler
2934 let unit_pointer_ty = cx.tcx().mk_mut_ptr(cx.tcx().mk_unit());
2935 layout = cx.layout_of(unit_pointer_ty);
// Keep the fat-pointer type with the thin-pointer layout: codegen
// special-cases this combination for virtual receivers.
2936 layout.ty = fat_pointer_ty;