1 use crate::ich::StableHashingContext;
2 use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
3 use crate::mir::{GeneratorLayout, GeneratorSavedLocal};
4 use crate::ty::subst::Subst;
5 use crate::ty::{self, subst::SubstsRef, ReprOptions, Ty, TyCtxt, TypeFoldable};
7 use rustc_ast::{self as ast, IntTy, UintTy};
8 use rustc_attr as attr;
9 use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
11 use rustc_hir::lang_items::LangItem;
12 use rustc_index::bit_set::BitSet;
13 use rustc_index::vec::{Idx, IndexVec};
14 use rustc_session::{DataTypeKind, FieldInfo, SizeKind, VariantInfo};
15 use rustc_span::symbol::{Ident, Symbol};
16 use rustc_span::DUMMY_SP;
17 use rustc_target::abi::call::{
18 ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, Conv, FnAbi, PassMode, Reg, RegKind,
20 use rustc_target::abi::*;
21 use rustc_target::spec::{abi::Abi as SpecAbi, HasTargetSpec, PanicStrategy};
27 use std::num::NonZeroUsize;
// Extension trait adding rustc-specific conveniences to `rustc_target::abi::Integer`.
// NOTE(review): this listing is missing interleaved lines; the rest of the trait
// body (e.g. the `repr_discr` declaration implemented below) is not visible here.
30 pub trait IntegerExt {
// Converts this integer width plus a signedness flag to the interned Rust integer `Ty`.
31 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx>;
// Maps an `attr::IntType` (from a `#[repr(...)]` attribute) to an abi `Integer`.
32 fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
// Implementation of `IntegerExt` for `rustc_target::abi::Integer`.
// NOTE(review): interleaved lines are missing from this listing (original line
// numbers jump), including closing braces and the `repr_discr` signature.
42 impl IntegerExt for Integer {
// Maps an (integer width, signedness) pair to the interned primitive integer `Ty`.
43 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx> {
44 match (*self, signed) {
45 (I8, false) => tcx.types.u8,
46 (I16, false) => tcx.types.u16,
47 (I32, false) => tcx.types.u32,
48 (I64, false) => tcx.types.u64,
49 (I128, false) => tcx.types.u128,
50 (I8, true) => tcx.types.i8,
51 (I16, true) => tcx.types.i16,
52 (I32, true) => tcx.types.i32,
53 (I64, true) => tcx.types.i64,
54 (I128, true) => tcx.types.i128,
58 /// Gets the Integer type from an attr::IntType.
59 fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
60 let dl = cx.data_layout();
// Signed and unsigned variants of the same bit width map to the same `Integer`.
63 attr::SignedInt(IntTy::I8) | attr::UnsignedInt(UintTy::U8) => I8,
64 attr::SignedInt(IntTy::I16) | attr::UnsignedInt(UintTy::U16) => I16,
65 attr::SignedInt(IntTy::I32) | attr::UnsignedInt(UintTy::U32) => I32,
66 attr::SignedInt(IntTy::I64) | attr::UnsignedInt(UintTy::U64) => I64,
67 attr::SignedInt(IntTy::I128) | attr::UnsignedInt(UintTy::U128) => I128,
// `isize`/`usize` are pointer-sized, as given by the target's data layout.
68 attr::SignedInt(IntTy::Isize) | attr::UnsignedInt(UintTy::Usize) => {
69 dl.ptr_sized_integer()
74 /// Finds the appropriate Integer type and signedness for the given
75 /// signed discriminant range and `#[repr]` attribute.
76 /// N.B.: `u128` values above `i128::MAX` will be treated as signed, but
77 /// that shouldn't affect anything, other than maybe debuginfo.
84 ) -> (Integer, bool) {
85 // Theoretically, negative values could be larger in unsigned representation
86 // than the unsigned representation of the signed minimum. However, if there
87 // are any negative values, the only valid unsigned representation is u128
88 // which can fit all i128 values, so the result remains unaffected.
89 let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
90 let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));
92 let mut min_from_extern = None;
// An explicit `#[repr(intN)]` hint wins, but it must be able to hold the whole
// discriminant range — otherwise this is a hard error (bug! not visible here).
95 if let Some(ity) = repr.int {
96 let discr = Integer::from_attr(&tcx, ity);
97 let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
100 "Integer::repr_discr: `#[repr]` hint too small for \
101 discriminant range of enum `{}",
105 return (discr, ity.is_signed());
// NOTE(review): presumably this arm runs for `#[repr(C)]` enums, choosing a
// C-ABI minimum discriminant size — the guarding condition is not visible
// in this listing; confirm against the full source.
109 match &tcx.sess.target.arch[..] {
110 // WARNING: the ARM EABI has two variants; the one corresponding
111 // to `at_least == I32` appears to be used on Linux and NetBSD,
112 // but some systems may use the variant corresponding to no
113 // lower bound. However, we don't run on those yet...?
114 "arm" => min_from_extern = Some(I32),
115 _ => min_from_extern = Some(I32),
119 let at_least = min_from_extern.unwrap_or(min_default);
// Prefer the unsigned representation when all discriminant values fit unsigned.
121 // If there are no negative values, we can use the unsigned fit.
123 (cmp::max(unsigned_fit, at_least), false)
125 (cmp::max(signed_fit, at_least), true)
// Extension trait adding type-conversion helpers to `rustc_target::abi::Primitive`.
130 pub trait PrimitiveExt {
// Converts this primitive to the corresponding Rust `Ty` (see impl for mapping).
131 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
// Converts this primitive to an *integer* `Ty`; bugs out on floats (see impl).
132 fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
// NOTE(review): interleaved lines (match openers, closing braces) are missing
// from this listing.
135 impl PrimitiveExt for Primitive {
// Maps each ABI primitive to a Rust type; pointers become `*mut ()`.
136 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
138 Int(i, signed) => i.to_ty(tcx, signed),
139 F32 => tcx.types.f32,
140 F64 => tcx.types.f64,
141 Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
145 /// Return an *integer* type matching this primitive.
146 /// Useful in particular when dealing with enum discriminants.
// NOTE(review): the trait declares `to_int_ty<'tcx>` but the `<'tcx>` parameter
// is not visible on this line — likely dropped by the extraction; verify.
147 fn to_int_ty(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
149 Int(i, signed) => i.to_ty(tcx, signed),
// Pointer-sized discriminants are represented as `usize`.
150 Pointer => tcx.types.usize,
151 F32 | F64 => bug!("floats do not have an int type"),
// Field indices into the two-word representation of wide ("fat") pointers,
// for use when indexing into a fat-pointer layout's fields.
156 /// The first half of a fat pointer.
158 /// - For a trait object, this is the address of the box.
159 /// - For a slice, this is the base address.
160 pub const FAT_PTR_ADDR: usize = 0;
162 /// The second half of a fat pointer.
164 /// - For a trait object, this is the address of the vtable.
165 /// - For a slice, this is the length.
166 pub const FAT_PTR_EXTRA: usize = 1;
// Errors produced while computing a type's layout.
// NOTE(review): the `Unknown(Ty)` variant (matched in the `Display` impl in the
// original file) is not visible in this listing; a line appears to be missing.
168 #[derive(Copy, Clone, Debug, TyEncodable, TyDecodable)]
169 pub enum LayoutError<'tcx> {
// The computed size would exceed what the target can represent.
171 SizeOverflow(Ty<'tcx>),
// Human-readable rendering of layout errors, used in diagnostics.
174 impl<'tcx> fmt::Display for LayoutError<'tcx> {
175 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// NOTE(review): the `match` opener is not visible in this listing.
177 LayoutError::Unknown(ty) => write!(f, "the type `{}` has an unknown layout", ty),
178 LayoutError::SizeOverflow(ty) => {
179 write!(f, "values of the type `{}` are too big for the current architecture", ty)
// Body of the `layout_raw` query provider (its opening signature lines are not
// visible in this listing). Checks the recursion limit, then computes the
// layout of `ty` under `param_env` in a context with incremented depth.
187 query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
188 ) -> Result<&'tcx Layout, LayoutError<'tcx>> {
189 ty::tls::with_related_context(tcx, move |icx| {
190 let (param_env, ty) = query.into_parts();
// Fatal (session) error — not a panic — when layout recursion exceeds the
// configured recursion limit, e.g. for infinitely recursive types.
192 if !tcx.sess.recursion_limit().value_within_limit(icx.layout_depth) {
193 tcx.sess.fatal(&format!("overflow representing the type `{}`", ty));
196 // Update the ImplicitCtxt to increase the layout_depth
197 let icx = ty::tls::ImplicitCtxt { layout_depth: icx.layout_depth + 1, ..icx.clone() };
199 ty::tls::enter_context(&icx, |_| {
200 let cx = LayoutCx { tcx, param_env };
201 let layout = cx.layout_raw_uncached(ty);
202 // Type-level uninhabitedness should always imply ABI uninhabitedness.
203 if let Ok(layout) = layout {
204 if ty.conservative_is_privately_uninhabited(tcx) {
205 assert!(layout.abi.is_uninhabited());
// Registers this module's query providers (currently just `layout_raw`)
// into the global `Providers` table, preserving all other entries.
213 pub fn provide(providers: &mut ty::query::Providers) {
214 *providers = ty::query::Providers { layout_raw, ..*providers };
// Context for layout computation: pairs a context value `C` with the
// `ParamEnv` to evaluate under.
// NOTE(review): a `tcx: C` field line is not visible in this listing, but the
// `LayoutCx { tcx, param_env }` construction in `layout_raw` implies it exists.
217 pub struct LayoutCx<'tcx, C> {
219 pub param_env: ty::ParamEnv<'tcx>,
// How a univariant (single-variant) layout should be computed.
// NOTE(review): the enum declaration line and the first two variant names
// (matched elsewhere as `AlwaysSized` and `MaybeUnsized`) are not visible
// in this listing.
222 #[derive(Copy, Clone, Debug)]
224 /// A tuple, closure, or univariant which cannot be coerced to unsized.
226 /// A univariant, the last field of which may be coerced to unsized.
228 /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
229 Prefixed(Size, Align),
// Inverts a permutation: given `map` with `map[x] = y`, returns `inv` such that
// `inv[y] = x`. Layout code uses this to convert between `memory_index`
// (source field order -> memory order) and `inverse_memory_index`
// (memory order -> source field order); see `FieldsShape::Arbitrary::memory_index`.
// FIXME(eddyb) build a better abstraction for permutations, if possible.
fn invert_mapping(map: &[u32]) -> Vec<u32> {
    let mut inverse = vec![0; map.len()];
    for (source, &mem) in map.iter().enumerate() {
        inverse[mem as usize] = source as u32;
    }
    inverse
}
// NOTE(review): interleaved lines are missing from this listing; the tail of
// the returned `Layout` literal and closing braces are not visible.
245 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
// Computes the layout of a two-scalar aggregate (`Abi::ScalarPair`): `a` sits
// at offset zero, `b` at the next offset aligned for `b`; overall alignment is
// the max of both scalars' alignments and the aggregate alignment.
246 fn scalar_pair(&self, a: Scalar, b: Scalar) -> Layout {
247 let dl = self.data_layout();
248 let b_align = b.value.align(dl);
249 let align = a.value.align(dl).max(b_align).max(dl.aggregate_align);
250 let b_offset = a.value.size(dl).align_to(b_align.abi);
// Total size: end of `b`, rounded up to the pair's alignment.
251 let size = (b_offset + b.value.size(dl)).align_to(align.abi);
253 // HACK(nox): We iter on `b` and then `a` because `max_by_key`
254 // returns the last maximum.
255 let largest_niche = Niche::from_scalar(dl, b_offset, b.clone())
257 .chain(Niche::from_scalar(dl, Size::ZERO, a.clone()))
258 .max_by_key(|niche| niche.available(dl));
261 variants: Variants::Single { index: VariantIdx::new(0) },
262 fields: FieldsShape::Arbitrary {
263 offsets: vec![Size::ZERO, b_offset],
264 memory_index: vec![0, 1],
266 abi: Abi::ScalarPair(a, b),
// Computes the (uninterned) layout of a single-variant aggregate: field
// offsets, alignment, size, largest niche, and ABI, honoring `repr`
// (packing, forced alignment, field-reordering inhibition) and `kind`
// (always-sized / maybe-unsized / prefixed).
// NOTE(review): many interleaved lines are missing from this listing
// (original line numbers jump): parts of the signature, match openers,
// else-branches, and closing braces are not visible.
273 fn univariant_uninterned(
276 fields: &[TyAndLayout<'_>],
279 ) -> Result<Layout, LayoutError<'tcx>> {
280 let dl = self.data_layout();
281 let pack = repr.pack;
// `#[repr(packed)]` and `#[repr(align)]` are mutually exclusive by construction.
282 if pack.is_some() && repr.align.is_some() {
283 bug!("struct cannot be packed and aligned");
286 let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };
// Start from the identity permutation; field-reordering may permute it below.
288 let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();
290 let optimize = !repr.inhibit_struct_field_reordering_opt();
// A maybe-unsized struct must keep its last field last, so it is excluded
// from the reordering window.
293 if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
294 let optimizing = &mut inverse_memory_index[..end];
// Effective field alignment, capped by `#[repr(packed(N))]` if present.
295 let field_align = |f: &TyAndLayout<'_>| {
296 if let Some(pack) = pack { f.align.abi.min(pack) } else { f.align.abi }
299 StructKind::AlwaysSized | StructKind::MaybeUnsized => {
300 optimizing.sort_by_key(|&x| {
301 // Place ZSTs first to avoid "interesting offsets",
302 // especially with only one or two non-ZST fields.
303 let f = &fields[x as usize];
304 (!f.is_zst(), cmp::Reverse(field_align(f)))
307 StructKind::Prefixed(..) => {
308 // Sort in ascending alignment so that the layout stay optimal
309 // regardless of the prefix
310 optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
315 // inverse_memory_index holds field indices by increasing memory offset.
316 // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
317 // We now write field offsets to the corresponding offset slot;
318 // field 5 with offset 0 puts 0 in offsets[5].
319 // At the bottom of this function, we invert `inverse_memory_index` to
320 // produce `memory_index` (see `invert_mapping`).
322 let mut sized = true;
323 let mut offsets = vec![Size::ZERO; fields.len()];
324 let mut offset = Size::ZERO;
325 let mut largest_niche = None;
326 let mut largest_niche_available = 0;
// A prefix (e.g. an enum tag) reserves space and alignment before field 0.
328 if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
330 if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
331 align = align.max(AbiAndPrefAlign::new(prefix_align));
332 offset = prefix_size.align_to(prefix_align);
// Assign offsets walking fields in memory order.
335 for &i in &inverse_memory_index {
336 let field = fields[i as usize];
337 bug!("univariant: field #{} of `{}` comes after unsized field", offsets.len(), ty);
341 if field.is_unsized() {
345 // Invariant: offset < dl.obj_size_bound() <= 1<<61
346 let field_align = if let Some(pack) = pack {
347 field.align.min(AbiAndPrefAlign::new(pack))
351 offset = offset.align_to(field_align.abi);
352 align = align.max(field_align);
354 debug!("univariant offset: {:?} field: {:#?}", offset, field);
355 offsets[i as usize] = offset;
// Track the largest available niche across fields (unless hidden by repr),
// translating the field-relative niche offset to a struct-relative one.
357 if !repr.hide_niche() {
358 if let Some(mut niche) = field.largest_niche.clone() {
359 let available = niche.available(dl);
360 if available > largest_niche_available {
361 largest_niche_available = available;
362 niche.offset += offset;
363 largest_niche = Some(niche);
// Advance the cursor; overflow means the type is too big for the target.
368 offset = offset.checked_add(field.size, dl).ok_or(LayoutError::SizeOverflow(ty))?;
371 if let Some(repr_align) = repr.align {
372 align = align.max(AbiAndPrefAlign::new(repr_align));
375 debug!("univariant min_size: {:?}", offset);
376 let min_size = offset;
378 // As stated above, inverse_memory_index holds field indices by increasing offset.
379 // This makes it an already-sorted view of the offsets vec.
380 // To invert it, consider:
381 // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
382 // Field 5 would be the first element, so memory_index is i:
383 // Note: if we didn't optimize, it's already right.
386 if optimize { invert_mapping(&inverse_memory_index) } else { inverse_memory_index };
388 let size = min_size.align_to(align.abi);
389 let mut abi = Abi::Aggregate { sized };
391 // Unpack newtype ABIs and find scalar pairs.
392 if sized && size.bytes() > 0 {
393 // All other fields must be ZSTs.
394 let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());
396 match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
397 // We have exactly one non-ZST field.
398 (Some((i, field)), None, None) => {
399 // Field fills the struct and it has a scalar or scalar pair ABI.
400 if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size
403 // For plain scalars, or vectors of them, we can't unpack
404 // newtypes for `#[repr(C)]`, as that affects C ABIs.
405 Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
406 abi = field.abi.clone();
408 // But scalar pairs are Rust-specific and get
409 // treated as aggregates by C ABIs anyway.
410 Abi::ScalarPair(..) => {
411 abi = field.abi.clone();
418 // Two non-ZST fields, and they're both scalars.
420 Some((i, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(ref a), .. }, .. })),
421 Some((j, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(ref b), .. }, .. })),
424 // Order by the memory placement, not source order.
425 let ((i, a), (j, b)) =
426 if offsets[i] < offsets[j] { ((i, a), (j, b)) } else { ((j, b), (i, a)) };
// Only adopt `ScalarPair` if a freshly computed pair layout agrees with
// the offsets and alignment already decided above.
427 let pair = self.scalar_pair(a.clone(), b.clone());
428 let pair_offsets = match pair.fields {
429 FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
430 assert_eq!(memory_index, &[0, 1]);
435 if offsets[i] == pair_offsets[0]
436 && offsets[j] == pair_offsets[1]
437 && align == pair.align
440 // We can use `ScalarPair` only when it matches our
441 // already computed layout (including `#[repr(C)]`).
// Any uninhabited field makes the whole aggregate uninhabited.
450 if sized && fields.iter().any(|f| f.abi.is_uninhabited()) {
451 abi = Abi::Uninhabited;
455 variants: Variants::Single { index: VariantIdx::new(0) },
456 fields: FieldsShape::Arbitrary { offsets, memory_index },
464 fn layout_raw_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx Layout, LayoutError<'tcx>> {
466 let param_env = self.param_env;
467 let dl = self.data_layout();
468 let scalar_unit = |value: Primitive| {
469 let bits = value.size(dl).bits();
470 assert!(bits <= 128);
471 Scalar { value, valid_range: 0..=(!0 >> (128 - bits)) }
473 let scalar = |value: Primitive| tcx.intern_layout(Layout::scalar(self, scalar_unit(value)));
475 let univariant = |fields: &[TyAndLayout<'_>], repr: &ReprOptions, kind| {
476 Ok(tcx.intern_layout(self.univariant_uninterned(ty, fields, repr, kind)?))
478 debug_assert!(!ty.has_infer_types_or_consts());
480 Ok(match *ty.kind() {
482 ty::Bool => tcx.intern_layout(Layout::scalar(
484 Scalar { value: Int(I8, false), valid_range: 0..=1 },
486 ty::Char => tcx.intern_layout(Layout::scalar(
488 Scalar { value: Int(I32, false), valid_range: 0..=0x10FFFF },
490 ty::Int(ity) => scalar(Int(Integer::from_attr(dl, attr::SignedInt(ity)), true)),
491 ty::Uint(ity) => scalar(Int(Integer::from_attr(dl, attr::UnsignedInt(ity)), false)),
492 ty::Float(fty) => scalar(match fty {
493 ast::FloatTy::F32 => F32,
494 ast::FloatTy::F64 => F64,
497 let mut ptr = scalar_unit(Pointer);
498 ptr.valid_range = 1..=*ptr.valid_range.end();
499 tcx.intern_layout(Layout::scalar(self, ptr))
503 ty::Never => tcx.intern_layout(Layout {
504 variants: Variants::Single { index: VariantIdx::new(0) },
505 fields: FieldsShape::Primitive,
506 abi: Abi::Uninhabited,
512 // Potentially-wide pointers.
513 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
514 let mut data_ptr = scalar_unit(Pointer);
515 if !ty.is_unsafe_ptr() {
516 data_ptr.valid_range = 1..=*data_ptr.valid_range.end();
519 let pointee = tcx.normalize_erasing_regions(param_env, pointee);
520 if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
521 return Ok(tcx.intern_layout(Layout::scalar(self, data_ptr)));
524 let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
525 let metadata = match unsized_part.kind() {
527 return Ok(tcx.intern_layout(Layout::scalar(self, data_ptr)));
529 ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)),
531 let mut vtable = scalar_unit(Pointer);
532 vtable.valid_range = 1..=*vtable.valid_range.end();
535 _ => return Err(LayoutError::Unknown(unsized_part)),
538 // Effectively a (ptr, meta) tuple.
539 tcx.intern_layout(self.scalar_pair(data_ptr, metadata))
542 // Arrays and slices.
543 ty::Array(element, mut count) => {
544 if count.has_projections() {
545 count = tcx.normalize_erasing_regions(param_env, count);
546 if count.has_projections() {
547 return Err(LayoutError::Unknown(ty));
551 let count = count.try_eval_usize(tcx, param_env).ok_or(LayoutError::Unknown(ty))?;
552 let element = self.layout_of(element)?;
554 element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;
556 let abi = if count != 0 && ty.conservative_is_privately_uninhabited(tcx) {
559 Abi::Aggregate { sized: true }
562 let largest_niche = if count != 0 { element.largest_niche.clone() } else { None };
564 tcx.intern_layout(Layout {
565 variants: Variants::Single { index: VariantIdx::new(0) },
566 fields: FieldsShape::Array { stride: element.size, count },
569 align: element.align,
573 ty::Slice(element) => {
574 let element = self.layout_of(element)?;
575 tcx.intern_layout(Layout {
576 variants: Variants::Single { index: VariantIdx::new(0) },
577 fields: FieldsShape::Array { stride: element.size, count: 0 },
578 abi: Abi::Aggregate { sized: false },
580 align: element.align,
584 ty::Str => tcx.intern_layout(Layout {
585 variants: Variants::Single { index: VariantIdx::new(0) },
586 fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
587 abi: Abi::Aggregate { sized: false },
594 ty::FnDef(..) => univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?,
595 ty::Dynamic(..) | ty::Foreign(..) => {
596 let mut unit = self.univariant_uninterned(
599 &ReprOptions::default(),
600 StructKind::AlwaysSized,
603 Abi::Aggregate { ref mut sized } => *sized = false,
606 tcx.intern_layout(unit)
609 ty::Generator(def_id, substs, _) => self.generator_layout(ty, def_id, substs)?,
611 ty::Closure(_, ref substs) => {
612 let tys = substs.as_closure().upvar_tys();
614 &tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
615 &ReprOptions::default(),
616 StructKind::AlwaysSized,
622 if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };
626 .map(|k| self.layout_of(k.expect_ty()))
627 .collect::<Result<Vec<_>, _>>()?,
628 &ReprOptions::default(),
633 // SIMD vector types.
634 ty::Adt(def, substs) if def.repr.simd() => {
635 // Supported SIMD vectors are homogeneous ADTs with at least one field:
637 // * #[repr(simd)] struct S(T, T, T, T);
638 // * #[repr(simd)] struct S { x: T, y: T, z: T, w: T }
639 // * #[repr(simd)] struct S([T; 4])
641 // where T is a primitive scalar (integer/float/pointer).
643 // SIMD vectors with zero fields are not supported.
644 // (should be caught by typeck)
645 if def.non_enum_variant().fields.is_empty() {
646 tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
649 // Type of the first ADT field:
650 let f0_ty = def.non_enum_variant().fields[0].ty(tcx, substs);
652 // Heterogeneous SIMD vectors are not supported:
653 // (should be caught by typeck)
654 for fi in &def.non_enum_variant().fields {
655 if fi.ty(tcx, substs) != f0_ty {
656 tcx.sess.fatal(&format!("monomorphising heterogeneous SIMD type `{}`", ty));
660 // The element type and number of elements of the SIMD vector
661 // are obtained from:
663 // * the element type and length of the single array field, if
664 // the first field is of array type, or
666 // * the homogenous field type and the number of fields.
667 let (e_ty, e_len, is_array) = if let ty::Array(e_ty, _) = f0_ty.kind() {
668 // First ADT field is an array:
670 // SIMD vectors with multiple array fields are not supported:
671 // (should be caught by typeck)
672 if def.non_enum_variant().fields.len() != 1 {
673 tcx.sess.fatal(&format!(
674 "monomorphising SIMD type `{}` with more than one array field",
679 // Extract the number of elements from the layout of the array field:
680 let len = if let Ok(TyAndLayout {
681 layout: Layout { fields: FieldsShape::Array { count, .. }, .. },
683 }) = self.layout_of(f0_ty)
687 return Err(LayoutError::Unknown(ty));
692 // First ADT field is not an array:
693 (f0_ty, def.non_enum_variant().fields.len() as _, false)
696 // SIMD vectors of zero length are not supported.
698 // Can't be caught in typeck if the array length is generic.
700 tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
703 // Compute the ABI of the element type:
704 let e_ly = self.layout_of(e_ty)?;
705 let e_abi = if let Abi::Scalar(ref scalar) = e_ly.abi {
708 // This error isn't caught in typeck, e.g., if
709 // the element type of the vector is generic.
710 tcx.sess.fatal(&format!(
711 "monomorphising SIMD type `{}` with a non-primitive-scalar \
712 (integer/float/pointer) element type `{}`",
717 // Compute the size and alignment of the vector:
718 let size = e_ly.size.checked_mul(e_len, dl).ok_or(LayoutError::SizeOverflow(ty))?;
719 let align = dl.vector_align(size);
720 let size = size.align_to(align.abi);
722 // Compute the placement of the vector fields:
723 let fields = if is_array {
724 FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] }
726 FieldsShape::Array { stride: e_ly.size, count: e_len }
729 tcx.intern_layout(Layout {
730 variants: Variants::Single { index: VariantIdx::new(0) },
732 abi: Abi::Vector { element: e_abi, count: e_len },
733 largest_niche: e_ly.largest_niche.clone(),
740 ty::Adt(def, substs) => {
741 // Cache the field layouts.
748 .map(|field| self.layout_of(field.ty(tcx, substs)))
749 .collect::<Result<Vec<_>, _>>()
751 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
754 if def.repr.pack.is_some() && def.repr.align.is_some() {
755 bug!("union cannot be packed and aligned");
759 if def.repr.pack.is_some() { dl.i8_align } else { dl.aggregate_align };
761 if let Some(repr_align) = def.repr.align {
762 align = align.max(AbiAndPrefAlign::new(repr_align));
765 let optimize = !def.repr.inhibit_union_abi_opt();
766 let mut size = Size::ZERO;
767 let mut abi = Abi::Aggregate { sized: true };
768 let index = VariantIdx::new(0);
769 for field in &variants[index] {
770 assert!(!field.is_unsized());
771 align = align.max(field.align);
773 // If all non-ZST fields have the same ABI, forward this ABI
774 if optimize && !field.is_zst() {
775 // Normalize scalar_unit to the maximal valid range
776 let field_abi = match &field.abi {
777 Abi::Scalar(x) => Abi::Scalar(scalar_unit(x.value)),
778 Abi::ScalarPair(x, y) => {
779 Abi::ScalarPair(scalar_unit(x.value), scalar_unit(y.value))
781 Abi::Vector { element: x, count } => {
782 Abi::Vector { element: scalar_unit(x.value), count: *count }
784 Abi::Uninhabited | Abi::Aggregate { .. } => {
785 Abi::Aggregate { sized: true }
789 if size == Size::ZERO {
790 // first non ZST: initialize 'abi'
792 } else if abi != field_abi {
793 // different fields have different ABI: reset to Aggregate
794 abi = Abi::Aggregate { sized: true };
798 size = cmp::max(size, field.size);
801 if let Some(pack) = def.repr.pack {
802 align = align.min(AbiAndPrefAlign::new(pack));
805 return Ok(tcx.intern_layout(Layout {
806 variants: Variants::Single { index },
807 fields: FieldsShape::Union(
808 NonZeroUsize::new(variants[index].len())
809 .ok_or(LayoutError::Unknown(ty))?,
814 size: size.align_to(align.abi),
818 // A variant is absent if it's uninhabited and only has ZST fields.
819 // Present uninhabited variants only require space for their fields,
820 // but *not* an encoding of the discriminant (e.g., a tag value).
821 // See issue #49298 for more details on the need to leave space
822 // for non-ZST uninhabited data (mostly partial initialization).
823 let absent = |fields: &[TyAndLayout<'_>]| {
824 let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
825 let is_zst = fields.iter().all(|f| f.is_zst());
826 uninhabited && is_zst
828 let (present_first, present_second) = {
829 let mut present_variants = variants
831 .filter_map(|(i, v)| if absent(v) { None } else { Some(i) });
832 (present_variants.next(), present_variants.next())
834 let present_first = match present_first {
835 Some(present_first) => present_first,
836 // Uninhabited because it has no variants, or only absent ones.
837 None if def.is_enum() => return tcx.layout_raw(param_env.and(tcx.types.never)),
838 // If it's a struct, still compute a layout so that we can still compute the
840 None => VariantIdx::new(0),
843 let is_struct = !def.is_enum() ||
844 // Only one variant is present.
845 (present_second.is_none() &&
846 // Representation optimizations are allowed.
847 !def.repr.inhibit_enum_layout_opt());
849 // Struct, or univariant enum equivalent to a struct.
850 // (Typechecking will reject discriminant-sizing attrs.)
852 let v = present_first;
853 let kind = if def.is_enum() || variants[v].is_empty() {
854 StructKind::AlwaysSized
856 let param_env = tcx.param_env(def.did);
857 let last_field = def.variants[v].fields.last().unwrap();
859 tcx.type_of(last_field.did).is_sized(tcx.at(DUMMY_SP), param_env);
861 StructKind::MaybeUnsized
863 StructKind::AlwaysSized
867 let mut st = self.univariant_uninterned(ty, &variants[v], &def.repr, kind)?;
868 st.variants = Variants::Single { index: v };
869 let (start, end) = self.tcx.layout_scalar_valid_range(def.did);
871 Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
872 // the asserts ensure that we are not using the
873 // `#[rustc_layout_scalar_valid_range(n)]`
874 // attribute to widen the range of anything as that would probably
875 // result in UB somewhere
876 // FIXME(eddyb) the asserts are probably not needed,
877 // as larger validity ranges would result in missed
878 // optimizations, *not* wrongly assuming the inner
879 // value is valid. e.g. unions enlarge validity ranges,
880 // because the values may be uninitialized.
881 if let Bound::Included(start) = start {
882 // FIXME(eddyb) this might be incorrect - it doesn't
883 // account for wrap-around (end < start) ranges.
884 assert!(*scalar.valid_range.start() <= start);
885 scalar.valid_range = start..=*scalar.valid_range.end();
887 if let Bound::Included(end) = end {
888 // FIXME(eddyb) this might be incorrect - it doesn't
889 // account for wrap-around (end < start) ranges.
890 assert!(*scalar.valid_range.end() >= end);
891 scalar.valid_range = *scalar.valid_range.start()..=end;
894 // Update `largest_niche` if we have introduced a larger niche.
895 let niche = if def.repr.hide_niche() {
898 Niche::from_scalar(dl, Size::ZERO, scalar.clone())
900 if let Some(niche) = niche {
901 match &st.largest_niche {
902 Some(largest_niche) => {
903 // Replace the existing niche even if they're equal,
904 // because this one is at a lower offset.
905 if largest_niche.available(dl) <= niche.available(dl) {
906 st.largest_niche = Some(niche);
909 None => st.largest_niche = Some(niche),
914 start == Bound::Unbounded && end == Bound::Unbounded,
915 "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
921 return Ok(tcx.intern_layout(st));
924 // At this point, we have handled all unions and
925 // structs. (We have also handled univariant enums
926 // that allow representation optimization.)
927 assert!(def.is_enum());
929 // The current code for niche-filling relies on variant indices
930 // instead of actual discriminants, so dataful enums with
931 // explicit discriminants (RFC #2363) would misbehave.
932 let no_explicit_discriminants = def
935 .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i.as_u32()));
937 let mut niche_filling_layout = None;
939 // Niche-filling enum optimization.
940 if !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants {
941 let mut dataful_variant = None;
942 let mut niche_variants = VariantIdx::MAX..=VariantIdx::new(0);
944 // Find one non-ZST variant.
945 'variants: for (v, fields) in variants.iter_enumerated() {
951 if dataful_variant.is_none() {
952 dataful_variant = Some(v);
955 dataful_variant = None;
960 niche_variants = *niche_variants.start().min(&v)..=v;
963 if niche_variants.start() > niche_variants.end() {
964 dataful_variant = None;
967 if let Some(i) = dataful_variant {
968 let count = (niche_variants.end().as_u32()
969 - niche_variants.start().as_u32()
972 // Find the field with the largest niche
973 let niche_candidate = variants[i]
976 .filter_map(|(j, &field)| Some((j, field.largest_niche.as_ref()?)))
977 .max_by_key(|(_, niche)| niche.available(dl));
979 if let Some((field_index, niche, (niche_start, niche_scalar))) =
980 niche_candidate.and_then(|(field_index, niche)| {
981 Some((field_index, niche, niche.reserve(self, count)?))
984 let mut align = dl.aggregate_align;
988 let mut st = self.univariant_uninterned(
992 StructKind::AlwaysSized,
994 st.variants = Variants::Single { index: j };
996 align = align.max(st.align);
1000 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1002 let offset = st[i].fields.offset(field_index) + niche.offset;
1003 let size = st[i].size;
1005 let abi = if st.iter().all(|v| v.abi.is_uninhabited()) {
1009 Abi::Scalar(_) => Abi::Scalar(niche_scalar.clone()),
1010 Abi::ScalarPair(ref first, ref second) => {
1011 // We need to use scalar_unit to reset the
1012 // valid range to the maximal one for that
1013 // primitive, because only the niche is
1014 // guaranteed to be initialised, not the
1016 if offset.bytes() == 0 {
1018 niche_scalar.clone(),
1019 scalar_unit(second.value),
1023 scalar_unit(first.value),
1024 niche_scalar.clone(),
1028 _ => Abi::Aggregate { sized: true },
1033 Niche::from_scalar(dl, offset, niche_scalar.clone());
1035 niche_filling_layout = Some(Layout {
1036 variants: Variants::Multiple {
1038 tag_encoding: TagEncoding::Niche {
1046 fields: FieldsShape::Arbitrary {
1047 offsets: vec![offset],
1048 memory_index: vec![0],
1059 let (mut min, mut max) = (i128::MAX, i128::MIN);
1060 let discr_type = def.repr.discr_type();
1061 let bits = Integer::from_attr(self, discr_type).size().bits();
1062 for (i, discr) in def.discriminants(tcx) {
1063 if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
1066 let mut x = discr.val as i128;
1067 if discr_type.is_signed() {
1068 // sign extend the raw representation to be an i128
1069 x = (x << (128 - bits)) >> (128 - bits);
1078 // We might have no inhabited variants, so pretend there's at least one.
1079 if (min, max) == (i128::MAX, i128::MIN) {
1083 assert!(min <= max, "discriminant range is {}...{}", min, max);
1084 let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max);
1086 let mut align = dl.aggregate_align;
1087 let mut size = Size::ZERO;
1089 // We're interested in the smallest alignment, so start large.
1090 let mut start_align = Align::from_bytes(256).unwrap();
1091 assert_eq!(Integer::for_align(dl, start_align), None);
1093 // repr(C) on an enum tells us to make a (tag, union) layout,
1094 // so we need to grow the prefix alignment to be at least
1095 // the alignment of the union. (This value is used both for
1096 // determining the alignment of the overall enum, and the
1097 // determining the alignment of the payload after the tag.)
1098 let mut prefix_align = min_ity.align(dl).abi;
1100 for fields in &variants {
1101 for field in fields {
1102 prefix_align = prefix_align.max(field.align.abi);
1107 // Create the set of structs that represent each variant.
1108 let mut layout_variants = variants
1110 .map(|(i, field_layouts)| {
1111 let mut st = self.univariant_uninterned(
1115 StructKind::Prefixed(min_ity.size(), prefix_align),
1117 st.variants = Variants::Single { index: i };
1118 // Find the first field we can't move later
1119 // to make room for a larger discriminant.
1121 st.fields.index_by_increasing_offset().map(|j| field_layouts[j])
1123 if !field.is_zst() || field.align.abi.bytes() != 1 {
1124 start_align = start_align.min(field.align.abi);
1128 size = cmp::max(size, st.size);
1129 align = align.max(st.align);
1132 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1134 // Align the maximum variant size to the largest alignment.
1135 size = size.align_to(align.abi);
1137 if size.bytes() >= dl.obj_size_bound() {
1138 return Err(LayoutError::SizeOverflow(ty));
1141 let typeck_ity = Integer::from_attr(dl, def.repr.discr_type());
1142 if typeck_ity < min_ity {
1143 // It is a bug if Layout decided on a greater discriminant size than typeck for
1144 // some reason at this point (based on values discriminant can take on). Mostly
1145 // because this discriminant will be loaded, and then stored into variable of
1146 // type calculated by typeck. Consider such case (a bug): typeck decided on
1147 // byte-sized discriminant, but layout thinks we need a 16-bit to store all
1148 // discriminant values. That would be a bug, because then, in codegen, in order
1149 // to store this 16-bit discriminant into 8-bit sized temporary some of the
1150 // space necessary to represent would have to be discarded (or layout is wrong
1151 // on thinking it needs 16 bits)
1153 "layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
1157 // However, it is fine to make discr type however large (as an optimisation)
1158 // after this point – we’ll just truncate the value we load in codegen.
1161 // Check to see if we should use a different type for the
1162 // discriminant. We can safely use a type with the same size
1163 // as the alignment of the first field of each variant.
1164 // We increase the size of the discriminant to avoid LLVM copying
1165 // padding when it doesn't need to. This normally causes unaligned
1166 // load/stores and excessive memcpy/memset operations. By using a
1167 // bigger integer size, LLVM can be sure about its contents and
1168 // won't be so conservative.
1170 // Use the initial field alignment
1171 let mut ity = if def.repr.c() || def.repr.int.is_some() {
1174 Integer::for_align(dl, start_align).unwrap_or(min_ity)
1177 // If the alignment is not larger than the chosen discriminant size,
1178 // don't use the alignment as the final size.
1182 // Patch up the variants' first few fields.
1183 let old_ity_size = min_ity.size();
1184 let new_ity_size = ity.size();
1185 for variant in &mut layout_variants {
1186 match variant.fields {
1187 FieldsShape::Arbitrary { ref mut offsets, .. } => {
1189 if *i <= old_ity_size {
1190 assert_eq!(*i, old_ity_size);
1194 // We might be making the struct larger.
1195 if variant.size <= old_ity_size {
1196 variant.size = new_ity_size;
1204 let tag_mask = !0u128 >> (128 - ity.size().bits());
1206 value: Int(ity, signed),
1207 valid_range: (min as u128 & tag_mask)..=(max as u128 & tag_mask),
1209 let mut abi = Abi::Aggregate { sized: true };
1210 if tag.value.size(dl) == size {
1211 abi = Abi::Scalar(tag.clone());
1213 // Try to use a ScalarPair for all tagged enums.
1214 let mut common_prim = None;
1215 for (field_layouts, layout_variant) in variants.iter().zip(&layout_variants) {
1216 let offsets = match layout_variant.fields {
1217 FieldsShape::Arbitrary { ref offsets, .. } => offsets,
1221 field_layouts.iter().zip(offsets).filter(|p| !p.0.is_zst());
1222 let (field, offset) = match (fields.next(), fields.next()) {
1223 (None, None) => continue,
1224 (Some(pair), None) => pair,
1230 let prim = match field.abi {
1231 Abi::Scalar(ref scalar) => scalar.value,
1237 if let Some(pair) = common_prim {
1238 // This is pretty conservative. We could go fancier
1239 // by conflating things like i32 and u32, or even
1240 // realising that (u8, u8) could just cohabit with
1242 if pair != (prim, offset) {
1247 common_prim = Some((prim, offset));
1250 if let Some((prim, offset)) = common_prim {
1251 let pair = self.scalar_pair(tag.clone(), scalar_unit(prim));
1252 let pair_offsets = match pair.fields {
1253 FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
1254 assert_eq!(memory_index, &[0, 1]);
1259 if pair_offsets[0] == Size::ZERO
1260 && pair_offsets[1] == *offset
1261 && align == pair.align
1262 && size == pair.size
1264 // We can use `ScalarPair` only when it matches our
1265 // already computed layout (including `#[repr(C)]`).
1271 if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
1272 abi = Abi::Uninhabited;
1275 let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag.clone());
1277 let tagged_layout = Layout {
1278 variants: Variants::Multiple {
1280 tag_encoding: TagEncoding::Direct,
1282 variants: layout_variants,
1284 fields: FieldsShape::Arbitrary {
1285 offsets: vec![Size::ZERO],
1286 memory_index: vec![0],
1294 let best_layout = match (tagged_layout, niche_filling_layout) {
1295 (tagged_layout, Some(niche_filling_layout)) => {
1296 // Pick the smaller layout; otherwise,
1297 // pick the layout with the larger niche; otherwise,
1298 // pick tagged as it has simpler codegen.
1299 cmp::min_by_key(tagged_layout, niche_filling_layout, |layout| {
1301 layout.largest_niche.as_ref().map_or(0, |n| n.available(dl));
1302 (layout.size, cmp::Reverse(niche_size))
1305 (tagged_layout, None) => tagged_layout,
1308 tcx.intern_layout(best_layout)
1311 // Types with no meaningful known layout.
1312 ty::Projection(_) | ty::Opaque(..) => {
1313 let normalized = tcx.normalize_erasing_regions(param_env, ty);
1314 if ty == normalized {
1315 return Err(LayoutError::Unknown(ty));
1317 tcx.layout_raw(param_env.and(normalized))?
1320 ty::Placeholder(..) | ty::GeneratorWitness(..) | ty::Infer(_) => {
1321 bug!("Layout::compute: unexpected type `{}`", ty)
1324 ty::Bound(..) | ty::Param(_) | ty::Error(_) => {
1325 return Err(LayoutError::Unknown(ty));
1331 /// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
1332 #[derive(Clone, Debug, PartialEq)]
1333 enum SavedLocalEligibility {
// `Assigned(v)`: the local is used by exactly one variant `v`, so its storage
// may overlap with locals that are exclusive to *other* variants.
1335 Assigned(VariantIdx),
1336 // FIXME: Use newtype_index so we aren't wasting bytes
// `Ineligible(Some(i))`: the local was promoted to slot `i` of the generator's
// prefix; `Ineligible(None)` means it is ineligible but a prefix slot has not
// been assigned yet (see `generator_saved_local_eligibility`).
1337 Ineligible(Option<u32>),
1340 // When laying out generators, we divide our saved local fields into two
1341 // categories: overlap-eligible and overlap-ineligible.
1343 // Those fields which are ineligible for overlap go in a "prefix" at the
1344 // beginning of the layout, and always have space reserved for them.
1346 // Overlap-eligible fields are only assigned to one variant, so we lay
1347 // those fields out for each variant and put them right after the
1350 // Finally, in the layout details, we point to the fields from the
1351 // variants they are assigned to. It is possible for some fields to be
1352 // included in multiple variants. No field ever "moves around" in the
1353 // layout; its offset is always the same.
1355 // Also included in the layout are the upvars and the discriminant.
1356 // These are included as fields on the "outer" layout; they are not part
1358 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
1359 /// Compute the eligibility and assignment of each local.
// Returns `(ineligible_locals, assignments)`: the set of saved locals that must
// live in the generator's shared prefix, and the per-local eligibility record
// (see `SavedLocalEligibility`).
1360 fn generator_saved_local_eligibility(
1362 info: &GeneratorLayout<'tcx>,
1363 ) -> (BitSet<GeneratorSavedLocal>, IndexVec<GeneratorSavedLocal, SavedLocalEligibility>) {
1364 use SavedLocalEligibility::*;
// Every saved local starts as `Unassigned`; the passes below refine this.
1366 let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> =
1367 IndexVec::from_elem_n(Unassigned, info.field_tys.len());
1369 // The saved locals not eligible for overlap. These will get
1370 // "promoted" to the prefix of our generator.
1371 let mut ineligible_locals = BitSet::new_empty(info.field_tys.len());
1373 // Figure out which of our saved locals are fields in only
1374 // one variant. The rest are deemed ineligible for overlap.
1375 for (variant_index, fields) in info.variant_fields.iter_enumerated() {
1376 for local in fields {
1377 match assignments[*local] {
// First sighting of this local: tentatively assign it to this variant.
1379 assignments[*local] = Assigned(variant_index);
1382 // We've already seen this local at another suspension
1383 // point, so it is no longer a candidate.
1385 "removing local {:?} in >1 variant ({:?}, {:?})",
1390 ineligible_locals.insert(*local);
1391 assignments[*local] = Ineligible(None);
1398 // Next, check every pair of eligible locals to see if they
1400 for local_a in info.storage_conflicts.rows() {
1401 let conflicts_a = info.storage_conflicts.count(local_a);
// Already-ineligible locals are stored in every variant; no decision needed.
1402 if ineligible_locals.contains(local_a) {
1406 for local_b in info.storage_conflicts.iter(local_a) {
1407 // local_a and local_b are storage live at the same time, therefore they
1408 // cannot overlap in the generator layout. The only way to guarantee
1409 // this is if they are in the same variant, or one is ineligible
1410 // (which means it is stored in every variant).
1411 if ineligible_locals.contains(local_b)
1412 || assignments[local_a] == assignments[local_b]
1417 // If they conflict, we will choose one to make ineligible.
1418 // This is not always optimal; it's just a greedy heuristic that
1419 // seems to produce good results most of the time.
// Demote whichever of the two locals conflicts with more locals overall.
1420 let conflicts_b = info.storage_conflicts.count(local_b);
1421 let (remove, other) =
1422 if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
1423 ineligible_locals.insert(remove);
1424 assignments[remove] = Ineligible(None);
1425 trace!("removing local {:?} due to conflict with {:?}", remove, other);
1429 // Count the number of variants in use. If only one of them, then it is
1430 // impossible to overlap any locals in our layout. In this case it's
1431 // always better to make the remaining locals ineligible, so we can
1432 // lay them out with the other locals in the prefix and eliminate
1433 // unnecessary padding bytes.
1435 let mut used_variants = BitSet::new_empty(info.variant_fields.len());
1436 for assignment in &assignments {
1437 if let Assigned(idx) = assignment {
1438 used_variants.insert(*idx);
1441 if used_variants.count() < 2 {
// With at most one live variant there is nothing to overlap: demote everything.
1442 for assignment in assignments.iter_mut() {
1443 *assignment = Ineligible(None);
1445 ineligible_locals.insert_all();
1449 // Write down the order of our locals that will be promoted to the prefix.
// Each ineligible local now receives its fixed slot index within the prefix.
1451 for (idx, local) in ineligible_locals.iter().enumerate() {
1452 assignments[local] = Ineligible(Some(idx as u32));
1455 debug!("generator saved local assignments: {:?}", assignments);
1457 (ineligible_locals, assignments)
1460 /// Compute the full generator layout.
// Builds a `Variants::Multiple` layout with a shared prefix (upvars, tag, and
// promoted locals) followed by per-variant overlap-eligible fields.
1461 fn generator_layout(
1464 def_id: hir::def_id::DefId,
1465 substs: SubstsRef<'tcx>,
1466 ) -> Result<&'tcx Layout, LayoutError<'tcx>> {
1467 use SavedLocalEligibility::*;
// Instantiate a saved-local type with the generator's actual substitutions.
1469 let subst_field = |ty: Ty<'tcx>| ty.subst(tcx, substs);
1471 let info = match tcx.generator_layout(def_id) {
1472 None => return Err(LayoutError::Unknown(ty)),
// Partition saved locals into prefix ("promoted") vs. per-variant fields.
1475 let (ineligible_locals, assignments) = self.generator_saved_local_eligibility(&info);
1477 // Build a prefix layout, including "promoting" all ineligible
1478 // locals as part of the prefix. We compute the layout of all of
1479 // these fields at once to get optimal packing.
// The discriminant (tag) slot comes right after the upvar prefix types.
1480 let tag_index = substs.as_generator().prefix_tys().count();
1482 // `info.variant_fields` already accounts for the reserved variants, so no need to add them.
1483 let max_discr = (info.variant_fields.len() - 1) as u128;
// The tag is the smallest unsigned integer that can hold every discriminant.
1484 let discr_int = Integer::fit_unsigned(max_discr);
1485 let discr_int_ty = discr_int.to_ty(tcx, false);
1486 let tag = Scalar { value: Primitive::Int(discr_int, false), valid_range: 0..=max_discr };
1487 let tag_layout = self.tcx.intern_layout(Layout::scalar(self, tag.clone()));
1488 let tag_layout = TyAndLayout { ty: discr_int_ty, layout: tag_layout };
// Promoted locals are wrapped in `MaybeUninit` — NOTE(review): presumably
// because they are not initialized in every generator state; confirm.
1490 let promoted_layouts = ineligible_locals
1492 .map(|local| subst_field(info.field_tys[local]))
1493 .map(|ty| tcx.mk_maybe_uninit(ty))
1494 .map(|ty| self.layout_of(ty));
// Prefix = upvar/prefix types, then the tag, then the promoted locals.
1495 let prefix_layouts = substs
1498 .map(|ty| self.layout_of(ty))
1499 .chain(iter::once(Ok(tag_layout)))
1500 .chain(promoted_layouts)
1501 .collect::<Result<Vec<_>, _>>()?;
1502 let prefix = self.univariant_uninterned(
1505 &ReprOptions::default(),
1506 StructKind::AlwaysSized,
1509 let (prefix_size, prefix_align) = (prefix.size, prefix.align);
1511 // Split the prefix layout into the "outer" fields (upvars and
1512 // discriminant) and the "promoted" fields. Promoted fields will
1513 // get included in each variant that requested them in
1515 debug!("prefix = {:#?}", prefix);
1516 let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
1517 FieldsShape::Arbitrary { mut offsets, memory_index } => {
1518 let mut inverse_memory_index = invert_mapping(&memory_index);
1520 // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
1521 // "outer" and "promoted" fields respectively.
// Outer fields are the prefix types plus the tag, hence `tag_index + 1`.
1522 let b_start = (tag_index + 1) as u32;
1523 let offsets_b = offsets.split_off(b_start as usize);
1524 let offsets_a = offsets;
1526 // Disentangle the "a" and "b" components of `inverse_memory_index`
1527 // by preserving the order but keeping only one disjoint "half" each.
1528 // FIXME(eddyb) build a better abstraction for permutations, if possible.
1529 let inverse_memory_index_b: Vec<_> =
1530 inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
1531 inverse_memory_index.retain(|&i| i < b_start);
1532 let inverse_memory_index_a = inverse_memory_index;
1534 // Since `inverse_memory_index_{a,b}` each only refer to their
1535 // respective fields, they can be safely inverted
1536 let memory_index_a = invert_mapping(&inverse_memory_index_a);
1537 let memory_index_b = invert_mapping(&inverse_memory_index_b);
1540 FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
1541 (outer_fields, offsets_b, memory_index_b)
// The overall size/align starts at the prefix and grows with each variant.
1546 let mut size = prefix.size;
1547 let mut align = prefix.align;
1551 .map(|(index, variant_fields)| {
1552 // Only include overlap-eligible fields when we compute our variant layout.
1553 let variant_only_tys = variant_fields
1555 .filter(|local| match assignments[**local] {
1556 Unassigned => bug!(),
1557 Assigned(v) if v == index => true,
1558 Assigned(_) => bug!("assignment does not match variant"),
1559 Ineligible(_) => false,
1561 .map(|local| subst_field(info.field_tys[*local]));
// Lay the variant-only fields out after the shared prefix.
1563 let mut variant = self.univariant_uninterned(
1566 .map(|ty| self.layout_of(ty))
1567 .collect::<Result<Vec<_>, _>>()?,
1568 &ReprOptions::default(),
1569 StructKind::Prefixed(prefix_size, prefix_align.abi),
1571 variant.variants = Variants::Single { index };
1573 let (offsets, memory_index) = match variant.fields {
1574 FieldsShape::Arbitrary { offsets, memory_index } => (offsets, memory_index),
1578 // Now, stitch the promoted and variant-only fields back together in
1579 // the order they are mentioned by our GeneratorLayout.
1580 // Because we only use some subset (that can differ between variants)
1581 // of the promoted fields, we can't just pick those elements of the
1582 // `promoted_memory_index` (as we'd end up with gaps).
1583 // So instead, we build an "inverse memory_index", as if all of the
1584 // promoted fields were being used, but leave the elements not in the
1585 // subset as `INVALID_FIELD_IDX`, which we can filter out later to
1586 // obtain a valid (bijective) mapping.
1587 const INVALID_FIELD_IDX: u32 = !0;
1588 let mut combined_inverse_memory_index =
1589 vec![INVALID_FIELD_IDX; promoted_memory_index.len() + memory_index.len()];
1590 let mut offsets_and_memory_index = offsets.into_iter().zip(memory_index);
1591 let combined_offsets = variant_fields
1595 let (offset, memory_index) = match assignments[*local] {
1596 Unassigned => bug!(),
// Variant-local field: take the next offset computed above; its memory
// index is shifted past all promoted (prefix) fields.
1598 let (offset, memory_index) =
1599 offsets_and_memory_index.next().unwrap();
1600 (offset, promoted_memory_index.len() as u32 + memory_index)
1602 Ineligible(field_idx) => {
// Promoted field: reuse its fixed offset/index from the prefix.
1603 let field_idx = field_idx.unwrap() as usize;
1604 (promoted_offsets[field_idx], promoted_memory_index[field_idx])
1607 combined_inverse_memory_index[memory_index as usize] = i as u32;
1612 // Remove the unused slots and invert the mapping to obtain the
1613 // combined `memory_index` (also see previous comment).
1614 combined_inverse_memory_index.retain(|&i| i != INVALID_FIELD_IDX);
1615 let combined_memory_index = invert_mapping(&combined_inverse_memory_index);
1617 variant.fields = FieldsShape::Arbitrary {
1618 offsets: combined_offsets,
1619 memory_index: combined_memory_index,
1622 size = size.max(variant.size);
1623 align = align.max(variant.align);
1626 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1628 size = size.align_to(align.abi);
// The whole generator is uninhabited if the prefix is, or if every variant is.
1630 let abi = if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi.is_uninhabited())
1634 Abi::Aggregate { sized: true }
1637 let layout = tcx.intern_layout(Layout {
1638 variants: Variants::Multiple {
1640 tag_encoding: TagEncoding::Direct,
1641 tag_field: tag_index,
1644 fields: outer_fields,
1646 largest_niche: prefix.largest_niche,
1650 debug!("generator layout ({:?}): {:#?}", ty, layout);
1654 /// This is invoked by the `layout_raw` query to record the final
1655 /// layout of each type.
1657 fn record_layout_for_printing(&self, layout: TyAndLayout<'tcx>) {
1658 // If we are running with `-Zprint-type-sizes`, maybe record layouts
1659 // for dumping later.
// Cheap gate: the expensive recording work is outlined so the common
// (flag-off) path stays small.
1660 if self.tcx.sess.opts.debugging_opts.print_type_sizes {
1661 self.record_layout_for_printing_outlined(layout)
// Collect `-Zprint-type-sizes` statistics for one monomorphic layout and
// report them to `sess.code_stats`.
1665 fn record_layout_for_printing_outlined(&self, layout: TyAndLayout<'tcx>) {
1666 // Ignore layouts that are done with non-empty environments or
1667 // non-monomorphic layouts, as the user only wants to see the stuff
1668 // resulting from the final codegen session.
1669 if layout.ty.has_param_types_or_consts() || !self.param_env.caller_bounds().is_empty() {
1673 // (delay format until we actually need it)
// Shared helper closure: formats the type and forwards one record to
// the session's code-size statistics collector.
1674 let record = |kind, packed, opt_discr_size, variants| {
1675 let type_desc = format!("{:?}", layout.ty);
1676 self.tcx.sess.code_stats.record_type_size(
// Only ADTs get the detailed per-variant treatment below; closures are
// recorded without field details, everything else is skipped.
1687 let adt_def = match *layout.ty.kind() {
1688 ty::Adt(ref adt_def, _) => {
1689 debug!("print-type-size t: `{:?}` process adt", layout.ty);
1693 ty::Closure(..) => {
1694 debug!("print-type-size t: `{:?}` record closure", layout.ty);
1695 record(DataTypeKind::Closure, false, None, vec![]);
1700 debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
1705 let adt_kind = adt_def.adt_kind();
1706 let adt_packed = adt_def.repr.pack.is_some();
// Builds the per-variant field report (name/offset/size/align of each field,
// plus the variant's minimal size).
1708 let build_variant_info = |n: Option<Ident>, flds: &[Symbol], layout: TyAndLayout<'tcx>| {
1709 let mut min_size = Size::ZERO;
1710 let field_info: Vec<_> = flds
1713 .map(|(i, &name)| match layout.field(self, i) {
1715 bug!("no layout found for field {}: `{:?}`", name, err);
1717 Ok(field_layout) => {
1718 let offset = layout.fields.offset(i);
// Track the furthest field end seen so far as the variant's minimum size.
1719 let field_end = offset + field_layout.size;
1720 if min_size < field_end {
1721 min_size = field_end;
1724 name: name.to_string(),
1725 offset: offset.bytes(),
1726 size: field_layout.size.bytes(),
1727 align: field_layout.align.abi.bytes(),
1734 name: n.map(|n| n.to_string()),
1735 kind: if layout.is_unsized() { SizeKind::Min } else { SizeKind::Exact },
1736 align: layout.align.abi.bytes(),
// A variant with no fields reports the full layout size instead of zero.
1737 size: if min_size.bytes() == 0 { layout.size.bytes() } else { min_size.bytes() },
1742 match layout.variants {
1743 Variants::Single { index } => {
1744 debug!("print-type-size `{:#?}` variant {}", layout, adt_def.variants[index].ident);
1745 if !adt_def.variants.is_empty() {
1746 let variant_def = &adt_def.variants[index];
1747 let fields: Vec<_> = variant_def.fields.iter().map(|f| f.ident.name).collect();
1752 vec![build_variant_info(Some(variant_def.ident), &fields, layout)],
1755 // (This case arises for *empty* enums; so give it
1757 record(adt_kind.into(), adt_packed, None, vec![]);
1761 Variants::Multiple { ref tag, ref tag_encoding, .. } => {
1763 "print-type-size `{:#?}` adt general variants def {}",
1765 adt_def.variants.len()
// Multi-variant enum: report each variant's layout separately.
1767 let variant_infos: Vec<_> = adt_def
1770 .map(|(i, variant_def)| {
1771 let fields: Vec<_> =
1772 variant_def.fields.iter().map(|f| f.ident.name).collect();
1774 Some(variant_def.ident),
1776 layout.for_variant(self, i),
// Only a directly-encoded tag contributes a discriminant size to the report.
1783 match tag_encoding {
1784 TagEncoding::Direct => Some(tag.value.size(self)),
1794 /// Type size "skeleton", i.e., the only information determining a type's size.
1795 /// While this is conservative, (aside from constant sizes, only pointers,
1796 /// newtypes thereof and null pointer optimized enums are allowed), it is
1797 /// enough to statically check common use cases of transmute.
1798 #[derive(Copy, Clone, Debug)]
1799 pub enum SizeSkeleton<'tcx> {
1800 /// Any statically computable Layout.
1803 /// A potentially-fat pointer.
1805 /// If true, this pointer is never null.
// (Used by `compute` for the nullable-pointer enum optimization below.)
1807 /// The type which determines the unsized metadata, if any,
1808 /// of this pointer. Either a type parameter or a projection
1809 /// depending on one, with regions erased.
1814 impl<'tcx> SizeSkeleton<'tcx> {
// Computes a size skeleton for `ty`: a known size when full layout succeeds,
// or a pointer skeleton for the restricted shapes documented on the enum.
1818 param_env: ty::ParamEnv<'tcx>,
1819 ) -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
1820 debug_assert!(!ty.has_infer_types_or_consts());
1822 // First try computing a static layout.
1823 let err = match tcx.layout_of(param_env.and(ty)) {
// Full layout succeeded — the size is statically known.
1825 return Ok(SizeSkeleton::Known(layout.size));
// Layout failed; fall through to the structural cases below, which can
// still produce a skeleton for pointer-shaped types.
1831 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
// References are non-null; only raw pointers may be null.
1832 let non_zero = !ty.is_unsafe_ptr();
1833 let tail = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
1835 ty::Param(_) | ty::Projection(_) => {
1836 debug_assert!(tail.has_param_types_or_consts());
1837 Ok(SizeSkeleton::Pointer { non_zero, tail: tcx.erase_regions(tail) })
1840 "SizeSkeleton::compute({}): layout errored ({}), yet \
1841 tail `{}` is not a type parameter or a projection",
1849 ty::Adt(def, substs) => {
1850 // Only newtypes and enums w/ nullable pointer optimization.
1851 if def.is_union() || def.variants.is_empty() || def.variants.len() > 2 {
1855 // Get a zero-sized variant or a pointer newtype.
1856 let zero_or_ptr_variant = |i| {
1857 let i = VariantIdx::new(i);
1858 let fields = def.variants[i]
// Recursively skeletonize each field of the variant.
1861 .map(|field| SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env));
1863 for field in fields {
1866 SizeSkeleton::Known(size) => {
// Any field with nonzero known size disqualifies the variant.
1867 if size.bytes() > 0 {
1871 SizeSkeleton::Pointer { .. } => {
1882 let v0 = zero_or_ptr_variant(0)?;
// Newtype (single-variant struct/enum) around a pointer.
1884 if def.variants.len() == 1 {
1885 if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
1886 return Ok(SizeSkeleton::Pointer {
// The wrapper is non-null if the pointer is, or if a
// `rustc_layout_scalar_valid_range` attribute excludes zero.
1888 || match tcx.layout_scalar_valid_range(def.did) {
1889 (Bound::Included(start), Bound::Unbounded) => start > 0,
1890 (Bound::Included(start), Bound::Included(end)) => {
1891 0 < start && start < end
1902 let v1 = zero_or_ptr_variant(1)?;
1903 // Nullable pointer enum optimization.
// One variant is a non-null pointer, the other is zero-sized: the enum
// is pointer-sized with null standing in for the ZST variant.
1905 (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None)
1906 | (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
1907 Ok(SizeSkeleton::Pointer { non_zero: false, tail })
1913 ty::Projection(_) | ty::Opaque(..) => {
// Try again after normalization; bail if normalization made no progress.
1914 let normalized = tcx.normalize_erasing_regions(param_env, ty);
1915 if ty == normalized {
1918 SizeSkeleton::compute(normalized, tcx, param_env)
// Whether two skeletons are guaranteed to describe types of equal size.
// Known sizes compare by value; two pointer skeletons compare by their tail
// types (NOTE(review): the Pointer-arm body is below this excerpt — confirm).
1926 pub fn same_size(self, other: SizeSkeleton<'_>) -> bool {
1927 match (self, other) {
1928 (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
1929 (SizeSkeleton::Pointer { tail: a, .. }, SizeSkeleton::Pointer { tail: b, .. }) => {
// Access to the global typing context; implies data-layout access via the
// `HasDataLayout` supertrait.
1937 pub trait HasTyCtxt<'tcx>: HasDataLayout {
1938 fn tcx(&self) -> TyCtxt<'tcx>;
// Access to the parameter environment a layout/ABI computation runs under.
1941 pub trait HasParamEnv<'tcx> {
1942 fn param_env(&self) -> ty::ParamEnv<'tcx>;
// Trivial accessor impls: `TyCtxt` provides the data layout and itself
// directly; `LayoutCx` forwards to its stored `param_env` and inner `tcx`.
1945 impl<'tcx> HasDataLayout for TyCtxt<'tcx> {
1946 fn data_layout(&self) -> &TargetDataLayout {
1951 impl<'tcx> HasTyCtxt<'tcx> for TyCtxt<'tcx> {
1952 fn tcx(&self) -> TyCtxt<'tcx> {
// `LayoutCx` carries its own `param_env`, independent of the inner context `C`.
1957 impl<'tcx, C> HasParamEnv<'tcx> for LayoutCx<'tcx, C> {
1958 fn param_env(&self) -> ty::ParamEnv<'tcx> {
// Delegation impls: `LayoutCx` is layout/tcx-capable whenever its inner
// context is.
1963 impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
1964 fn data_layout(&self) -> &TargetDataLayout {
1965 self.tcx.data_layout()
1969 impl<'tcx, T: HasTyCtxt<'tcx>> HasTyCtxt<'tcx> for LayoutCx<'tcx, T> {
1970 fn tcx(&self) -> TyCtxt<'tcx> {
// Convenience alias: a `Ty` paired with its computed `Layout` (the generic
// `rustc_target` struct instantiated for this crate's type representation).
1975 pub type TyAndLayout<'tcx> = rustc_target::abi::TyAndLayout<'tcx, Ty<'tcx>>;
1977 impl<'tcx> LayoutOf for LayoutCx<'tcx, TyCtxt<'tcx>> {
1979 type TyAndLayout = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
1981 /// Computes the layout of a type. Note that this implicitly
1982 /// executes in "reveal all" mode.
1983 fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout {
// Normalize under the reveal-all environment first so the `layout_raw`
// query always receives a canonical (param_env, ty) key.
1984 let param_env = self.param_env.with_reveal_all_normalized(self.tcx);
1985 let ty = self.tcx.normalize_erasing_regions(param_env, ty);
1986 let layout = self.tcx.layout_raw(param_env.and(ty))?;
1987 let layout = TyAndLayout { ty, layout };
1989 // N.B., this recording is normally disabled; when enabled, it
1990 // can however trigger recursive invocations of `layout_of`.
1991 // Therefore, we execute it *after* the main query has
1992 // completed, to avoid problems around recursive structures
1993 // and the like. (Admittedly, I wasn't able to reproduce a problem
1994 // here, but it seems like the right thing to do. -nmatsakis)
1995 self.record_layout_for_printing(layout);
// Same as the `TyCtxt`-based impl above, but for a span-carrying `TyCtxtAt`
// context (dereferenced with `*self.tcx` where a plain `TyCtxt` is needed).
2001 impl LayoutOf for LayoutCx<'tcx, ty::query::TyCtxtAt<'tcx>> {
2003 type TyAndLayout = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
2005 /// Computes the layout of a type. Note that this implicitly
2006 /// executes in "reveal all" mode.
2007 fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout {
2008 let param_env = self.param_env.with_reveal_all_normalized(*self.tcx);
2009 let ty = self.tcx.normalize_erasing_regions(param_env, ty);
2010 let layout = self.tcx.layout_raw(param_env.and(ty))?;
2011 let layout = TyAndLayout { ty, layout };
2013 // N.B., this recording is normally disabled; when enabled, it
2014 // can however trigger recursive invocations of `layout_of`.
2015 // Therefore, we execute it *after* the main query has
2016 // completed, to avoid problems around recursive structures
2017 // and the like. (Admittedly, I wasn't able to reproduce a problem
2018 // here, but it seems like the right thing to do. -nmatsakis)
// Recording needs a plain-`TyCtxt` context, so build one on the fly.
2019 let cx = LayoutCx { tcx: *self.tcx, param_env: self.param_env };
2020 cx.record_layout_for_printing(layout);
2026 // Helper (inherent) `layout_of` methods to avoid pushing `LayoutCx` to users.
2028 /// Computes the layout of a type. Note that this implicitly
2029 /// executes in "reveal all" mode.
// Thin wrapper: builds a `LayoutCx` from the supplied param-env and delegates
// to the `LayoutOf` impl above. NOTE(review): the enclosing `impl TyCtxt`
// header is outside this excerpt.
2033 param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
2034 ) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
2035 let cx = LayoutCx { tcx: self, param_env: param_env_and_ty.param_env };
2036 cx.layout_of(param_env_and_ty.value)
2040 impl ty::query::TyCtxtAt<'tcx> {
2041 /// Computes the layout of a type. Note that this implicitly
2042 /// executes in "reveal all" mode.
// Span-preserving variant of `TyCtxt::layout_of`: the `LayoutCx` is built
// from a `TyCtxtAt` so diagnostics keep this call site's span.
2046 param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
2047 ) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
2048 let cx = LayoutCx { tcx: self.at(self.span), param_env: param_env_and_ty.param_env };
2049 cx.layout_of(param_env_and_ty.value)
2053 impl<'tcx, C> TyAndLayoutMethods<'tcx, C> for Ty<'tcx>
2055 C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout: MaybeResult<TyAndLayout<'tcx>>>
2057 + HasParamEnv<'tcx>,
2060 this: TyAndLayout<'tcx>,
2062 variant_index: VariantIdx,
2063 ) -> TyAndLayout<'tcx> {
2064 let layout = match this.variants {
2065 Variants::Single { index }
2066 // If all variants but one are uninhabited, the variant layout is the enum layout.
2067 if index == variant_index &&
2068 // Don't confuse variants of uninhabited enums with the enum itself.
2069 // For more details see https://github.com/rust-lang/rust/issues/69763.
2070 this.fields != FieldsShape::Primitive =>
2075 Variants::Single { index } => {
2076 // Deny calling for_variant more than once for non-Single enums.
2077 if let Ok(original_layout) = cx.layout_of(this.ty).to_result() {
2078 assert_eq!(original_layout.variants, Variants::Single { index });
2081 let fields = match this.ty.kind() {
2082 ty::Adt(def, _) if def.variants.is_empty() =>
2083 bug!("for_variant called on zero-variant enum"),
2084 ty::Adt(def, _) => def.variants[variant_index].fields.len(),
2088 tcx.intern_layout(Layout {
2089 variants: Variants::Single { index: variant_index },
2090 fields: match NonZeroUsize::new(fields) {
2091 Some(fields) => FieldsShape::Union(fields),
2092 None => FieldsShape::Arbitrary { offsets: vec![], memory_index: vec![] },
2094 abi: Abi::Uninhabited,
2095 largest_niche: None,
2096 align: tcx.data_layout.i8_align,
2101 Variants::Multiple { ref variants, .. } => &variants[variant_index],
2104 assert_eq!(layout.variants, Variants::Single { index: variant_index });
2106 TyAndLayout { ty: this.ty, layout }
2109 fn field(this: TyAndLayout<'tcx>, cx: &C, i: usize) -> C::TyAndLayout {
2110 enum TyMaybeWithLayout<C: LayoutOf> {
2112 TyAndLayout(C::TyAndLayout),
2115 fn ty_and_layout_kind<
2116 C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout: MaybeResult<TyAndLayout<'tcx>>>
2118 + HasParamEnv<'tcx>,
2120 this: TyAndLayout<'tcx>,
2124 ) -> TyMaybeWithLayout<C> {
2126 let tag_layout = |tag: &Scalar| -> C::TyAndLayout {
2127 let layout = Layout::scalar(cx, tag.clone());
2128 MaybeResult::from(Ok(TyAndLayout {
2129 layout: tcx.intern_layout(layout),
2130 ty: tag.value.to_ty(tcx),
2143 | ty::GeneratorWitness(..)
2145 | ty::Dynamic(..) => bug!("TyAndLayout::field_type({:?}): not applicable", this),
2147 // Potentially-fat pointers.
2148 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
2149 assert!(i < this.fields.count());
2151 // Reuse the fat `*T` type as its own thin pointer data field.
2152 // This provides information about, e.g., DST struct pointees
2153 // (which may have no non-DST form), and will work as long
2154 // as the `Abi` or `FieldsShape` is checked by users.
2156 let nil = tcx.mk_unit();
2157 let ptr_ty = if ty.is_unsafe_ptr() {
2160 tcx.mk_mut_ref(tcx.lifetimes.re_static, nil)
2162 return TyMaybeWithLayout::TyAndLayout(MaybeResult::from(
2163 cx.layout_of(ptr_ty).to_result().map(|mut ptr_layout| {
2170 match tcx.struct_tail_erasing_lifetimes(pointee, cx.param_env()).kind() {
2171 ty::Slice(_) | ty::Str => TyMaybeWithLayout::Ty(tcx.types.usize),
2172 ty::Dynamic(_, _) => {
2173 TyMaybeWithLayout::Ty(tcx.mk_imm_ref(
2174 tcx.lifetimes.re_static,
2175 tcx.mk_array(tcx.types.usize, 3),
2177 /* FIXME: use actual fn pointers
2178 Warning: naively computing the number of entries in the
2179 vtable by counting the methods on the trait + methods on
2180 all parent traits does not work, because some methods can
2181 be not object safe and thus excluded from the vtable.
2182 Increase this counter if you tried to implement this but
2183 failed to do it without duplicating a lot of code from
2184 other places in the compiler: 2
2186 tcx.mk_array(tcx.types.usize, 3),
2187 tcx.mk_array(Option<fn()>),
2191 _ => bug!("TyAndLayout::field_type({:?}): not applicable", this),
2195 // Arrays and slices.
2196 ty::Array(element, _) | ty::Slice(element) => TyMaybeWithLayout::Ty(element),
2197 ty::Str => TyMaybeWithLayout::Ty(tcx.types.u8),
2199 // Tuples, generators and closures.
2200 ty::Closure(_, ref substs) => {
2201 ty_and_layout_kind(this, cx, i, substs.as_closure().tupled_upvars_ty())
2204 ty::Generator(def_id, ref substs, _) => match this.variants {
2205 Variants::Single { index } => TyMaybeWithLayout::Ty(
2208 .state_tys(def_id, tcx)
2209 .nth(index.as_usize())
2214 Variants::Multiple { ref tag, tag_field, .. } => {
2216 return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
2218 TyMaybeWithLayout::Ty(substs.as_generator().prefix_tys().nth(i).unwrap())
2222 ty::Tuple(tys) => TyMaybeWithLayout::Ty(tys[i].expect_ty()),
2225 ty::Adt(def, substs) => {
2226 match this.variants {
2227 Variants::Single { index } => {
2228 TyMaybeWithLayout::Ty(def.variants[index].fields[i].ty(tcx, substs))
2231 // Discriminant field for enums (where applicable).
2232 Variants::Multiple { ref tag, .. } => {
2234 return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
2241 | ty::Placeholder(..)
2245 | ty::Error(_) => bug!("TyAndLayout::field_type: unexpected type `{}`", this.ty),
2249 cx.layout_of(match ty_and_layout_kind(this, cx, i, this.ty) {
2250 TyMaybeWithLayout::Ty(result) => result,
2251 TyMaybeWithLayout::TyAndLayout(result) => return result,
// Reports size/alignment/safety metadata for a pointer located at byte
// `offset` inside `this` layout, or `None` if no pointer lives there.
// Codegen uses the result to emit LLVM parameter attributes
// (nonnull/noalias/readonly/dereferenceable).
2255 fn pointee_info_at(this: TyAndLayout<'tcx>, cx: &C, offset: Size) -> Option<PointeeInfo> {
// Function pointers may live in a separate instruction address space
// (Harvard-style targets); all data pointers use `AddressSpace::DATA`.
2256 let addr_space_of_ty = |ty: Ty<'tcx>| {
2257 if ty.is_fn() { cx.data_layout().instruction_address_space } else { AddressSpace::DATA }
// First, handle the case where `this.ty` is itself a pointer type and the
// pointer sits at offset 0.
2260 let pointee_info = match *this.ty.kind() {
// Raw pointers: record layout facts only; no `safe` kind is set because
// raw pointers carry no aliasing or dereferenceability guarantees.
2261 ty::RawPtr(mt) if offset.bytes() == 0 => {
2262 cx.layout_of(mt.ty).to_result().ok().map(|layout| PointeeInfo {
2264 align: layout.align.abi,
2266 address_space: addr_space_of_ty(mt.ty),
2269 ty::FnPtr(fn_sig) if offset.bytes() == 0 => {
2270 cx.layout_of(cx.tcx().mk_fn_ptr(fn_sig)).to_result().ok().map(|layout| {
2273 align: layout.align.abi,
2275 address_space: cx.data_layout().instruction_address_space,
// Safe references: classify the pointer kind from mutability and
// interior mutability (`Freeze`).
2279 ty::Ref(_, ty, mt) if offset.bytes() == 0 => {
2280 let address_space = addr_space_of_ty(ty);
2282 let is_freeze = ty.is_freeze(tcx.at(DUMMY_SP), cx.param_env());
2283 let kind = match mt {
2284 hir::Mutability::Not => {
2291 hir::Mutability::Mut => {
2292 // Previously we would only emit noalias annotations for LLVM >= 6 or in
2293 // panic=abort mode. That was deemed right, as prior versions had many bugs
2294 // in conjunction with unwinding, but later versions didn’t seem to have
2295 // said issues. See issue #31681.
2297 // Alas, later on we encountered a case where noalias would generate wrong
2298 // code altogether even with recent versions of LLVM in *safe* code with no
2299 // unwinding involved. See #54462.
2301 // For now, do not enable mutable_noalias by default at all, while the
2302 // issue is being figured out.
2303 if tcx.sess.opts.debugging_opts.mutable_noalias {
2304 PointerKind::UniqueBorrowed
2311 cx.layout_of(ty).to_result().ok().map(|layout| PointeeInfo {
2313 align: layout.align.abi,
// Otherwise `this.ty` is an aggregate: pick a variant to search for a
// field containing a pointer at `offset`.
2320 let mut data_variant = match this.variants {
2321 // Within the discriminant field, only the niche itself is
2322 // always initialized, so we only check for a pointer at its
2325 // If the niche is a pointer, it's either valid (according
2326 // to its type), or null (which the niche field's scalar
2327 // validity range encodes). This allows using
2328 // `dereferenceable_or_null` for e.g., `Option<&T>`, and
2329 // this will continue to work as long as we don't start
2330 // using more niches than just null (e.g., the first page of
2331 // the address space, or unaligned pointers).
2332 Variants::Multiple {
2333 tag_encoding: TagEncoding::Niche { dataful_variant, .. },
2336 } if this.fields.offset(tag_field) == offset => {
2337 Some(this.for_variant(cx, dataful_variant))
2342 if let Some(variant) = data_variant {
2343 // We're not interested in any unions.
2344 if let FieldsShape::Union(_) = variant.fields {
2345 data_variant = None;
2349 let mut result = None;
// Scan the variant's fields for one that fully contains the pointer-sized
// range [offset, offset + pointer size), then recurse into it.
2351 if let Some(variant) = data_variant {
2352 let ptr_end = offset + Pointer.size(cx);
2353 for i in 0..variant.fields.count() {
2354 let field_start = variant.fields.offset(i);
2355 if field_start <= offset {
2356 let field = variant.field(cx, i);
2357 result = field.to_result().ok().and_then(|field| {
2358 if ptr_end <= field_start + field.size {
2359 // We found the right field, look inside it.
// NOTE: the recursion uses the offset relative to the field's start.
2361 field.pointee_info_at(cx, offset - field_start);
2367 if result.is_some() {
// Fix up the metadata for `Box`: its pointer is uniquely owned.
2374 // FIXME(eddyb) This should be for `ptr::Unique<T>`, not `Box<T>`.
2375 if let Some(ref mut pointee) = result {
2376 if let ty::Adt(def, _) = this.ty.kind() {
2377 if def.is_box() && offset.bytes() == 0 {
2378 pointee.safe = Some(PointerKind::UniqueOwned);
2388 "pointee_info_at (offset={:?}, type kind: {:?}) => {:?}",
// Stable hashing for `LayoutError`, used by incremental compilation to
// fingerprint query results containing layout errors.
2398 impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for LayoutError<'tcx> {
2399 fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
2400 use crate::ty::layout::LayoutError::*;
// Hash the variant discriminant first so that different variants with
// equal payloads produce distinct hashes.
2401 mem::discriminant(self).hash_stable(hcx, hasher);
// Both payload-carrying variants hold a `Ty`, which is hashed the same way.
2404 Unknown(t) | SizeOverflow(t) => t.hash_stable(hcx, hasher),
2409 impl<'tcx> ty::Instance<'tcx> {
2410 // NOTE(eddyb) this is private to avoid using it from outside of
2411 // `FnAbi::of_instance` - any other uses are either too high-level
2412 // for `Instance` (e.g. typeck would use `Ty::fn_sig` instead),
2413 // or should go through `FnAbi` instead, to avoid losing any
2414 // adjustments `FnAbi::of_instance` might be performing.
// Computes the monomorphic signature codegen should use for this instance,
// synthesizing signatures for closures and generators (which have no
// ordinary `fn` signature of their own).
2415 fn fn_sig_for_fn_abi(&self, tcx: TyCtxt<'tcx>) -> ty::PolyFnSig<'tcx> {
2416 // FIXME(davidtwco,eddyb): A `ParamEnv` should be passed through to this function.
2417 let ty = self.ty(tcx, ty::ParamEnv::reveal_all());
2420 // HACK(davidtwco,eddyb): This is a workaround for polymorphization considering
2421 // parameters unused if they show up in the signature, but not in the `mir::Body`
2422 // (i.e. due to being inside a projection that got normalized, see
2423 // `src/test/ui/polymorphization/normalized_sig_types.rs`), and codegen not keeping
2424 // track of a polymorphization `ParamEnv` to allow normalizing later.
// Normalize in the item's own `ParamEnv` *before* substituting, so
// projections in the signature are resolved.
2425 let mut sig = match *ty.kind() {
2426 ty::FnDef(def_id, substs) => tcx
2427 .normalize_erasing_regions(tcx.param_env(def_id), tcx.fn_sig(def_id))
2428 .subst(tcx, substs),
2429 _ => unreachable!(),
2432 if let ty::InstanceDef::VtableShim(..) = self.def {
2433 // Modify `fn(self, ...)` to `fn(self: *mut Self, ...)`.
2434 sig = sig.map_bound(|mut sig| {
2435 let mut inputs_and_output = sig.inputs_and_output.to_vec();
2436 inputs_and_output[0] = tcx.mk_mut_ptr(inputs_and_output[0]);
2437 sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);
// Closures: prepend the closure environment as the first argument to the
// signature stored in the closure substs.
2443 ty::Closure(def_id, substs) => {
2444 let sig = substs.as_closure().sig();
2446 let env_ty = tcx.closure_env_ty(def_id, substs).unwrap();
2447 sig.map_bound(|sig| {
2449 iter::once(env_ty.skip_binder()).chain(sig.inputs().iter().cloned()),
// Generators: synthesize the resume function's signature, taking a
// pinned mutable reference to the generator as the environment.
2457 ty::Generator(_, substs, _) => {
2458 let sig = substs.as_generator().poly_sig();
// The environment is `&mut self` with a fresh late-bound region...
2460 let br = ty::BoundRegion { kind: ty::BrEnv };
2461 let env_region = ty::ReLateBound(ty::INNERMOST, br);
2462 let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty);
// ...wrapped in `Pin<_>` via the `Pin` lang item.
2464 let pin_did = tcx.require_lang_item(LangItem::Pin, None);
2465 let pin_adt_ref = tcx.adt_def(pin_did);
2466 let pin_substs = tcx.intern_substs(&[env_ty.into()]);
2467 let env_ty = tcx.mk_adt(pin_adt_ref, pin_substs);
2469 sig.map_bound(|sig| {
// Return type is `GeneratorState<Yield, Return>`.
2470 let state_did = tcx.require_lang_item(LangItem::GeneratorState, None);
2471 let state_adt_ref = tcx.adt_def(state_did);
2473 tcx.intern_substs(&[sig.yield_ty.into(), sig.return_ty.into()]);
2474 let ret_ty = tcx.mk_adt(state_adt_ref, state_substs);
2477 [env_ty, sig.resume_ty].iter(),
2480 hir::Unsafety::Normal,
2481 rustc_target::spec::abi::Abi::Rust,
2485 _ => bug!("unexpected type {:?} in Instance::fn_sig", ty),
// Extension trait adding `FnAbi` constructors that are aware of rustc's
// type system (`Ty`/`TyAndLayout`); implemented below for `call::FnAbi`.
2490 pub trait FnAbiExt<'tcx, C>
2492 C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout = TyAndLayout<'tcx>>
2496 + HasParamEnv<'tcx>,
2498 /// Compute a `FnAbi` suitable for indirect calls, i.e. to `fn` pointers.
2500 /// NB: this doesn't handle virtual calls - those should use `FnAbi::of_instance`
2501 /// instead, where the instance is a `InstanceDef::Virtual`.
2502 fn of_fn_ptr(cx: &C, sig: ty::PolyFnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self;
2504 /// Compute a `FnAbi` suitable for declaring/defining an `fn` instance, and for
2505 /// direct calls to an `fn`.
2507 /// NB: that includes virtual calls, which are represented by "direct calls"
2508 /// to a `InstanceDef::Virtual` instance (of `<dyn Trait as Trait>::fn`).
2509 fn of_instance(cx: &C, instance: ty::Instance<'tcx>, extra_args: &[Ty<'tcx>]) -> Self;
// Shared implementation behind `of_fn_ptr`/`of_instance`; `mk_arg_type`
// lets each caller customize how an argument's `ArgAbi` is built
// (`None` index = return value).
2513 sig: ty::PolyFnSig<'tcx>,
2514 extra_args: &[Ty<'tcx>],
2515 caller_location: Option<Ty<'tcx>>,
2516 codegen_fn_attr_flags: CodegenFnAttrFlags,
2517 mk_arg_type: impl Fn(Ty<'tcx>, Option<usize>) -> ArgAbi<'tcx, Ty<'tcx>>,
// Applies target/ABI-specific fixups (e.g. indirect passing) after the
// generic construction in `new_internal`.
2519 fn adjust_for_abi(&mut self, cx: &C, abi: SpecAbi);
// Parameters of `fn_can_unwind` (the `fn` header is not visible in this
// chunk — presumably `fn fn_can_unwind(..) -> bool`; confirm upstream):
// decides whether a function may unwind, based on the session panic
// strategy, its codegen attributes, and its calling convention.
2523 panic_strategy: PanicStrategy,
2524 codegen_fn_attr_flags: CodegenFnAttrFlags,
2527 if panic_strategy != PanicStrategy::Unwind {
2528 // In panic=abort mode we assume nothing can unwind anywhere, so
2529 // optimize based on this!
2531 } else if codegen_fn_attr_flags.contains(CodegenFnAttrFlags::UNWIND) {
2532 // If a specific #[unwind] attribute is present, use that.
2534 } else if codegen_fn_attr_flags.contains(CodegenFnAttrFlags::RUSTC_ALLOCATOR_NOUNWIND) {
2535 // Special attribute for allocator functions, which can't unwind.
// Otherwise, fall back to a decision based on the calling convention.
2538 if call_conv == Conv::Rust {
2539 // Any Rust method (or `extern "Rust" fn` or `extern
2540 // "rust-call" fn`) is explicitly allowed to unwind
2541 // (unless it has no-unwind attribute, handled above).
2544 // Anything else is either:
2546 // 1. A foreign item using a non-Rust ABI (like `extern "C" { fn foo(); }`), or
2548 // 2. A Rust item using a non-Rust ABI (like `extern "C" fn foo() { ... }`).
2550 // Foreign items (case 1) are assumed to not unwind; it is
2551 // UB otherwise. (At least for now; see also
2552 // rust-lang/rust#63909 and Rust RFC 2753.)
2554 // Items defined in Rust with non-Rust ABIs (case 2) are also
2555 // not supposed to unwind. Whether this should be enforced
2556 // (versus stating it is UB) and *how* it would be enforced
2557 // is currently under discussion; see rust-lang/rust#58794.
2559 // In either case, we mark item as explicitly nounwind.
2565 impl<'tcx, C> FnAbiExt<'tcx, C> for call::FnAbi<'tcx, Ty<'tcx>>
2567 C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout = TyAndLayout<'tcx>>
2571 + HasParamEnv<'tcx>,
// Build a `FnAbi` for an indirect call through a `fn` pointer: no
// `#[track_caller]` location argument, and each argument uses the plain
// `ArgAbi` derived from its layout.
2573 fn of_fn_ptr(cx: &C, sig: ty::PolyFnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
2574 // Assume that fn pointers may always unwind
2575 let codegen_fn_attr_flags = CodegenFnAttrFlags::UNWIND;
2577 call::FnAbi::new_internal(cx, sig, extra_args, None, codegen_fn_attr_flags, |ty, _| {
2578 ArgAbi::new(cx.layout_of(ty))
// Build a `FnAbi` for a known instance: uses the instance's real
// signature and codegen attributes, adds the `#[track_caller]` location
// argument when required, and special-cases the receiver of virtual calls.
2582 fn of_instance(cx: &C, instance: ty::Instance<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
2583 let sig = instance.fn_sig_for_fn_abi(cx.tcx());
2585 let caller_location = if instance.def.requires_caller_location(cx.tcx()) {
2586 Some(cx.tcx().caller_location_ty())
2591 let attrs = cx.tcx().codegen_fn_attrs(instance.def_id()).flags;
2593 call::FnAbi::new_internal(cx, sig, extra_args, caller_location, attrs, |ty, arg_idx| {
2594 let mut layout = cx.layout_of(ty);
2595 // Don't pass the vtable, it's not an argument of the virtual fn.
2596 // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
2597 // or `&/&mut dyn Trait` because this is special-cased elsewhere in codegen
// Only the receiver (argument index 0) of a virtual call is adjusted.
2598 if let (ty::InstanceDef::Virtual(..), Some(0)) = (&instance.def, arg_idx) {
2599 let fat_pointer_ty = if layout.is_unsized() {
2600 // unsized `self` is passed as a pointer to `self`
2601 // FIXME (mikeyhew) change this to use &own if it is ever added to the language
2602 cx.tcx().mk_mut_ptr(layout.ty)
// The sized receiver must itself be (a wrapper around) a fat pointer,
// i.e. have a `ScalarPair` ABI; anything else is a compiler bug.
2605 Abi::ScalarPair(..) => (),
2606 _ => bug!("receiver type has unsupported layout: {:?}", layout),
2609 // In the case of Rc<Self>, we need to explicitly pass a *mut RcBox<Self>
2610 // with a Scalar (not ScalarPair) ABI. This is a hack that is understood
2611 // elsewhere in the compiler as a method on a `dyn Trait`.
2612 // To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes until we
2613 // get a built-in pointer type
2614 let mut fat_pointer_layout = layout;
2615 'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr()
2616 && !fat_pointer_layout.ty.is_region_ptr()
// Descend into the single non-ZST field of each wrapper layer.
2618 for i in 0..fat_pointer_layout.fields.count() {
2619 let field_layout = fat_pointer_layout.field(cx, i);
2621 if !field_layout.is_zst() {
2622 fat_pointer_layout = field_layout;
2623 continue 'descend_newtypes;
2627 bug!("receiver has no non-zero-sized fields {:?}", fat_pointer_layout);
2630 fat_pointer_layout.ty
2633 // we now have a type like `*mut RcBox<dyn Trait>`
2634 // change its layout to that of `*mut ()`, a thin pointer, but keep the same type
2635 // this is understood as a special case elsewhere in the compiler
2636 let unit_pointer_ty = cx.tcx().mk_mut_ptr(cx.tcx().mk_unit());
2637 layout = cx.layout_of(unit_pointer_ty);
2638 layout.ty = fat_pointer_ty;
// Parameters of `new_internal` (the `fn` header is not visible in this
// chunk): the core `FnAbi` construction shared by `of_fn_ptr` and
// `of_instance`. `mk_arg_type` builds the initial `ArgAbi` for each
// argument (`None` index = return value).
2646 sig: ty::PolyFnSig<'tcx>,
2647 extra_args: &[Ty<'tcx>],
2648 caller_location: Option<Ty<'tcx>>,
2649 codegen_fn_attr_flags: CodegenFnAttrFlags,
2650 mk_arg_type: impl Fn(Ty<'tcx>, Option<usize>) -> ArgAbi<'tcx, Ty<'tcx>>,
2652 debug!("FnAbi::new_internal({:?}, {:?})", sig, extra_args);
// Erase late-bound regions and normalize so the signature is fully monomorphic.
2654 let sig = cx.tcx().normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), sig);
// Map the Rust-level ABI (possibly adjusted per target) to a low-level
// calling convention (`Conv`).
2656 use rustc_target::spec::abi::Abi::*;
2657 let conv = match cx.tcx().sess.target.adjust_abi(sig.abi) {
2658 RustIntrinsic | PlatformIntrinsic | Rust | RustCall => Conv::Rust,
2660 // It's the ABI's job to select this, not ours.
2661 System => bug!("system abi should be selected elsewhere"),
2662 EfiApi => bug!("eficall abi should be selected elsewhere"),
2664 Stdcall => Conv::X86Stdcall,
2665 Fastcall => Conv::X86Fastcall,
2666 Vectorcall => Conv::X86VectorCall,
2667 Thiscall => Conv::X86ThisCall,
2669 Unadjusted => Conv::C,
2670 Win64 => Conv::X86_64Win64,
2671 SysV64 => Conv::X86_64SysV,
2672 Aapcs => Conv::ArmAapcs,
2673 PtxKernel => Conv::PtxKernel,
2674 Msp430Interrupt => Conv::Msp430Intr,
2675 X86Interrupt => Conv::X86Intr,
2676 AmdGpuKernel => Conv::AmdGpuKernel,
2677 AvrInterrupt => Conv::AvrInterrupt,
2678 AvrNonBlockingInterrupt => Conv::AvrNonBlockingInterrupt,
2680 // These API constants ought to be more specific...
// For `extern "rust-call"`, the last argument is a tuple that must be
// untupled into separate arguments; otherwise `extra_args` come from the
// caller (C-variadic calls).
2684 let mut inputs = sig.inputs();
2685 let extra_args = if sig.abi == RustCall {
2686 assert!(!sig.c_variadic && extra_args.is_empty());
2688 if let Some(input) = sig.inputs().last() {
2689 if let ty::Tuple(tupled_arguments) = input.kind() {
// Drop the tuple from `inputs`; its elements become the extra args.
2690 inputs = &sig.inputs()[0..sig.inputs().len() - 1];
2691 tupled_arguments.iter().map(|k| k.expect_ty()).collect()
2694 "argument to function with \"rust-call\" ABI \
2700 "argument to function with \"rust-call\" ABI \
2705 assert!(sig.c_variadic || extra_args.is_empty());
// Target quirks: these platforms do NOT ignore zero-sized arguments
// (see the ZST handling in `arg_of` below).
2709 let target = &cx.tcx().sess.target;
2710 let target_env_gnu_like = matches!(&target.env[..], "gnu" | "musl");
2711 let win_x64_gnu = target.os == "windows" && target.arch == "x86_64" && target.env == "gnu";
2712 let linux_s390x_gnu_like =
2713 target.os == "linux" && target.arch == "s390x" && target_env_gnu_like;
2714 let linux_sparc64_gnu_like =
2715 target.os == "linux" && target.arch == "sparc64" && target_env_gnu_like;
2716 let linux_powerpc_gnu_like =
2717 target.os == "linux" && target.arch == "powerpc" && target_env_gnu_like;
2718 let rust_abi = matches!(sig.abi, RustIntrinsic | PlatformIntrinsic | Rust | RustCall);
2720 // Handle safe Rust thin and fat pointers.
// Adds LLVM-visible attributes (zext, nonnull, noalias, readonly,
// dereferenceable size/align) for a scalar component of an argument.
2721 let adjust_for_rust_scalar = |attrs: &mut ArgAttributes,
2723 layout: TyAndLayout<'tcx>,
2726 // Booleans are always an i1 that needs to be zero-extended.
2727 if scalar.is_bool() {
2728 attrs.ext(ArgExtension::Zext);
2732 // Only pointer types handled below.
2733 if scalar.value != Pointer {
// A validity range excluding 0 lets us mark the pointer `nonnull`.
2737 if scalar.valid_range.start() < scalar.valid_range.end() {
2738 if *scalar.valid_range.start() > 0 {
2739 attrs.set(ArgAttribute::NonNull);
2743 if let Some(pointee) = layout.pointee_info_at(cx, offset) {
2744 if let Some(kind) = pointee.safe {
2745 attrs.pointee_align = Some(pointee.align);
2747 // `Box` (`UniqueBorrowed`) are not necessarily dereferenceable
2748 // for the entire duration of the function as they can be deallocated
2749 // at any time. Set their valid size to 0.
2750 attrs.pointee_size = match kind {
2751 PointerKind::UniqueOwned => Size::ZERO,
2755 // `Box` pointer parameters never alias because ownership is transferred
2756 // `&mut` pointer parameters never alias other parameters,
2757 // or mutable global data
2759 // `&T` where `T` contains no `UnsafeCell<U>` is immutable,
2760 // and can be marked as both `readonly` and `noalias`, as
2761 // LLVM's definition of `noalias` is based solely on memory
2762 // dependencies rather than pointer equality
2763 let no_alias = match kind {
2764 PointerKind::Shared => false,
2765 PointerKind::UniqueOwned => true,
2766 PointerKind::Frozen | PointerKind::UniqueBorrowed => !is_return,
2769 attrs.set(ArgAttribute::NoAlias);
2772 if kind == PointerKind::Frozen && !is_return {
2773 attrs.set(ArgAttribute::ReadOnly);
// Builds the final `ArgAbi` for one argument (or the return value when
// `arg_idx` is `None`).
2779 let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| {
2780 let is_return = arg_idx.is_none();
2781 let mut arg = mk_arg_type(ty, arg_idx);
2782 if arg.layout.is_zst() {
2783 // For some forsaken reason, x86_64-pc-windows-gnu
2784 // doesn't ignore zero-sized struct arguments.
2785 // The same is true for {s390x,sparc64,powerpc}-unknown-linux-{gnu,musl}.
2789 && !linux_s390x_gnu_like
2790 && !linux_sparc64_gnu_like
2791 && !linux_powerpc_gnu_like)
2793 arg.mode = PassMode::Ignore;
2797 // FIXME(eddyb) other ABIs don't have logic for scalar pairs.
2798 if !is_return && rust_abi {
2799 if let Abi::ScalarPair(ref a, ref b) = arg.layout.abi {
2800 let mut a_attrs = ArgAttributes::new();
2801 let mut b_attrs = ArgAttributes::new();
2802 adjust_for_rust_scalar(&mut a_attrs, a, arg.layout, Size::ZERO, false);
// The second scalar starts after the first, aligned for `b`.
2803 adjust_for_rust_scalar(
2807 a.value.size(cx).align_to(b.value.align(cx).abi),
2810 arg.mode = PassMode::Pair(a_attrs, b_attrs);
// Single-scalar arguments passed directly also get Rust attributes.
2815 if let Abi::Scalar(ref scalar) = arg.layout.abi {
2816 if let PassMode::Direct(ref mut attrs) = arg.mode {
2817 adjust_for_rust_scalar(attrs, scalar, arg.layout, Size::ZERO, is_return);
// Assemble the `FnAbi`: return value, fixed inputs, extra (untupled or
// variadic) args, and the optional caller-location argument.
2824 let mut fn_abi = FnAbi {
2825 ret: arg_of(sig.output(), None),
2830 .chain(caller_location)
2832 .map(|(i, ty)| arg_of(ty, Some(i)))
2834 c_variadic: sig.c_variadic,
2835 fixed_count: inputs.len(),
2837 can_unwind: fn_can_unwind(cx.tcx().sess.panic_strategy(), codegen_fn_attr_flags, conv),
// Finally apply target/ABI-specific adjustments before returning.
2839 fn_abi.adjust_for_abi(cx, sig.abi);
2840 debug!("FnAbi::new_internal = {:?}", fn_abi);
2844 fn adjust_for_abi(&mut self, cx: &C, abi: SpecAbi) {
2845 if abi == SpecAbi::Unadjusted {
2849 if abi == SpecAbi::Rust
2850 || abi == SpecAbi::RustCall
2851 || abi == SpecAbi::RustIntrinsic
2852 || abi == SpecAbi::PlatformIntrinsic
2854 let fixup = |arg: &mut ArgAbi<'tcx, Ty<'tcx>>| {
2855 if arg.is_ignore() {
2859 match arg.layout.abi {
2860 Abi::Aggregate { .. } => {}
2862 // This is a fun case! The gist of what this is doing is
2863 // that we want callers and callees to always agree on the
2864 // ABI of how they pass SIMD arguments. If we were to *not*
2865 // make these arguments indirect then they'd be immediates
2866 // in LLVM, which means that they'd used whatever the
2867 // appropriate ABI is for the callee and the caller. That
2868 // means, for example, if the caller doesn't have AVX
2869 // enabled but the callee does, then passing an AVX argument
2870 // across this boundary would cause corrupt data to show up.
2872 // This problem is fixed by unconditionally passing SIMD
2873 // arguments through memory between callers and callees
2874 // which should get them all to agree on ABI regardless of
2875 // target feature sets. Some more information about this
2876 // issue can be found in #44367.
2878 // Note that the platform intrinsic ABI is exempt here as
2879 // that's how we connect up to LLVM and it's unstable
2880 // anyway, we control all calls to it in libstd.
2882 if abi != SpecAbi::PlatformIntrinsic
2883 && cx.tcx().sess.target.simd_types_indirect =>
2885 arg.make_indirect();
2892 // Pass and return structures up to 2 pointers in size by value, matching `ScalarPair`.
2893 // LLVM will usually pass these in 2 registers, which is more efficient than by-ref.
2894 let max_by_val_size = Pointer.size(cx) * 2;
2895 let size = arg.layout.size;
2897 if arg.layout.is_unsized() || size > max_by_val_size {
2898 arg.make_indirect();
2900 // We want to pass small aggregates as immediates, but using
2901 // a LLVM aggregate type for this leads to bad optimizations,
2902 // so we pick an appropriately sized integer type instead.
2903 arg.cast_to(Reg { kind: RegKind::Integer, size });
2906 fixup(&mut self.ret);
2907 for arg in &mut self.args {
2913 if let Err(msg) = self.adjust_for_cabi(cx, abi) {
2914 cx.tcx().sess.fatal(&msg);