1 use crate::ich::StableHashingContext;
2 use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
3 use crate::mir::{GeneratorLayout, GeneratorSavedLocal};
4 use crate::ty::subst::Subst;
5 use crate::ty::{self, subst::SubstsRef, ReprOptions, Ty, TyCtxt, TypeFoldable};
7 use rustc_ast::{self as ast, IntTy, UintTy};
8 use rustc_attr as attr;
9 use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
11 use rustc_hir::lang_items::LangItem;
12 use rustc_index::bit_set::BitSet;
13 use rustc_index::vec::{Idx, IndexVec};
14 use rustc_session::{DataTypeKind, FieldInfo, SizeKind, VariantInfo};
15 use rustc_span::symbol::{Ident, Symbol};
16 use rustc_span::DUMMY_SP;
17 use rustc_target::abi::call::{
18 ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, Conv, FnAbi, PassMode, Reg, RegKind,
20 use rustc_target::abi::*;
21 use rustc_target::spec::{abi::Abi as SpecAbi, HasTargetSpec, PanicStrategy};
27 use std::num::NonZeroUsize;
// Extension trait layering rustc-specific conveniences onto `rustc_target`'s
// `Integer` type (conversion to a `Ty`, construction from `#[repr]` data).
// NOTE(review): this excerpt is elided — the trait's closing brace and at
// least one more method declaration (the `repr_discr` implemented below)
// are not visible here.
30 pub trait IntegerExt {
// Map this integer width to the matching signed or unsigned `Ty` interned in `tcx`.
31 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx>;
// Map an `attr::IntType` (parsed from a `#[repr(..)]` attribute) to an `Integer`;
// needs `HasDataLayout` so `isize`/`usize` can resolve to the target pointer width.
32 fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
// NOTE(review): this impl is elided in the dump — closing braces, the
// `match ity {` opener in `from_attr`, the `repr_discr` signature lines, and
// the `bug!(...)` invocation wrapping the "hint too small" message are all
// missing from this view. Code below is kept exactly as excerpted.
42 impl IntegerExt for Integer {
// Exhaustively maps (width, signedness) to the interned primitive integer `Ty`.
43 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx> {
44 match (*self, signed) {
45 (I8, false) => tcx.types.u8,
46 (I16, false) => tcx.types.u16,
47 (I32, false) => tcx.types.u32,
48 (I64, false) => tcx.types.u64,
49 (I128, false) => tcx.types.u128,
50 (I8, true) => tcx.types.i8,
51 (I16, true) => tcx.types.i16,
52 (I32, true) => tcx.types.i32,
53 (I64, true) => tcx.types.i64,
54 (I128, true) => tcx.types.i128,
58 /// Gets the Integer type from an attr::IntType.
59 fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
60 let dl = cx.data_layout();
// Signed and unsigned reprs of the same width collapse to one `Integer`.
63 attr::SignedInt(IntTy::I8) | attr::UnsignedInt(UintTy::U8) => I8,
64 attr::SignedInt(IntTy::I16) | attr::UnsignedInt(UintTy::U16) => I16,
65 attr::SignedInt(IntTy::I32) | attr::UnsignedInt(UintTy::U32) => I32,
66 attr::SignedInt(IntTy::I64) | attr::UnsignedInt(UintTy::U64) => I64,
67 attr::SignedInt(IntTy::I128) | attr::UnsignedInt(UintTy::U128) => I128,
// `isize`/`usize` depend on the target: resolved through the data layout.
68 attr::SignedInt(IntTy::Isize) | attr::UnsignedInt(UintTy::Usize) => {
69 dl.ptr_sized_integer()
74 /// Finds the appropriate Integer type and signedness for the given
75 /// signed discriminant range and `#[repr]` attribute.
76 /// N.B.: `u128` values above `i128::MAX` will be treated as signed, but
77 /// that shouldn't affect anything, other than maybe debuginfo.
84 ) -> (Integer, bool) {
85 // Theoretically, negative values could be larger in unsigned representation
86 // than the unsigned representation of the signed minimum. However, if there
87 // are any negative values, the only valid unsigned representation is u128
88 // which can fit all i128 values, so the result remains unaffected.
89 let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
90 let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));
92 let mut min_from_extern = None;
// An explicit `#[repr(int)]` hint wins outright — but only if the hinted
// width can actually hold the discriminant range (the error path's `bug!`
// wrapper is elided from this view).
95 if let Some(ity) = repr.int {
96 let discr = Integer::from_attr(&tcx, ity);
97 let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
100 "Integer::repr_discr: `#[repr]` hint too small for \
101 discriminant range of enum `{}",
105 return (discr, ity.is_signed());
// Presumably under a `repr.c()` branch elided above — TODO confirm against
// the full source; establishes an extern-ABI minimum discriminant size.
109 match &tcx.sess.target.arch[..] {
110 // WARNING: the ARM EABI has two variants; the one corresponding
111 // to `at_least == I32` appears to be used on Linux and NetBSD,
112 // but some systems may use the variant corresponding to no
113 // lower bound. However, we don't run on those yet...?
// Both arms pick I32 today; the "arm" arm is kept separate only to carry
// the EABI caveat above.
114 "arm" => min_from_extern = Some(I32),
115 _ => min_from_extern = Some(I32),
119 let at_least = min_from_extern.unwrap_or(min_default);
121 // If there are no negative values, we can use the unsigned fit.
123 (cmp::max(unsigned_fit, at_least), false)
125 (cmp::max(signed_fit, at_least), true)
// Extension trait for `rustc_target`'s `Primitive`, adding conversions into
// interned `Ty`s. (Closing brace elided from this view.)
130 pub trait PrimitiveExt {
// The `Ty` this primitive naturally corresponds to (pointer -> `*mut ()`).
131 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
// An *integer* `Ty` of the same size; panics for floats (see impl below).
132 fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
// NOTE(review): elided excerpt — `match` openers and closing braces are
// missing from this view; code kept exactly as excerpted.
135 impl PrimitiveExt for Primitive {
136 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
138 Int(i, signed) => i.to_ty(tcx, signed),
139 F32 => tcx.types.f32,
140 F64 => tcx.types.f64,
// Pointers map to `*mut ()` — a concrete type standing in for "some pointer".
141 Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
145 /// Return an *integer* type matching this primitive.
146 /// Useful in particular when dealing with enum discriminants.
// NOTE(review): shown without `<'tcx>` on the fn, unlike the trait
// declaration above — presumably an artifact of this elided dump; confirm
// against the full source.
147 fn to_int_ty(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
149 Int(i, signed) => i.to_ty(tcx, signed),
// `usize` is the pointer-sized integer stand-in for `Pointer`.
150 Pointer => tcx.types.usize,
151 F32 | F64 => bug!("floats do not have an int type"),
156 /// The first half of a fat pointer.
158 /// - For a trait object, this is the address of the box.
159 /// - For a slice, this is the base address.
// Field index into a fat pointer's scalar-pair layout.
160 pub const FAT_PTR_ADDR: usize = 0;
162 /// The second half of a fat pointer.
164 /// - For a trait object, this is the address of the vtable.
165 /// - For a slice, this is the length.
// Companion field index; together with FAT_PTR_ADDR these name the two
// components of the (data, metadata) pair built by `scalar_pair` below.
166 pub const FAT_PTR_EXTRA: usize = 1;
// Errors produced while computing a type's layout.
// NOTE(review): the `Unknown(Ty<'tcx>)` variant is elided from this view —
// it must exist, since the `Display` impl below matches on it.
168 #[derive(Copy, Clone, Debug, TyEncodable, TyDecodable)]
169 pub enum LayoutError<'tcx> {
// The type's computed size exceeds what the target can represent.
171 SizeOverflow(Ty<'tcx>),
// Human-readable rendering of layout errors (used in diagnostics).
// NOTE(review): the `match` opener and closing braces are elided here.
174 impl<'tcx> fmt::Display for LayoutError<'tcx> {
175 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
177 LayoutError::Unknown(ty) => write!(f, "the type `{}` has an unknown layout", ty),
178 LayoutError::SizeOverflow(ty) => {
179 write!(f, "values of the type `{}` are too big for the current architecture", ty)
// NOTE(review): the `fn layout_raw(tcx, ...)` header line is elided from this
// view — the function's identity is grounded by the `provide` registration
// below, which wires `layout_raw` up as a query provider.
187 query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
188 ) -> Result<&'tcx Layout, LayoutError<'tcx>> {
189 ty::tls::with_related_context(tcx, move |icx| {
190 let (param_env, ty) = query.into_parts();
// Guard against unbounded layout recursion (e.g. infinitely-sized types):
// abort compilation once the session's recursion limit is hit.
192 if !tcx.sess.recursion_limit().value_within_limit(icx.layout_depth) {
193 tcx.sess.fatal(&format!("overflow representing the type `{}`", ty));
196 // Update the ImplicitCtxt to increase the layout_depth
197 let icx = ty::tls::ImplicitCtxt { layout_depth: icx.layout_depth + 1, ..icx.clone() };
199 ty::tls::enter_context(&icx, |_| {
200 let cx = LayoutCx { tcx, param_env };
201 let layout = cx.layout_raw_uncached(ty);
202 // Type-level uninhabitedness should always imply ABI uninhabitedness.
203 if let Ok(layout) = layout {
204 if ty.conservative_is_privately_uninhabited(tcx) {
205 assert!(layout.abi.is_uninhabited());
// Registers this module's query providers with the global provider table,
// keeping all other providers unchanged via struct-update syntax.
// (Closing brace elided from this view.)
213 pub fn provide(providers: &mut ty::query::Providers) {
214 *providers = ty::query::Providers { layout_raw, ..*providers };
// Context bundle used while computing layouts: a compiler context `C`
// plus the parameterization environment to normalize/evaluate in.
// NOTE(review): the `tcx: C` field line is elided from this view
// (`LayoutCx { tcx, param_env }` is constructed above).
217 pub struct LayoutCx<'tcx, C> {
219 pub param_env: ty::ParamEnv<'tcx>,
// Classifies how a univariant aggregate may be laid out — in particular
// whether its last field may be unsized and whether a prefix (e.g. an enum
// tag) must be reserved before the fields.
// NOTE(review): the `enum StructKind {` line itself and the first two
// variant identifiers are elided from this view; only their doc comments
// and the `Prefixed` variant survive.
222 #[derive(Copy, Clone, Debug)]
224 /// A tuple, closure, or univariant which cannot be coerced to unsized.
226 /// A univariant, the last field of which may be coerced to unsized.
228 /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
229 Prefixed(Size, Align),
232 // Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
233 // This is used to go between `memory_index` (source field order to memory order)
234 // and `inverse_memory_index` (memory order to source field order).
235 // See also `FieldsShape::Arbitrary::memory_index` for more details.
236 // FIXME(eddyb) build a better abstraction for permutations, if possible.
// NOTE(review): the trailing `inverse` return expression and closing brace
// are elided from this view.
237 fn invert_mapping(map: &[u32]) -> Vec<u32> {
238 let mut inverse = vec![0; map.len()];
239 for i in 0..map.len() {
// Bijectivity guarantees every slot of `inverse` is written exactly once.
240 inverse[map[i] as usize] = i as u32;
245 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
// Builds the canonical layout for a pair of scalars (e.g. fat pointers,
// two-field structs unpacked to `Abi::ScalarPair`): `a` at offset 0, `b`
// at `a`'s size rounded up to `b`'s alignment.
// NOTE(review): the `Layout { ... }` constructor opener and the trailing
// fields (`largest_niche`, `align`, `size`) plus closing braces are elided.
246 fn scalar_pair(&self, a: Scalar, b: Scalar) -> Layout {
247 let dl = self.data_layout();
248 let b_align = b.value.align(dl);
// Pair alignment: max of both scalars' alignment and the aggregate minimum.
249 let align = a.value.align(dl).max(b_align).max(dl.aggregate_align);
250 let b_offset = a.value.size(dl).align_to(b_align.abi);
251 let size = (b_offset + b.value.size(dl)).align_to(align.abi);
253 // HACK(nox): We iter on `b` and then `a` because `max_by_key`
254 // returns the last maximum.
255 let largest_niche = Niche::from_scalar(dl, b_offset, b.clone())
257 .chain(Niche::from_scalar(dl, Size::ZERO, a.clone()))
258 .max_by_key(|niche| niche.available(dl));
261 variants: Variants::Single { index: VariantIdx::new(0) },
262 fields: FieldsShape::Arbitrary {
263 offsets: vec![Size::ZERO, b_offset],
264 memory_index: vec![0, 1],
266 abi: Abi::ScalarPair(a, b),
// Computes the (uninterned) layout of a single-variant aggregate: optionally
// reorders fields for size, assigns offsets, tracks the largest niche, and
// tries to upgrade the ABI to Scalar/ScalarPair where that is safe.
// NOTE(review): this dump is heavily elided — parameter lines (`ty`, `repr`,
// `kind`), several `match`/`if` openers, `else` arms, and closing braces are
// missing. Code lines below are kept exactly as excerpted.
273 fn univariant_uninterned(
276 fields: &[TyAndLayout<'_>],
279 ) -> Result<Layout, LayoutError<'tcx>> {
280 let dl = self.data_layout();
281 let pack = repr.pack;
// `#[repr(packed)]` and `#[repr(align)]` are mutually exclusive; typeck
// should have rejected the combination, so this is an internal invariant.
282 if pack.is_some() && repr.align.is_some() {
283 bug!("struct cannot be packed and aligned");
286 let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };
// Start with the identity permutation; field reordering mutates this.
288 let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();
290 let optimize = !repr.inhibit_struct_field_reordering_opt();
// A maybe-unsized tail must stay last, so it is excluded from reordering.
293 if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
294 let optimizing = &mut inverse_memory_index[..end];
// Effective alignment of a field, honoring `#[repr(packed(N))]`.
295 let field_align = |f: &TyAndLayout<'_>| {
296 if let Some(pack) = pack { f.align.abi.min(pack) } else { f.align.abi }
299 StructKind::AlwaysSized | StructKind::MaybeUnsized => {
300 optimizing.sort_by_key(|&x| {
301 // Place ZSTs first to avoid "interesting offsets",
302 // especially with only one or two non-ZST fields.
303 let f = &fields[x as usize];
304 (!f.is_zst(), cmp::Reverse(field_align(f)))
307 StructKind::Prefixed(..) => {
308 // Sort in ascending alignment so that the layout stay optimal
309 // regardless of the prefix
310 optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
315 // inverse_memory_index holds field indices by increasing memory offset.
316 // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
317 // We now write field offsets to the corresponding offset slot;
318 // field 5 with offset 0 puts 0 in offsets[5].
319 // At the bottom of this function, we invert `inverse_memory_index` to
320 // produce `memory_index` (see `invert_mapping`).
322 let mut sized = true;
323 let mut offsets = vec![Size::ZERO; fields.len()];
324 let mut offset = Size::ZERO;
325 let mut largest_niche = None;
326 let mut largest_niche_available = 0;
// For `Prefixed`, start the field area after the (tag) prefix.
328 if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
330 if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
331 align = align.max(AbiAndPrefAlign::new(prefix_align));
332 offset = prefix_size.align_to(prefix_align);
// Walk fields in memory order, assigning each its aligned offset.
335 for &i in &inverse_memory_index {
336 let field = fields[i as usize];
337 bug!("univariant: field #{} of `{}` comes after unsized field", offsets.len(), ty);
341 if field.is_unsized() {
345 // Invariant: offset < dl.obj_size_bound() <= 1<<61
346 let field_align = if let Some(pack) = pack {
347 field.align.min(AbiAndPrefAlign::new(pack))
351 offset = offset.align_to(field_align.abi);
352 align = align.max(field_align);
354 debug!("univariant offset: {:?} field: {:#?}", offset, field);
355 offsets[i as usize] = offset;
// Track the best (largest) niche seen so far, translated to its
// absolute offset within this aggregate.
357 if !repr.hide_niche() {
358 if let Some(mut niche) = field.largest_niche.clone() {
359 let available = niche.available(dl);
360 if available > largest_niche_available {
361 largest_niche_available = available;
362 niche.offset += offset;
363 largest_niche = Some(niche);
// Overflow here means the type is too big for the target.
368 offset = offset.checked_add(field.size, dl).ok_or(LayoutError::SizeOverflow(ty))?;
371 if let Some(repr_align) = repr.align {
372 align = align.max(AbiAndPrefAlign::new(repr_align));
375 debug!("univariant min_size: {:?}", offset);
376 let min_size = offset;
378 // As stated above, inverse_memory_index holds field indices by increasing offset.
379 // This makes it an already-sorted view of the offsets vec.
380 // To invert it, consider:
381 // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
382 // Field 5 would be the first element, so memory_index is i:
383 // Note: if we didn't optimize, it's already right.
386 if optimize { invert_mapping(&inverse_memory_index) } else { inverse_memory_index };
388 let size = min_size.align_to(align.abi);
389 let mut abi = Abi::Aggregate { sized };
391 // Unpack newtype ABIs and find scalar pairs.
392 if sized && size.bytes() > 0 {
393 // All other fields must be ZSTs.
394 let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());
396 match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
397 // We have exactly one non-ZST field.
398 (Some((i, field)), None, None) => {
399 // Field fills the struct and it has a scalar or scalar pair ABI.
400 if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size
403 // For plain scalars, or vectors of them, we can't unpack
404 // newtypes for `#[repr(C)]`, as that affects C ABIs.
405 Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
406 abi = field.abi.clone();
408 // But scalar pairs are Rust-specific and get
409 // treated as aggregates by C ABIs anyway.
410 Abi::ScalarPair(..) => {
411 abi = field.abi.clone();
418 // Two non-ZST fields, and they're both scalars.
420 Some((i, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(ref a), .. }, .. })),
421 Some((j, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(ref b), .. }, .. })),
424 // Order by the memory placement, not source order.
425 let ((i, a), (j, b)) =
426 if offsets[i] < offsets[j] { ((i, a), (j, b)) } else { ((j, b), (i, a)) };
// Build the canonical pair layout and check ours matches it
// exactly before claiming the ScalarPair ABI.
427 let pair = self.scalar_pair(a.clone(), b.clone());
428 let pair_offsets = match pair.fields {
429 FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
430 assert_eq!(memory_index, &[0, 1]);
435 if offsets[i] == pair_offsets[0]
436 && offsets[j] == pair_offsets[1]
437 && align == pair.align
440 // We can use `ScalarPair` only when it matches our
441 // already computed layout (including `#[repr(C)]`).
// Any uninhabited field makes the whole aggregate uninhabited.
450 if sized && fields.iter().any(|f| f.abi.is_uninhabited()) {
451 abi = Abi::Uninhabited;
455 variants: Variants::Single { index: VariantIdx::new(0) },
456 fields: FieldsShape::Arbitrary { offsets, memory_index },
464 fn layout_raw_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx Layout, LayoutError<'tcx>> {
466 let param_env = self.param_env;
467 let dl = self.data_layout();
468 let scalar_unit = |value: Primitive| {
469 let bits = value.size(dl).bits();
470 assert!(bits <= 128);
471 Scalar { value, valid_range: 0..=(!0 >> (128 - bits)) }
473 let scalar = |value: Primitive| tcx.intern_layout(Layout::scalar(self, scalar_unit(value)));
475 let univariant = |fields: &[TyAndLayout<'_>], repr: &ReprOptions, kind| {
476 Ok(tcx.intern_layout(self.univariant_uninterned(ty, fields, repr, kind)?))
478 debug_assert!(!ty.has_infer_types_or_consts());
480 Ok(match *ty.kind() {
482 ty::Bool => tcx.intern_layout(Layout::scalar(
484 Scalar { value: Int(I8, false), valid_range: 0..=1 },
486 ty::Char => tcx.intern_layout(Layout::scalar(
488 Scalar { value: Int(I32, false), valid_range: 0..=0x10FFFF },
490 ty::Int(ity) => scalar(Int(Integer::from_attr(dl, attr::SignedInt(ity)), true)),
491 ty::Uint(ity) => scalar(Int(Integer::from_attr(dl, attr::UnsignedInt(ity)), false)),
492 ty::Float(fty) => scalar(match fty {
493 ast::FloatTy::F32 => F32,
494 ast::FloatTy::F64 => F64,
497 let mut ptr = scalar_unit(Pointer);
498 ptr.valid_range = 1..=*ptr.valid_range.end();
499 tcx.intern_layout(Layout::scalar(self, ptr))
503 ty::Never => tcx.intern_layout(Layout {
504 variants: Variants::Single { index: VariantIdx::new(0) },
505 fields: FieldsShape::Primitive,
506 abi: Abi::Uninhabited,
512 // Potentially-wide pointers.
513 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
514 let mut data_ptr = scalar_unit(Pointer);
515 if !ty.is_unsafe_ptr() {
516 data_ptr.valid_range = 1..=*data_ptr.valid_range.end();
519 let pointee = tcx.normalize_erasing_regions(param_env, pointee);
520 if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
521 return Ok(tcx.intern_layout(Layout::scalar(self, data_ptr)));
524 let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
525 let metadata = match unsized_part.kind() {
527 return Ok(tcx.intern_layout(Layout::scalar(self, data_ptr)));
529 ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)),
531 let mut vtable = scalar_unit(Pointer);
532 vtable.valid_range = 1..=*vtable.valid_range.end();
535 _ => return Err(LayoutError::Unknown(unsized_part)),
538 // Effectively a (ptr, meta) tuple.
539 tcx.intern_layout(self.scalar_pair(data_ptr, metadata))
542 // Arrays and slices.
543 ty::Array(element, mut count) => {
544 if count.has_projections() {
545 count = tcx.normalize_erasing_regions(param_env, count);
546 if count.has_projections() {
547 return Err(LayoutError::Unknown(ty));
551 let count = count.try_eval_usize(tcx, param_env).ok_or(LayoutError::Unknown(ty))?;
552 let element = self.layout_of(element)?;
554 element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;
556 let abi = if count != 0 && ty.conservative_is_privately_uninhabited(tcx) {
559 Abi::Aggregate { sized: true }
562 let largest_niche = if count != 0 { element.largest_niche.clone() } else { None };
564 tcx.intern_layout(Layout {
565 variants: Variants::Single { index: VariantIdx::new(0) },
566 fields: FieldsShape::Array { stride: element.size, count },
569 align: element.align,
573 ty::Slice(element) => {
574 let element = self.layout_of(element)?;
575 tcx.intern_layout(Layout {
576 variants: Variants::Single { index: VariantIdx::new(0) },
577 fields: FieldsShape::Array { stride: element.size, count: 0 },
578 abi: Abi::Aggregate { sized: false },
580 align: element.align,
584 ty::Str => tcx.intern_layout(Layout {
585 variants: Variants::Single { index: VariantIdx::new(0) },
586 fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
587 abi: Abi::Aggregate { sized: false },
594 ty::FnDef(..) => univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?,
595 ty::Dynamic(..) | ty::Foreign(..) => {
596 let mut unit = self.univariant_uninterned(
599 &ReprOptions::default(),
600 StructKind::AlwaysSized,
603 Abi::Aggregate { ref mut sized } => *sized = false,
606 tcx.intern_layout(unit)
609 ty::Generator(def_id, substs, _) => self.generator_layout(ty, def_id, substs)?,
611 ty::Closure(_, ref substs) => {
612 let tys = substs.as_closure().upvar_tys();
614 &tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
615 &ReprOptions::default(),
616 StructKind::AlwaysSized,
622 if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };
626 .map(|k| self.layout_of(k.expect_ty()))
627 .collect::<Result<Vec<_>, _>>()?,
628 &ReprOptions::default(),
633 // SIMD vector types.
634 ty::Adt(def, substs) if def.repr.simd() => {
635 // Supported SIMD vectors are homogeneous ADTs with at least one field:
637 // * #[repr(simd)] struct S(T, T, T, T);
638 // * #[repr(simd)] struct S { x: T, y: T, z: T, w: T }
639 // * #[repr(simd)] struct S([T; 4])
641 // where T is a primitive scalar (integer/float/pointer).
643 // SIMD vectors with zero fields are not supported.
644 // (should be caught by typeck)
645 if def.non_enum_variant().fields.is_empty() {
646 tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
649 // Type of the first ADT field:
650 let f0_ty = def.non_enum_variant().fields[0].ty(tcx, substs);
652 // Heterogeneous SIMD vectors are not supported:
653 // (should be caught by typeck)
654 for fi in &def.non_enum_variant().fields {
655 if fi.ty(tcx, substs) != f0_ty {
656 tcx.sess.fatal(&format!("monomorphising heterogeneous SIMD type `{}`", ty));
660 // The element type and number of elements of the SIMD vector
661 // are obtained from:
663 // * the element type and length of the single array field, if
664 // the first field is of array type, or
666 // * the homogenous field type and the number of fields.
667 let (e_ty, e_len, is_array) = if let ty::Array(e_ty, _) = f0_ty.kind() {
668 // First ADT field is an array:
670 // SIMD vectors with multiple array fields are not supported:
671 // (should be caught by typeck)
672 if def.non_enum_variant().fields.len() != 1 {
673 tcx.sess.fatal(&format!(
674 "monomorphising SIMD type `{}` with more than one array field",
679 // Extract the number of elements from the layout of the array field:
680 let len = if let Ok(TyAndLayout {
681 layout: Layout { fields: FieldsShape::Array { count, .. }, .. },
683 }) = self.layout_of(f0_ty)
687 return Err(LayoutError::Unknown(ty));
692 // First ADT field is not an array:
693 (f0_ty, def.non_enum_variant().fields.len() as _, false)
696 // SIMD vectors of zero length are not supported.
697 // Additionally, lengths are capped at 2^16 as a fixed maximum backends must
700 // Can't be caught in typeck if the array length is generic.
702 tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
703 } else if e_len > 65536 {
704 tcx.sess.fatal(&format!(
705 "monomorphising SIMD type `{}` of length greater than 65536",
710 // Compute the ABI of the element type:
711 let e_ly = self.layout_of(e_ty)?;
712 let e_abi = if let Abi::Scalar(ref scalar) = e_ly.abi {
715 // This error isn't caught in typeck, e.g., if
716 // the element type of the vector is generic.
717 tcx.sess.fatal(&format!(
718 "monomorphising SIMD type `{}` with a non-primitive-scalar \
719 (integer/float/pointer) element type `{}`",
724 // Compute the size and alignment of the vector:
725 let size = e_ly.size.checked_mul(e_len, dl).ok_or(LayoutError::SizeOverflow(ty))?;
726 let align = dl.vector_align(size);
727 let size = size.align_to(align.abi);
729 // Compute the placement of the vector fields:
730 let fields = if is_array {
731 FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] }
733 FieldsShape::Array { stride: e_ly.size, count: e_len }
736 tcx.intern_layout(Layout {
737 variants: Variants::Single { index: VariantIdx::new(0) },
739 abi: Abi::Vector { element: e_abi, count: e_len },
740 largest_niche: e_ly.largest_niche.clone(),
747 ty::Adt(def, substs) => {
748 // Cache the field layouts.
755 .map(|field| self.layout_of(field.ty(tcx, substs)))
756 .collect::<Result<Vec<_>, _>>()
758 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
761 if def.repr.pack.is_some() && def.repr.align.is_some() {
762 bug!("union cannot be packed and aligned");
766 if def.repr.pack.is_some() { dl.i8_align } else { dl.aggregate_align };
768 if let Some(repr_align) = def.repr.align {
769 align = align.max(AbiAndPrefAlign::new(repr_align));
772 let optimize = !def.repr.inhibit_union_abi_opt();
773 let mut size = Size::ZERO;
774 let mut abi = Abi::Aggregate { sized: true };
775 let index = VariantIdx::new(0);
776 for field in &variants[index] {
777 assert!(!field.is_unsized());
778 align = align.max(field.align);
780 // If all non-ZST fields have the same ABI, forward this ABI
781 if optimize && !field.is_zst() {
782 // Normalize scalar_unit to the maximal valid range
783 let field_abi = match &field.abi {
784 Abi::Scalar(x) => Abi::Scalar(scalar_unit(x.value)),
785 Abi::ScalarPair(x, y) => {
786 Abi::ScalarPair(scalar_unit(x.value), scalar_unit(y.value))
788 Abi::Vector { element: x, count } => {
789 Abi::Vector { element: scalar_unit(x.value), count: *count }
791 Abi::Uninhabited | Abi::Aggregate { .. } => {
792 Abi::Aggregate { sized: true }
796 if size == Size::ZERO {
797 // first non ZST: initialize 'abi'
799 } else if abi != field_abi {
800 // different fields have different ABI: reset to Aggregate
801 abi = Abi::Aggregate { sized: true };
805 size = cmp::max(size, field.size);
808 if let Some(pack) = def.repr.pack {
809 align = align.min(AbiAndPrefAlign::new(pack));
812 return Ok(tcx.intern_layout(Layout {
813 variants: Variants::Single { index },
814 fields: FieldsShape::Union(
815 NonZeroUsize::new(variants[index].len())
816 .ok_or(LayoutError::Unknown(ty))?,
821 size: size.align_to(align.abi),
825 // A variant is absent if it's uninhabited and only has ZST fields.
826 // Present uninhabited variants only require space for their fields,
827 // but *not* an encoding of the discriminant (e.g., a tag value).
828 // See issue #49298 for more details on the need to leave space
829 // for non-ZST uninhabited data (mostly partial initialization).
830 let absent = |fields: &[TyAndLayout<'_>]| {
831 let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
832 let is_zst = fields.iter().all(|f| f.is_zst());
833 uninhabited && is_zst
835 let (present_first, present_second) = {
836 let mut present_variants = variants
838 .filter_map(|(i, v)| if absent(v) { None } else { Some(i) });
839 (present_variants.next(), present_variants.next())
841 let present_first = match present_first {
842 Some(present_first) => present_first,
843 // Uninhabited because it has no variants, or only absent ones.
844 None if def.is_enum() => return tcx.layout_raw(param_env.and(tcx.types.never)),
845 // If it's a struct, still compute a layout so that we can still compute the
847 None => VariantIdx::new(0),
850 let is_struct = !def.is_enum() ||
851 // Only one variant is present.
852 (present_second.is_none() &&
853 // Representation optimizations are allowed.
854 !def.repr.inhibit_enum_layout_opt());
856 // Struct, or univariant enum equivalent to a struct.
857 // (Typechecking will reject discriminant-sizing attrs.)
859 let v = present_first;
860 let kind = if def.is_enum() || variants[v].is_empty() {
861 StructKind::AlwaysSized
863 let param_env = tcx.param_env(def.did);
864 let last_field = def.variants[v].fields.last().unwrap();
866 tcx.type_of(last_field.did).is_sized(tcx.at(DUMMY_SP), param_env);
868 StructKind::MaybeUnsized
870 StructKind::AlwaysSized
874 let mut st = self.univariant_uninterned(ty, &variants[v], &def.repr, kind)?;
875 st.variants = Variants::Single { index: v };
876 let (start, end) = self.tcx.layout_scalar_valid_range(def.did);
878 Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
879 // the asserts ensure that we are not using the
880 // `#[rustc_layout_scalar_valid_range(n)]`
881 // attribute to widen the range of anything as that would probably
882 // result in UB somewhere
883 // FIXME(eddyb) the asserts are probably not needed,
884 // as larger validity ranges would result in missed
885 // optimizations, *not* wrongly assuming the inner
886 // value is valid. e.g. unions enlarge validity ranges,
887 // because the values may be uninitialized.
888 if let Bound::Included(start) = start {
889 // FIXME(eddyb) this might be incorrect - it doesn't
890 // account for wrap-around (end < start) ranges.
891 assert!(*scalar.valid_range.start() <= start);
892 scalar.valid_range = start..=*scalar.valid_range.end();
894 if let Bound::Included(end) = end {
895 // FIXME(eddyb) this might be incorrect - it doesn't
896 // account for wrap-around (end < start) ranges.
897 assert!(*scalar.valid_range.end() >= end);
898 scalar.valid_range = *scalar.valid_range.start()..=end;
901 // Update `largest_niche` if we have introduced a larger niche.
902 let niche = if def.repr.hide_niche() {
905 Niche::from_scalar(dl, Size::ZERO, scalar.clone())
907 if let Some(niche) = niche {
908 match &st.largest_niche {
909 Some(largest_niche) => {
910 // Replace the existing niche even if they're equal,
911 // because this one is at a lower offset.
912 if largest_niche.available(dl) <= niche.available(dl) {
913 st.largest_niche = Some(niche);
916 None => st.largest_niche = Some(niche),
921 start == Bound::Unbounded && end == Bound::Unbounded,
922 "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
928 return Ok(tcx.intern_layout(st));
931 // At this point, we have handled all unions and
932 // structs. (We have also handled univariant enums
933 // that allow representation optimization.)
934 assert!(def.is_enum());
936 // The current code for niche-filling relies on variant indices
937 // instead of actual discriminants, so dataful enums with
938 // explicit discriminants (RFC #2363) would misbehave.
939 let no_explicit_discriminants = def
942 .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i.as_u32()));
944 let mut niche_filling_layout = None;
946 // Niche-filling enum optimization.
947 if !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants {
948 let mut dataful_variant = None;
949 let mut niche_variants = VariantIdx::MAX..=VariantIdx::new(0);
951 // Find one non-ZST variant.
952 'variants: for (v, fields) in variants.iter_enumerated() {
958 if dataful_variant.is_none() {
959 dataful_variant = Some(v);
962 dataful_variant = None;
967 niche_variants = *niche_variants.start().min(&v)..=v;
970 if niche_variants.start() > niche_variants.end() {
971 dataful_variant = None;
974 if let Some(i) = dataful_variant {
975 let count = (niche_variants.end().as_u32()
976 - niche_variants.start().as_u32()
979 // Find the field with the largest niche
980 let niche_candidate = variants[i]
983 .filter_map(|(j, &field)| Some((j, field.largest_niche.as_ref()?)))
984 .max_by_key(|(_, niche)| niche.available(dl));
986 if let Some((field_index, niche, (niche_start, niche_scalar))) =
987 niche_candidate.and_then(|(field_index, niche)| {
988 Some((field_index, niche, niche.reserve(self, count)?))
991 let mut align = dl.aggregate_align;
995 let mut st = self.univariant_uninterned(
999 StructKind::AlwaysSized,
1001 st.variants = Variants::Single { index: j };
1003 align = align.max(st.align);
1007 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1009 let offset = st[i].fields.offset(field_index) + niche.offset;
1010 let size = st[i].size;
1012 let abi = if st.iter().all(|v| v.abi.is_uninhabited()) {
1016 Abi::Scalar(_) => Abi::Scalar(niche_scalar.clone()),
1017 Abi::ScalarPair(ref first, ref second) => {
1018 // We need to use scalar_unit to reset the
1019 // valid range to the maximal one for that
1020 // primitive, because only the niche is
1021 // guaranteed to be initialised, not the
1023 if offset.bytes() == 0 {
1025 niche_scalar.clone(),
1026 scalar_unit(second.value),
1030 scalar_unit(first.value),
1031 niche_scalar.clone(),
1035 _ => Abi::Aggregate { sized: true },
1040 Niche::from_scalar(dl, offset, niche_scalar.clone());
1042 niche_filling_layout = Some(Layout {
1043 variants: Variants::Multiple {
1045 tag_encoding: TagEncoding::Niche {
1053 fields: FieldsShape::Arbitrary {
1054 offsets: vec![offset],
1055 memory_index: vec![0],
1066 let (mut min, mut max) = (i128::MAX, i128::MIN);
1067 let discr_type = def.repr.discr_type();
1068 let bits = Integer::from_attr(self, discr_type).size().bits();
1069 for (i, discr) in def.discriminants(tcx) {
1070 if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
1073 let mut x = discr.val as i128;
1074 if discr_type.is_signed() {
1075 // sign extend the raw representation to be an i128
1076 x = (x << (128 - bits)) >> (128 - bits);
1085 // We might have no inhabited variants, so pretend there's at least one.
1086 if (min, max) == (i128::MAX, i128::MIN) {
1090 assert!(min <= max, "discriminant range is {}...{}", min, max);
1091 let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max);
1093 let mut align = dl.aggregate_align;
1094 let mut size = Size::ZERO;
1096 // We're interested in the smallest alignment, so start large.
1097 let mut start_align = Align::from_bytes(256).unwrap();
1098 assert_eq!(Integer::for_align(dl, start_align), None);
1100 // repr(C) on an enum tells us to make a (tag, union) layout,
1101 // so we need to grow the prefix alignment to be at least
1102 // the alignment of the union. (This value is used both for
1103 // determining the alignment of the overall enum, and the
1104 // determining the alignment of the payload after the tag.)
1105 let mut prefix_align = min_ity.align(dl).abi;
1107 for fields in &variants {
1108 for field in fields {
1109 prefix_align = prefix_align.max(field.align.abi);
1114 // Create the set of structs that represent each variant.
1115 let mut layout_variants = variants
1117 .map(|(i, field_layouts)| {
1118 let mut st = self.univariant_uninterned(
1122 StructKind::Prefixed(min_ity.size(), prefix_align),
1124 st.variants = Variants::Single { index: i };
1125 // Find the first field we can't move later
1126 // to make room for a larger discriminant.
1128 st.fields.index_by_increasing_offset().map(|j| field_layouts[j])
1130 if !field.is_zst() || field.align.abi.bytes() != 1 {
1131 start_align = start_align.min(field.align.abi);
1135 size = cmp::max(size, st.size);
1136 align = align.max(st.align);
1139 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1141 // Align the maximum variant size to the largest alignment.
1142 size = size.align_to(align.abi);
1144 if size.bytes() >= dl.obj_size_bound() {
1145 return Err(LayoutError::SizeOverflow(ty));
1148 let typeck_ity = Integer::from_attr(dl, def.repr.discr_type());
1149 if typeck_ity < min_ity {
1150 // It is a bug if Layout decided on a greater discriminant size than typeck for
1151 // some reason at this point (based on values discriminant can take on). Mostly
1152 // because this discriminant will be loaded, and then stored into variable of
1153 // type calculated by typeck. Consider such case (a bug): typeck decided on
1154 // byte-sized discriminant, but layout thinks we need a 16-bit to store all
1155 // discriminant values. That would be a bug, because then, in codegen, in order
1156 // to store this 16-bit discriminant into 8-bit sized temporary some of the
1157 // space necessary to represent would have to be discarded (or layout is wrong
1158 // on thinking it needs 16 bits)
1160 "layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
1164 // However, it is fine to make discr type however large (as an optimisation)
1165 // after this point – we’ll just truncate the value we load in codegen.
1168 // Check to see if we should use a different type for the
1169 // discriminant. We can safely use a type with the same size
1170 // as the alignment of the first field of each variant.
1171 // We increase the size of the discriminant to avoid LLVM copying
1172 // padding when it doesn't need to. This normally causes unaligned
1173 // load/stores and excessive memcpy/memset operations. By using a
1174 // bigger integer size, LLVM can be sure about its contents and
1175 // won't be so conservative.
1177 // Use the initial field alignment
1178 let mut ity = if def.repr.c() || def.repr.int.is_some() {
1181 Integer::for_align(dl, start_align).unwrap_or(min_ity)
1184 // If the alignment is not larger than the chosen discriminant size,
1185 // don't use the alignment as the final size.
1189 // Patch up the variants' first few fields.
1190 let old_ity_size = min_ity.size();
1191 let new_ity_size = ity.size();
1192 for variant in &mut layout_variants {
1193 match variant.fields {
1194 FieldsShape::Arbitrary { ref mut offsets, .. } => {
1196 if *i <= old_ity_size {
1197 assert_eq!(*i, old_ity_size);
1201 // We might be making the struct larger.
1202 if variant.size <= old_ity_size {
1203 variant.size = new_ity_size;
1211 let tag_mask = !0u128 >> (128 - ity.size().bits());
1213 value: Int(ity, signed),
1214 valid_range: (min as u128 & tag_mask)..=(max as u128 & tag_mask),
1216 let mut abi = Abi::Aggregate { sized: true };
1217 if tag.value.size(dl) == size {
1218 abi = Abi::Scalar(tag.clone());
1220 // Try to use a ScalarPair for all tagged enums.
1221 let mut common_prim = None;
1222 for (field_layouts, layout_variant) in variants.iter().zip(&layout_variants) {
1223 let offsets = match layout_variant.fields {
1224 FieldsShape::Arbitrary { ref offsets, .. } => offsets,
1228 field_layouts.iter().zip(offsets).filter(|p| !p.0.is_zst());
1229 let (field, offset) = match (fields.next(), fields.next()) {
1230 (None, None) => continue,
1231 (Some(pair), None) => pair,
1237 let prim = match field.abi {
1238 Abi::Scalar(ref scalar) => scalar.value,
1244 if let Some(pair) = common_prim {
1245 // This is pretty conservative. We could go fancier
1246 // by conflating things like i32 and u32, or even
1247 // realising that (u8, u8) could just cohabit with
1249 if pair != (prim, offset) {
1254 common_prim = Some((prim, offset));
1257 if let Some((prim, offset)) = common_prim {
1258 let pair = self.scalar_pair(tag.clone(), scalar_unit(prim));
1259 let pair_offsets = match pair.fields {
1260 FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
1261 assert_eq!(memory_index, &[0, 1]);
1266 if pair_offsets[0] == Size::ZERO
1267 && pair_offsets[1] == *offset
1268 && align == pair.align
1269 && size == pair.size
1271 // We can use `ScalarPair` only when it matches our
1272 // already computed layout (including `#[repr(C)]`).
1278 if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
1279 abi = Abi::Uninhabited;
1282 let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag.clone());
1284 let tagged_layout = Layout {
1285 variants: Variants::Multiple {
1287 tag_encoding: TagEncoding::Direct,
1289 variants: layout_variants,
1291 fields: FieldsShape::Arbitrary {
1292 offsets: vec![Size::ZERO],
1293 memory_index: vec![0],
1301 let best_layout = match (tagged_layout, niche_filling_layout) {
1302 (tagged_layout, Some(niche_filling_layout)) => {
1303 // Pick the smaller layout; otherwise,
1304 // pick the layout with the larger niche; otherwise,
1305 // pick tagged as it has simpler codegen.
1306 cmp::min_by_key(tagged_layout, niche_filling_layout, |layout| {
1308 layout.largest_niche.as_ref().map_or(0, |n| n.available(dl));
1309 (layout.size, cmp::Reverse(niche_size))
1312 (tagged_layout, None) => tagged_layout,
1315 tcx.intern_layout(best_layout)
1318 // Types with no meaningful known layout.
1319 ty::Projection(_) | ty::Opaque(..) => {
1320 let normalized = tcx.normalize_erasing_regions(param_env, ty);
1321 if ty == normalized {
1322 return Err(LayoutError::Unknown(ty));
1324 tcx.layout_raw(param_env.and(normalized))?
1327 ty::Placeholder(..) | ty::GeneratorWitness(..) | ty::Infer(_) => {
1328 bug!("Layout::compute: unexpected type `{}`", ty)
1331 ty::Bound(..) | ty::Param(_) | ty::Error(_) => {
1332 return Err(LayoutError::Unknown(ty));
1338 /// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
1339 #[derive(Clone, Debug, PartialEq)]
1340 enum SavedLocalEligibility {
// Eligible for overlap: the local was seen in exactly one generator
// variant, so its storage may be shared with locals of other variants.
1342 Assigned(VariantIdx),
1343 // FIXME: Use newtype_index so we aren't wasting bytes
// Ineligible for overlap: the local is stored in the generator's prefix.
// The payload, once decided, is the local's slot index among the
// promoted prefix fields (`None` until that index has been assigned).
1344 Ineligible(Option<u32>),
1347 // When laying out generators, we divide our saved local fields into two
1348 // categories: overlap-eligible and overlap-ineligible.
1350 // Those fields which are ineligible for overlap go in a "prefix" at the
1351 // beginning of the layout, and always have space reserved for them.
1353 // Overlap-eligible fields are only assigned to one variant, so we lay
1354 // those fields out for each variant and put them right after the
1357 // Finally, in the layout details, we point to the fields from the
1358 // variants they are assigned to. It is possible for some fields to be
1359 // included in multiple variants. No field ever "moves around" in the
1360 // layout; its offset is always the same.
1362 // Also included in the layout are the upvars and the discriminant.
1363 // These are included as fields on the "outer" layout; they are not part
1365 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
1366 /// Compute the eligibility and assignment of each local.
///
/// Returns the set of saved locals that must be "promoted" into the
/// generator's prefix (ineligible for overlap), together with a per-local
/// `SavedLocalEligibility` record.
1367 fn generator_saved_local_eligibility(
1369 info: &GeneratorLayout<'tcx>,
1370 ) -> (BitSet<GeneratorSavedLocal>, IndexVec<GeneratorSavedLocal, SavedLocalEligibility>) {
1371 use SavedLocalEligibility::*;
1373 let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> =
1374 IndexVec::from_elem_n(Unassigned, info.field_tys.len());
1376 // The saved locals not eligible for overlap. These will get
1377 // "promoted" to the prefix of our generator.
1378 let mut ineligible_locals = BitSet::new_empty(info.field_tys.len());
1380 // Figure out which of our saved locals are fields in only
1381 // one variant. The rest are deemed ineligible for overlap.
1382 for (variant_index, fields) in info.variant_fields.iter_enumerated() {
1383 for local in fields {
1384 match assignments[*local] {
// First sighting: tentatively assign the local to this variant.
1386 assignments[*local] = Assigned(variant_index);
1389 // We've already seen this local at another suspension
1390 // point, so it is no longer a candidate.
1392 "removing local {:?} in >1 variant ({:?}, {:?})",
1397 ineligible_locals.insert(*local);
1398 assignments[*local] = Ineligible(None);
1405 // Next, check every pair of eligible locals to see if they
// conflict, i.e. whether `info.storage_conflicts` records the pair as
// storage-live at the same time.
1407 for local_a in info.storage_conflicts.rows() {
1408 let conflicts_a = info.storage_conflicts.count(local_a);
1409 if ineligible_locals.contains(local_a) {
1413 for local_b in info.storage_conflicts.iter(local_a) {
1414 // local_a and local_b are storage live at the same time, therefore they
1415 // cannot overlap in the generator layout. The only way to guarantee
1416 // this is if they are in the same variant, or one is ineligible
1417 // (which means it is stored in every variant).
1418 if ineligible_locals.contains(local_b)
1419 || assignments[local_a] == assignments[local_b]
1424 // If they conflict, we will choose one to make ineligible.
1425 // This is not always optimal; it's just a greedy heuristic that
1426 // seems to produce good results most of the time.
1427 let conflicts_b = info.storage_conflicts.count(local_b);
// Evict whichever local participates in more conflicts overall.
1428 let (remove, other) =
1429 if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
1430 ineligible_locals.insert(remove);
1431 assignments[remove] = Ineligible(None);
1432 trace!("removing local {:?} due to conflict with {:?}", remove, other);
1436 // Count the number of variants in use. If only one of them, then it is
1437 // impossible to overlap any locals in our layout. In this case it's
1438 // always better to make the remaining locals ineligible, so we can
1439 // lay them out with the other locals in the prefix and eliminate
1440 // unnecessary padding bytes.
1442 let mut used_variants = BitSet::new_empty(info.variant_fields.len());
1443 for assignment in &assignments {
1444 if let Assigned(idx) = assignment {
1445 used_variants.insert(*idx);
1448 if used_variants.count() < 2 {
1449 for assignment in assignments.iter_mut() {
1450 *assignment = Ineligible(None);
1452 ineligible_locals.insert_all();
1456 // Write down the order of our locals that will be promoted to the prefix.
// Prefix slots are handed out in the bitset's iteration order.
1458 for (idx, local) in ineligible_locals.iter().enumerate() {
1459 assignments[local] = Ineligible(Some(idx as u32));
1462 debug!("generator saved local assignments: {:?}", assignments);
1464 (ineligible_locals, assignments)
1467 /// Compute the full generator layout.
///
/// The layout is a prefix (upvars, tag, promoted overlap-ineligible
/// locals) followed by per-variant storage for the overlap-eligible
/// locals of each suspension point.
1468 fn generator_layout(
1471 def_id: hir::def_id::DefId,
1472 substs: SubstsRef<'tcx>,
1473 ) -> Result<&'tcx Layout, LayoutError<'tcx>> {
1474 use SavedLocalEligibility::*;
1477 let subst_field = |ty: Ty<'tcx>| ty.subst(tcx, substs);
1479 let info = tcx.generator_layout(def_id);
1480 let (ineligible_locals, assignments) = self.generator_saved_local_eligibility(&info);
1482 // Build a prefix layout, including "promoting" all ineligible
1483 // locals as part of the prefix. We compute the layout of all of
1484 // these fields at once to get optimal packing.
// The tag (discriminant) sits immediately after the upvar prefix types.
1485 let tag_index = substs.as_generator().prefix_tys().count();
1487 // `info.variant_fields` already accounts for the reserved variants, so no need to add them.
1488 let max_discr = (info.variant_fields.len() - 1) as u128;
// Use the smallest unsigned integer that can hold every discriminant.
1489 let discr_int = Integer::fit_unsigned(max_discr);
1490 let discr_int_ty = discr_int.to_ty(tcx, false);
1491 let tag = Scalar { value: Primitive::Int(discr_int, false), valid_range: 0..=max_discr };
1492 let tag_layout = self.tcx.intern_layout(Layout::scalar(self, tag.clone()));
1493 let tag_layout = TyAndLayout { ty: discr_int_ty, layout: tag_layout };
// Promoted locals are wrapped in `MaybeUninit` since they are not
// initialized in every variant.
1495 let promoted_layouts = ineligible_locals
1497 .map(|local| subst_field(info.field_tys[local]))
1498 .map(|ty| tcx.mk_maybe_uninit(ty))
1499 .map(|ty| self.layout_of(ty));
1500 let prefix_layouts = substs
1503 .map(|ty| self.layout_of(ty))
1504 .chain(iter::once(Ok(tag_layout)))
1505 .chain(promoted_layouts)
1506 .collect::<Result<Vec<_>, _>>()?;
1507 let prefix = self.univariant_uninterned(
1510 &ReprOptions::default(),
1511 StructKind::AlwaysSized,
1514 let (prefix_size, prefix_align) = (prefix.size, prefix.align);
1516 // Split the prefix layout into the "outer" fields (upvars and
1517 // discriminant) and the "promoted" fields. Promoted fields will
1518 // get included in each variant that requested them in
1520 debug!("prefix = {:#?}", prefix);
1521 let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
1522 FieldsShape::Arbitrary { mut offsets, memory_index } => {
1523 let mut inverse_memory_index = invert_mapping(&memory_index);
1525 // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
1526 // "outer" and "promoted" fields respectively.
1527 let b_start = (tag_index + 1) as u32;
1528 let offsets_b = offsets.split_off(b_start as usize);
1529 let offsets_a = offsets;
1531 // Disentangle the "a" and "b" components of `inverse_memory_index`
1532 // by preserving the order but keeping only one disjoint "half" each.
1533 // FIXME(eddyb) build a better abstraction for permutations, if possible.
1534 let inverse_memory_index_b: Vec<_> =
1535 inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
1536 inverse_memory_index.retain(|&i| i < b_start);
1537 let inverse_memory_index_a = inverse_memory_index;
1539 // Since `inverse_memory_index_{a,b}` each only refer to their
1540 // respective fields, they can be safely inverted
1541 let memory_index_a = invert_mapping(&inverse_memory_index_a);
1542 let memory_index_b = invert_mapping(&inverse_memory_index_b);
1545 FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
1546 (outer_fields, offsets_b, memory_index_b)
// Running size/align over all variants; start from the shared prefix.
1551 let mut size = prefix.size;
1552 let mut align = prefix.align;
1556 .map(|(index, variant_fields)| {
1557 // Only include overlap-eligible fields when we compute our variant layout.
1558 let variant_only_tys = variant_fields
1560 .filter(|local| match assignments[**local] {
1561 Unassigned => bug!(),
1562 Assigned(v) if v == index => true,
1563 Assigned(_) => bug!("assignment does not match variant"),
1564 Ineligible(_) => false,
1566 .map(|local| subst_field(info.field_tys[*local]));
1568 let mut variant = self.univariant_uninterned(
1571 .map(|ty| self.layout_of(ty))
1572 .collect::<Result<Vec<_>, _>>()?,
1573 &ReprOptions::default(),
// Variant fields are laid out after the already-fixed prefix.
1574 StructKind::Prefixed(prefix_size, prefix_align.abi),
1576 variant.variants = Variants::Single { index };
1578 let (offsets, memory_index) = match variant.fields {
1579 FieldsShape::Arbitrary { offsets, memory_index } => (offsets, memory_index),
1583 // Now, stitch the promoted and variant-only fields back together in
1584 // the order they are mentioned by our GeneratorLayout.
1585 // Because we only use some subset (that can differ between variants)
1586 // of the promoted fields, we can't just pick those elements of the
1587 // `promoted_memory_index` (as we'd end up with gaps).
1588 // So instead, we build an "inverse memory_index", as if all of the
1589 // promoted fields were being used, but leave the elements not in the
1590 // subset as `INVALID_FIELD_IDX`, which we can filter out later to
1591 // obtain a valid (bijective) mapping.
1592 const INVALID_FIELD_IDX: u32 = !0;
1593 let mut combined_inverse_memory_index =
1594 vec![INVALID_FIELD_IDX; promoted_memory_index.len() + memory_index.len()];
1595 let mut offsets_and_memory_index = offsets.into_iter().zip(memory_index);
1596 let combined_offsets = variant_fields
1600 let (offset, memory_index) = match assignments[*local] {
1601 Unassigned => bug!(),
// Variant-only field: take the next slot computed above,
// shifted past all promoted-field memory indices.
1603 let (offset, memory_index) =
1604 offsets_and_memory_index.next().unwrap();
1605 (offset, promoted_memory_index.len() as u32 + memory_index)
1607 Ineligible(field_idx) => {
1608 let field_idx = field_idx.unwrap() as usize;
1609 (promoted_offsets[field_idx], promoted_memory_index[field_idx])
1612 combined_inverse_memory_index[memory_index as usize] = i as u32;
1617 // Remove the unused slots and invert the mapping to obtain the
1618 // combined `memory_index` (also see previous comment).
1619 combined_inverse_memory_index.retain(|&i| i != INVALID_FIELD_IDX);
1620 let combined_memory_index = invert_mapping(&combined_inverse_memory_index);
1622 variant.fields = FieldsShape::Arbitrary {
1623 offsets: combined_offsets,
1624 memory_index: combined_memory_index,
1627 size = size.max(variant.size);
1628 align = align.max(variant.align);
1631 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1633 size = size.align_to(align.abi);
1635 let abi = if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi.is_uninhabited())
1639 Abi::Aggregate { sized: true }
1642 let layout = tcx.intern_layout(Layout {
1643 variants: Variants::Multiple {
1645 tag_encoding: TagEncoding::Direct,
1646 tag_field: tag_index,
1649 fields: outer_fields,
1651 largest_niche: prefix.largest_niche,
1655 debug!("generator layout ({:?}): {:#?}", ty, layout);
1659 /// This is invoked by the `layout_raw` query to record the final
1660 /// layout of each type.
1662 fn record_layout_for_printing(&self, layout: TyAndLayout<'tcx>) {
1663 // If we are running with `-Zprint-type-sizes`, maybe record layouts
1664 // for dumping later.
// Cheap flag check only; the actual recording work is outlined into
// `record_layout_for_printing_outlined` so this common path stays small.
1665 if self.tcx.sess.opts.debugging_opts.print_type_sizes {
1666 self.record_layout_for_printing_outlined(layout)
// Slow path of `record_layout_for_printing`: collect per-variant and
// per-field size/offset/align info and feed it to `sess.code_stats`
// for the `-Zprint-type-sizes` report.
1670 fn record_layout_for_printing_outlined(&self, layout: TyAndLayout<'tcx>) {
1671 // Ignore layouts that are done with non-empty environments or
1672 // non-monomorphic layouts, as the user only wants to see the stuff
1673 // resulting from the final codegen session.
1674 if layout.ty.has_param_types_or_consts() || !self.param_env.caller_bounds().is_empty() {
1678 // (delay format until we actually need it)
1679 let record = |kind, packed, opt_discr_size, variants| {
1680 let type_desc = format!("{:?}", layout.ty);
1681 self.tcx.sess.code_stats.record_type_size(
// Only ADTs and closures are recorded; everything else is skipped.
1692 let adt_def = match *layout.ty.kind() {
1693 ty::Adt(ref adt_def, _) => {
1694 debug!("print-type-size t: `{:?}` process adt", layout.ty);
1698 ty::Closure(..) => {
1699 debug!("print-type-size t: `{:?}` record closure", layout.ty);
1700 record(DataTypeKind::Closure, false, None, vec![]);
1705 debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
1710 let adt_kind = adt_def.adt_kind();
1711 let adt_packed = adt_def.repr.pack.is_some();
// Build a `VariantInfo` (name, size kind, per-field info) for one
// variant, tracking the minimal span covered by its fields.
1713 let build_variant_info = |n: Option<Ident>, flds: &[Symbol], layout: TyAndLayout<'tcx>| {
1714 let mut min_size = Size::ZERO;
1715 let field_info: Vec<_> = flds
1718 .map(|(i, &name)| match layout.field(self, i) {
1720 bug!("no layout found for field {}: `{:?}`", name, err);
1722 Ok(field_layout) => {
1723 let offset = layout.fields.offset(i);
1724 let field_end = offset + field_layout.size;
1725 if min_size < field_end {
1726 min_size = field_end;
1729 name: name.to_string(),
1730 offset: offset.bytes(),
1731 size: field_layout.size.bytes(),
1732 align: field_layout.align.abi.bytes(),
1739 name: n.map(|n| n.to_string()),
1740 kind: if layout.is_unsized() { SizeKind::Min } else { SizeKind::Exact },
1741 align: layout.align.abi.bytes(),
// Zero `min_size` means no fields contributed; fall back to the
// layout's own size.
1742 size: if min_size.bytes() == 0 { layout.size.bytes() } else { min_size.bytes() },
1747 match layout.variants {
1748 Variants::Single { index } => {
1749 debug!("print-type-size `{:#?}` variant {}", layout, adt_def.variants[index].ident);
1750 if !adt_def.variants.is_empty() {
1751 let variant_def = &adt_def.variants[index];
1752 let fields: Vec<_> = variant_def.fields.iter().map(|f| f.ident.name).collect();
1757 vec![build_variant_info(Some(variant_def.ident), &fields, layout)],
1760 // (This case arises for *empty* enums; so give it
1762 record(adt_kind.into(), adt_packed, None, vec![]);
1766 Variants::Multiple { ref tag, ref tag_encoding, .. } => {
1768 "print-type-size `{:#?}` adt general variants def {}",
1770 adt_def.variants.len()
1772 let variant_infos: Vec<_> = adt_def
1775 .map(|(i, variant_def)| {
1776 let fields: Vec<_> =
1777 variant_def.fields.iter().map(|f| f.ident.name).collect();
1779 Some(variant_def.ident),
1781 layout.for_variant(self, i),
// A directly-encoded tag contributes its own size to the report;
// niche-encoded tags presumably do not (arm elided here).
1788 match tag_encoding {
1789 TagEncoding::Direct => Some(tag.value.size(self)),
1799 /// Type size "skeleton", i.e., the only information determining a type's size.
1800 /// While this is conservative, (aside from constant sizes, only pointers,
1801 /// newtypes thereof and null pointer optimized enums are allowed), it is
1802 /// enough to statically check common use cases of transmute.
1803 #[derive(Copy, Clone, Debug)]
1804 pub enum SizeSkeleton<'tcx> {
1805 /// Any statically computable Layout.
1808 /// A potentially-fat pointer.
1810 /// If true, this pointer is never null.
1812 /// The type which determines the unsized metadata, if any,
1813 /// of this pointer. Either a type parameter or a projection
1814 /// depending on one, with regions erased.
1819 impl<'tcx> SizeSkeleton<'tcx> {
1823 param_env: ty::ParamEnv<'tcx>,
1824 ) -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
1825 debug_assert!(!ty.has_infer_types_or_consts());
1827 // First try computing a static layout.
1828 let err = match tcx.layout_of(param_env.and(ty)) {
1830 return Ok(SizeSkeleton::Known(layout.size));
1836 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
1837 let non_zero = !ty.is_unsafe_ptr();
1838 let tail = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
1840 ty::Param(_) | ty::Projection(_) => {
1841 debug_assert!(tail.has_param_types_or_consts());
1842 Ok(SizeSkeleton::Pointer { non_zero, tail: tcx.erase_regions(tail) })
1845 "SizeSkeleton::compute({}): layout errored ({}), yet \
1846 tail `{}` is not a type parameter or a projection",
1854 ty::Adt(def, substs) => {
1855 // Only newtypes and enums w/ nullable pointer optimization.
1856 if def.is_union() || def.variants.is_empty() || def.variants.len() > 2 {
1860 // Get a zero-sized variant or a pointer newtype.
1861 let zero_or_ptr_variant = |i| {
1862 let i = VariantIdx::new(i);
1863 let fields = def.variants[i]
1866 .map(|field| SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env));
1868 for field in fields {
1871 SizeSkeleton::Known(size) => {
1872 if size.bytes() > 0 {
1876 SizeSkeleton::Pointer { .. } => {
1887 let v0 = zero_or_ptr_variant(0)?;
1889 if def.variants.len() == 1 {
1890 if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
1891 return Ok(SizeSkeleton::Pointer {
1893 || match tcx.layout_scalar_valid_range(def.did) {
1894 (Bound::Included(start), Bound::Unbounded) => start > 0,
1895 (Bound::Included(start), Bound::Included(end)) => {
1896 0 < start && start < end
1907 let v1 = zero_or_ptr_variant(1)?;
1908 // Nullable pointer enum optimization.
1910 (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None)
1911 | (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
1912 Ok(SizeSkeleton::Pointer { non_zero: false, tail })
1918 ty::Projection(_) | ty::Opaque(..) => {
1919 let normalized = tcx.normalize_erasing_regions(param_env, ty);
1920 if ty == normalized {
1923 SizeSkeleton::compute(normalized, tcx, param_env)
1931 pub fn same_size(self, other: SizeSkeleton<'_>) -> bool {
1932 match (self, other) {
1933 (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
1934 (SizeSkeleton::Pointer { tail: a, .. }, SizeSkeleton::Pointer { tail: b, .. }) => {
/// Access to a `TyCtxt`, in addition to the target's data layout.
1942 pub trait HasTyCtxt<'tcx>: HasDataLayout {
1943 fn tcx(&self) -> TyCtxt<'tcx>;
/// Access to the `ParamEnv` under which layout/ABI queries are made.
1946 pub trait HasParamEnv<'tcx> {
1947 fn param_env(&self) -> ty::ParamEnv<'tcx>;
1950 impl<'tcx> HasDataLayout for TyCtxt<'tcx> {
1951 fn data_layout(&self) -> &TargetDataLayout {
1956 impl<'tcx> HasTyCtxt<'tcx> for TyCtxt<'tcx> {
1957 fn tcx(&self) -> TyCtxt<'tcx> {
1962 impl<'tcx, C> HasParamEnv<'tcx> for LayoutCx<'tcx, C> {
1963 fn param_env(&self) -> ty::ParamEnv<'tcx> {
1968 impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
1969 fn data_layout(&self) -> &TargetDataLayout {
1970 self.tcx.data_layout()
1974 impl<'tcx, T: HasTyCtxt<'tcx>> HasTyCtxt<'tcx> for LayoutCx<'tcx, T> {
1975 fn tcx(&self) -> TyCtxt<'tcx> {
1980 pub type TyAndLayout<'tcx> = rustc_target::abi::TyAndLayout<'tcx, Ty<'tcx>>;
1982 impl<'tcx> LayoutOf for LayoutCx<'tcx, TyCtxt<'tcx>> {
1984 type TyAndLayout = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
1986 /// Computes the layout of a type. Note that this implicitly
1987 /// executes in "reveal all" mode.
1988 fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout {
// Normalize under the reveal-all environment before consulting the
// `layout_raw` query, so projections/opaque types are resolved first.
1989 let param_env = self.param_env.with_reveal_all_normalized(self.tcx);
1990 let ty = self.tcx.normalize_erasing_regions(param_env, ty);
1991 let layout = self.tcx.layout_raw(param_env.and(ty))?;
1992 let layout = TyAndLayout { ty, layout };
1994 // N.B., this recording is normally disabled; when enabled, it
1995 // can however trigger recursive invocations of `layout_of`.
1996 // Therefore, we execute it *after* the main query has
1997 // completed, to avoid problems around recursive structures
1998 // and the like. (Admittedly, I wasn't able to reproduce a problem
1999 // here, but it seems like the right thing to do. -nmatsakis)
2000 self.record_layout_for_printing(layout);
// Same logic as the `LayoutCx<TyCtxt>` impl, but for a span-carrying
// `TyCtxtAt` context.
2006 impl LayoutOf for LayoutCx<'tcx, ty::query::TyCtxtAt<'tcx>> {
2008 type TyAndLayout = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
2010 /// Computes the layout of a type. Note that this implicitly
2011 /// executes in "reveal all" mode.
2012 fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout {
2013 let param_env = self.param_env.with_reveal_all_normalized(*self.tcx);
2014 let ty = self.tcx.normalize_erasing_regions(param_env, ty);
2015 let layout = self.tcx.layout_raw(param_env.and(ty))?;
2016 let layout = TyAndLayout { ty, layout };
2018 // N.B., this recording is normally disabled; when enabled, it
2019 // can however trigger recursive invocations of `layout_of`.
2020 // Therefore, we execute it *after* the main query has
2021 // completed, to avoid problems around recursive structures
2022 // and the like. (Admittedly, I wasn't able to reproduce a problem
2023 // here, but it seems like the right thing to do. -nmatsakis)
// Recording needs a plain-`TyCtxt` context, so build one from `TyCtxtAt`.
2024 let cx = LayoutCx { tcx: *self.tcx, param_env: self.param_env };
2025 cx.record_layout_for_printing(layout);
2031 // Helper (inherent) `layout_of` methods to avoid pushing `LayoutCx` to users.
2033 /// Computes the layout of a type. Note that this implicitly
2034 /// executes in "reveal all" mode.
2038 param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
2039 ) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
2040 let cx = LayoutCx { tcx: self, param_env: param_env_and_ty.param_env };
2041 cx.layout_of(param_env_and_ty.value)
2045 impl ty::query::TyCtxtAt<'tcx> {
2046 /// Computes the layout of a type. Note that this implicitly
2047 /// executes in "reveal all" mode.
2051 param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
2052 ) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
2053 let cx = LayoutCx { tcx: self.at(self.span), param_env: param_env_and_ty.param_env };
2054 cx.layout_of(param_env_and_ty.value)
2058 impl<'tcx, C> TyAndLayoutMethods<'tcx, C> for Ty<'tcx>
2060 C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout: MaybeResult<TyAndLayout<'tcx>>>
2062 + HasParamEnv<'tcx>,
2065 this: TyAndLayout<'tcx>,
2067 variant_index: VariantIdx,
2068 ) -> TyAndLayout<'tcx> {
2069 let layout = match this.variants {
2070 Variants::Single { index }
2071 // If all variants but one are uninhabited, the variant layout is the enum layout.
2072 if index == variant_index &&
2073 // Don't confuse variants of uninhabited enums with the enum itself.
2074 // For more details see https://github.com/rust-lang/rust/issues/69763.
2075 this.fields != FieldsShape::Primitive =>
2080 Variants::Single { index } => {
2081 // Deny calling for_variant more than once for non-Single enums.
2082 if let Ok(original_layout) = cx.layout_of(this.ty).to_result() {
2083 assert_eq!(original_layout.variants, Variants::Single { index });
2086 let fields = match this.ty.kind() {
2087 ty::Adt(def, _) if def.variants.is_empty() =>
2088 bug!("for_variant called on zero-variant enum"),
2089 ty::Adt(def, _) => def.variants[variant_index].fields.len(),
2093 tcx.intern_layout(Layout {
2094 variants: Variants::Single { index: variant_index },
2095 fields: match NonZeroUsize::new(fields) {
2096 Some(fields) => FieldsShape::Union(fields),
2097 None => FieldsShape::Arbitrary { offsets: vec![], memory_index: vec![] },
2099 abi: Abi::Uninhabited,
2100 largest_niche: None,
2101 align: tcx.data_layout.i8_align,
2106 Variants::Multiple { ref variants, .. } => &variants[variant_index],
2109 assert_eq!(layout.variants, Variants::Single { index: variant_index });
2111 TyAndLayout { ty: this.ty, layout }
// Computes the type-and-layout of field `i` of `this`, dispatching on the
// kind of `this.ty`. For most cases only the field *type* is determined
// here (then handed to `cx.layout_of` at the end); tag fields of
// multi-variant layouts return a ready-made `TyAndLayout` directly.
2114 fn field(this: TyAndLayout<'tcx>, cx: &C, i: usize) -> C::TyAndLayout {
2115 enum TyMaybeWithLayout<C: LayoutOf> {
2117 TyAndLayout(C::TyAndLayout),
2120 fn ty_and_layout_kind<
2121 C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout: MaybeResult<TyAndLayout<'tcx>>>
2123 + HasParamEnv<'tcx>,
2125 this: TyAndLayout<'tcx>,
2129 ) -> TyMaybeWithLayout<C> {
// Helper: the layout of a discriminant ("tag") scalar.
2131 let tag_layout = |tag: &Scalar| -> C::TyAndLayout {
2132 let layout = Layout::scalar(cx, tag.clone());
2133 MaybeResult::from(Ok(TyAndLayout {
2134 layout: tcx.intern_layout(layout),
2135 ty: tag.value.to_ty(tcx),
2148 | ty::GeneratorWitness(..)
2150 | ty::Dynamic(..) => bug!("TyAndLayout::field_type({:?}): not applicable", this),
2152 // Potentially-fat pointers.
2153 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
2154 assert!(i < this.fields.count());
2156 // Reuse the fat `*T` type as its own thin pointer data field.
2157 // This provides information about, e.g., DST struct pointees
2158 // (which may have no non-DST form), and will work as long
2159 // as the `Abi` or `FieldsShape` is checked by users.
2161 let nil = tcx.mk_unit();
2162 let ptr_ty = if ty.is_unsafe_ptr() {
2165 tcx.mk_mut_ref(tcx.lifetimes.re_static, nil)
2167 return TyMaybeWithLayout::TyAndLayout(MaybeResult::from(
2168 cx.layout_of(ptr_ty).to_result().map(|mut ptr_layout| {
// The metadata field of a fat pointer: `usize` length for
// slices/str, a vtable reference for trait objects.
2175 match tcx.struct_tail_erasing_lifetimes(pointee, cx.param_env()).kind() {
2176 ty::Slice(_) | ty::Str => TyMaybeWithLayout::Ty(tcx.types.usize),
2177 ty::Dynamic(_, _) => {
2178 TyMaybeWithLayout::Ty(tcx.mk_imm_ref(
2179 tcx.lifetimes.re_static,
2180 tcx.mk_array(tcx.types.usize, 3),
2182 /* FIXME: use actual fn pointers
2183 Warning: naively computing the number of entries in the
2184 vtable by counting the methods on the trait + methods on
2185 all parent traits does not work, because some methods can
2186 be not object safe and thus excluded from the vtable.
2187 Increase this counter if you tried to implement this but
2188 failed to do it without duplicating a lot of code from
2189 other places in the compiler: 2
2191 tcx.mk_array(tcx.types.usize, 3),
2192 tcx.mk_array(Option<fn()>),
2196 _ => bug!("TyAndLayout::field_type({:?}): not applicable", this),
2200 // Arrays and slices.
2201 ty::Array(element, _) | ty::Slice(element) => TyMaybeWithLayout::Ty(element),
2202 ty::Str => TyMaybeWithLayout::Ty(tcx.types.u8),
2204 // Tuples, generators and closures.
2205 ty::Closure(_, ref substs) => {
// A closure's fields are its captured upvars, exposed as a tuple.
2206 ty_and_layout_kind(this, cx, i, substs.as_closure().tupled_upvars_ty())
2209 ty::Generator(def_id, ref substs, _) => match this.variants {
2210 Variants::Single { index } => TyMaybeWithLayout::Ty(
2213 .state_tys(def_id, tcx)
2214 .nth(index.as_usize())
2219 Variants::Multiple { ref tag, tag_field, .. } => {
2221 return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
2223 TyMaybeWithLayout::Ty(substs.as_generator().prefix_tys().nth(i).unwrap())
2227 ty::Tuple(tys) => TyMaybeWithLayout::Ty(tys[i].expect_ty()),
2230 ty::Adt(def, substs) => {
2231 match this.variants {
2232 Variants::Single { index } => {
2233 TyMaybeWithLayout::Ty(def.variants[index].fields[i].ty(tcx, substs))
2236 // Discriminant field for enums (where applicable).
2237 Variants::Multiple { ref tag, .. } => {
2239 return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
2246 | ty::Placeholder(..)
2250 | ty::Error(_) => bug!("TyAndLayout::field_type: unexpected type `{}`", this.ty),
2254 cx.layout_of(match ty_and_layout_kind(this, cx, i, this.ty) {
2255 TyMaybeWithLayout::Ty(result) => result,
2256 TyMaybeWithLayout::TyAndLayout(result) => return result,
// Returns `PointeeInfo` (size, align, pointer kind, address space) for a
// pointer located `offset` bytes into a value with this layout, or `None`
// if no pointer is known to live at that offset. Codegen uses this to attach
// LLVM parameter attributes such as `noalias`/`readonly`/`dereferenceable`.
// NOTE(review): this listing elides some lines; comments describe only the
// code visible here.
2260 fn pointee_info_at(this: TyAndLayout<'tcx>, cx: &C, offset: Size) -> Option<PointeeInfo> {
// Fn pointers live in the target's instruction address space; every other
// pointee is in `AddressSpace::DATA`.
2261 let addr_space_of_ty = |ty: Ty<'tcx>| {
2262 if ty.is_fn() { cx.data_layout().instruction_address_space } else { AddressSpace::DATA }
2265 let pointee_info = match *this.ty.kind() {
// Raw pointer at offset 0: report the pointee's size/align but no safety
// guarantee (raw pointers may be null/dangling).
2266 ty::RawPtr(mt) if offset.bytes() == 0 => {
2267 cx.layout_of(mt.ty).to_result().ok().map(|layout| PointeeInfo {
2269 align: layout.align.abi,
2271 address_space: addr_space_of_ty(mt.ty),
// Fn pointer at offset 0: reconstruct the fn-pointer type to get its
// layout; always the instruction address space.
2274 ty::FnPtr(fn_sig) if offset.bytes() == 0 => {
2275 cx.layout_of(cx.tcx().mk_fn_ptr(fn_sig)).to_result().ok().map(|layout| {
2278 align: layout.align.abi,
2280 address_space: cx.data_layout().instruction_address_space,
// Reference at offset 0: classify the pointer kind from mutability and
// freeze-ness (presence of interior mutability) of the pointee.
2284 ty::Ref(_, ty, mt) if offset.bytes() == 0 => {
2285 let address_space = addr_space_of_ty(ty);
2287 let is_freeze = ty.is_freeze(tcx.at(DUMMY_SP), cx.param_env());
2288 let kind = match mt {
2289 hir::Mutability::Not => {
2296 hir::Mutability::Mut => {
2297 // Previously we would only emit noalias annotations for LLVM >= 6 or in
2298 // panic=abort mode. That was deemed right, as prior versions had many bugs
2299 // in conjunction with unwinding, but later versions didn’t seem to have
2300 // said issues. See issue #31681.
2302 // Alas, later on we encountered a case where noalias would generate wrong
2303 // code altogether even with recent versions of LLVM in *safe* code with no
2304 // unwinding involved. See #54462.
2306 // For now, do not enable mutable_noalias by default at all, while the
2307 // issue is being figured out.
// `&mut` only counts as a unique (no-alias) borrow behind the opt-in
// `-Zmutable-noalias` debugging flag.
2308 if tcx.sess.opts.debugging_opts.mutable_noalias {
2309 PointerKind::UniqueBorrowed
2316 cx.layout_of(ty).to_result().ok().map(|layout| PointeeInfo {
2318 align: layout.align.abi,
// Not a pointer at the top level: descend into the aggregate's fields.
// First pick which variant's field data to search.
2325 let mut data_variant = match this.variants {
2326 // Within the discriminant field, only the niche itself is
2327 // always initialized, so we only check for a pointer at its
2330 // If the niche is a pointer, it's either valid (according
2331 // to its type), or null (which the niche field's scalar
2332 // validity range encodes). This allows using
2333 // `dereferenceable_or_null` for e.g., `Option<&T>`, and
2334 // this will continue to work as long as we don't start
2335 // using more niches than just null (e.g., the first page of
2336 // the address space, or unaligned pointers).
2337 Variants::Multiple {
2338 tag_encoding: TagEncoding::Niche { dataful_variant, .. },
2341 } if this.fields.offset(tag_field) == offset => {
2342 Some(this.for_variant(cx, dataful_variant))
2347 if let Some(variant) = data_variant {
2348 // We're not interested in any unions.
2349 if let FieldsShape::Union(_) = variant.fields {
2350 data_variant = None;
2354 let mut result = None;
2356 if let Some(variant) = data_variant {
// Find a field whose span [field_start, field_start + size) fully covers
// the pointer-sized range starting at `offset`, then recurse into it with
// the offset rebased to the field.
2357 let ptr_end = offset + Pointer.size(cx);
2358 for i in 0..variant.fields.count() {
2359 let field_start = variant.fields.offset(i);
2360 if field_start <= offset {
2361 let field = variant.field(cx, i);
2362 result = field.to_result().ok().and_then(|field| {
2363 if ptr_end <= field_start + field.size {
2364 // We found the right field, look inside it.
2366 field.pointee_info_at(cx, offset - field_start);
2372 if result.is_some() {
2379 // FIXME(eddyb) This should be for `ptr::Unique<T>`, not `Box<T>`.
// Special case: the pointer at offset 0 of a `Box` is uniquely owned.
2380 if let Some(ref mut pointee) = result {
2381 if let ty::Adt(def, _) = this.ty.kind() {
2382 if def.is_box() && offset.bytes() == 0 {
2383 pointee.safe = Some(PointerKind::UniqueOwned);
2393 "pointee_info_at (offset={:?}, type kind: {:?}) => {:?}",
// Stable hashing for `LayoutError`: hash the enum discriminant first so
// different variants never collide, then hash the payload for variants that
// carry one.
2403 impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for LayoutError<'tcx> {
2404 fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
2405 use crate::ty::layout::LayoutError::*;
2406 mem::discriminant(self).hash_stable(hcx, hasher);
// Both `Unknown` and `SizeOverflow` carry a `Ty` payload that must feed
// into the stable hash.
2409 Unknown(t) | SizeOverflow(t) => t.hash_stable(hcx, hasher),
2414 impl<'tcx> ty::Instance<'tcx> {
2415 // NOTE(eddyb) this is private to avoid using it from outside of
2416 // `FnAbi::of_instance` - any other uses are either too high-level
2417 // for `Instance` (e.g. typeck would use `Ty::fn_sig` instead),
2418 // or should go through `FnAbi` instead, to avoid losing any
2419 // adjustments `FnAbi::of_instance` might be performing.
//
// Computes the ABI-relevant signature for this instance, rewriting
// closure/generator "signatures" into explicit fn signatures with the
// environment as the first argument.
2420 fn fn_sig_for_fn_abi(&self, tcx: TyCtxt<'tcx>) -> ty::PolyFnSig<'tcx> {
2421 // FIXME(davidtwco,eddyb): A `ParamEnv` should be passed through to this function.
2422 let ty = self.ty(tcx, ty::ParamEnv::reveal_all());
2425 // HACK(davidtwco,eddyb): This is a workaround for polymorphization considering
2426 // parameters unused if they show up in the signature, but not in the `mir::Body`
2427 // (i.e. due to being inside a projection that got normalized, see
2428 // `src/test/ui/polymorphization/normalized_sig_types.rs`), and codegen not keeping
2429 // track of a polymorphization `ParamEnv` to allow normalizing later.
2430 let mut sig = match *ty.kind() {
// For plain fn items: normalize the declared signature in the item's own
// param-env, then substitute the instance's generic args.
2431 ty::FnDef(def_id, substs) => tcx
2432 .normalize_erasing_regions(tcx.param_env(def_id), tcx.fn_sig(def_id))
2433 .subst(tcx, substs),
2434 _ => unreachable!(),
2437 if let ty::InstanceDef::VtableShim(..) = self.def {
2438 // Modify `fn(self, ...)` to `fn(self: *mut Self, ...)`.
2439 sig = sig.map_bound(|mut sig| {
2440 let mut inputs_and_output = sig.inputs_and_output.to_vec();
2441 inputs_and_output[0] = tcx.mk_mut_ptr(inputs_and_output[0]);
2442 sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);
// Closures: prepend the closure environment type as an explicit first
// argument before the declared inputs.
2448 ty::Closure(def_id, substs) => {
2449 let sig = substs.as_closure().sig();
2451 let env_ty = tcx.closure_env_ty(def_id, substs).unwrap();
2452 sig.map_bound(|sig| {
2454 iter::once(env_ty.skip_binder()).chain(sig.inputs().iter().cloned()),
// Generators: build the `resume` signature — the receiver is
// `Pin<&mut Self>` (with a late-bound env region) and the return type is
// `GeneratorState<Yield, Return>`.
2462 ty::Generator(_, substs, _) => {
2463 let sig = substs.as_generator().poly_sig();
2465 let br = ty::BoundRegion { kind: ty::BrEnv };
2466 let env_region = ty::ReLateBound(ty::INNERMOST, br);
2467 let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty);
// Wrap the `&mut Self` receiver in the `Pin` lang item.
2469 let pin_did = tcx.require_lang_item(LangItem::Pin, None);
2470 let pin_adt_ref = tcx.adt_def(pin_did);
2471 let pin_substs = tcx.intern_substs(&[env_ty.into()]);
2472 let env_ty = tcx.mk_adt(pin_adt_ref, pin_substs);
2474 sig.map_bound(|sig| {
2475 let state_did = tcx.require_lang_item(LangItem::GeneratorState, None);
2476 let state_adt_ref = tcx.adt_def(state_did);
2478 tcx.intern_substs(&[sig.yield_ty.into(), sig.return_ty.into()]);
2479 let ret_ty = tcx.mk_adt(state_adt_ref, state_substs);
// Inputs: pinned env receiver plus the resume argument.
2482 [env_ty, sig.resume_ty].iter(),
2485 hir::Unsafety::Normal,
2486 rustc_target::spec::abi::Abi::Rust,
2490 _ => bug!("unexpected type {:?} in Instance::fn_sig", ty),
// Extension trait for constructing `FnAbi` values from rustc types. The
// context `C` must be able to compute layouts and provide tcx/param-env
// access.
2495 pub trait FnAbiExt<'tcx, C>
2497 C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout = TyAndLayout<'tcx>>
2501 + HasParamEnv<'tcx>,
2503 /// Compute a `FnAbi` suitable for indirect calls, i.e. to `fn` pointers.
2505 /// NB: this doesn't handle virtual calls - those should use `FnAbi::of_instance`
2506 /// instead, where the instance is a `InstanceDef::Virtual`.
2507 fn of_fn_ptr(cx: &C, sig: ty::PolyFnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self;
2509 /// Compute a `FnAbi` suitable for declaring/defining an `fn` instance, and for
2510 /// direct calls to an `fn`.
2512 /// NB: that includes virtual calls, which are represented by "direct calls"
2513 /// to a `InstanceDef::Virtual` instance (of `<dyn Trait as Trait>::fn`).
2514 fn of_instance(cx: &C, instance: ty::Instance<'tcx>, extra_args: &[Ty<'tcx>]) -> Self;
// Shared internal constructor used by both entry points above (see
// `new_internal` in the impl below). `mk_arg_type` builds the `ArgAbi`
// for each argument (`Some(index)`) and for the return value (`None`).
2518 sig: ty::PolyFnSig<'tcx>,
2519 extra_args: &[Ty<'tcx>],
2520 caller_location: Option<Ty<'tcx>>,
2521 codegen_fn_attr_flags: CodegenFnAttrFlags,
2522 mk_arg_type: impl Fn(Ty<'tcx>, Option<usize>) -> ArgAbi<'tcx, Ty<'tcx>>,
// Apply target/ABI-specific adjustments after construction.
2524 fn adjust_for_abi(&mut self, cx: &C, abi: SpecAbi);
// Parameters and decision logic of `fn_can_unwind` (its header line is not
// visible in this listing; the name is grounded by the call in
// `FnAbi::new_internal` below). Decides whether a function with the given
// panic strategy, codegen attribute flags and calling convention is allowed
// to unwind.
2528 panic_strategy: PanicStrategy,
2529 codegen_fn_attr_flags: CodegenFnAttrFlags,
2532 if panic_strategy != PanicStrategy::Unwind {
2533 // In panic=abort mode we assume nothing can unwind anywhere, so
2534 // optimize based on this!
2536 } else if codegen_fn_attr_flags.contains(CodegenFnAttrFlags::UNWIND) {
2537 // If a specific #[unwind] attribute is present, use that.
2539 } else if codegen_fn_attr_flags.contains(CodegenFnAttrFlags::RUSTC_ALLOCATOR_NOUNWIND) {
2540 // Special attribute for allocator functions, which can't unwind.
// No explicit attribute: fall back to the calling convention.
2543 if call_conv == Conv::Rust {
2544 // Any Rust method (or `extern "Rust" fn` or `extern
2545 // "rust-call" fn`) is explicitly allowed to unwind
2546 // (unless it has no-unwind attribute, handled above).
2549 // Anything else is either:
2551 // 1. A foreign item using a non-Rust ABI (like `extern "C" { fn foo(); }`), or
2553 // 2. A Rust item using a non-Rust ABI (like `extern "C" fn foo() { ... }`).
2555 // Foreign items (case 1) are assumed to not unwind; it is
2556 // UB otherwise. (At least for now; see also
2557 // rust-lang/rust#63909 and Rust RFC 2753.)
2559 // Items defined in Rust with non-Rust ABIs (case 2) are also
2560 // not supposed to unwind. Whether this should be enforced
2561 // (versus stating it is UB) and *how* it would be enforced
2562 // is currently under discussion; see rust-lang/rust#58794.
2564 // In either case, we mark item as explicitly nounwind.
// Implementation of `FnAbiExt` for the target-agnostic `call::FnAbi`.
2570 impl<'tcx, C> FnAbiExt<'tcx, C> for call::FnAbi<'tcx, Ty<'tcx>>
2572 C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout = TyAndLayout<'tcx>>
2576 + HasParamEnv<'tcx>,
// `FnAbi` for calls through a fn pointer: no caller-location argument, and
// arguments get plain `ArgAbi`s straight from their layouts.
2578 fn of_fn_ptr(cx: &C, sig: ty::PolyFnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
2579 // Assume that fn pointers may always unwind
2580 let codegen_fn_attr_flags = CodegenFnAttrFlags::UNWIND;
2582 call::FnAbi::new_internal(cx, sig, extra_args, None, codegen_fn_attr_flags, |ty, _| {
2583 ArgAbi::new(cx.layout_of(ty))
// `FnAbi` for a known instance: derives the signature via
// `fn_sig_for_fn_abi`, threads through `#[track_caller]`'s implicit
// location argument and the instance's codegen attribute flags, and
// special-cases the receiver of virtual (`dyn Trait`) calls.
2587 fn of_instance(cx: &C, instance: ty::Instance<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
2588 let sig = instance.fn_sig_for_fn_abi(cx.tcx());
// `#[track_caller]` functions get an extra hidden `&Location` argument.
2590 let caller_location = if instance.def.requires_caller_location(cx.tcx()) {
2591 Some(cx.tcx().caller_location_ty())
2596 let attrs = cx.tcx().codegen_fn_attrs(instance.def_id()).flags;
2598 call::FnAbi::new_internal(cx, sig, extra_args, caller_location, attrs, |ty, arg_idx| {
2599 let mut layout = cx.layout_of(ty);
2600 // Don't pass the vtable, it's not an argument of the virtual fn.
2601 // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
2602 // or `&/&mut dyn Trait` because this is special-cased elsewhere in codegen
2603 if let (ty::InstanceDef::Virtual(..), Some(0)) = (&instance.def, arg_idx) {
2604 let fat_pointer_ty = if layout.is_unsized() {
2605 // unsized `self` is passed as a pointer to `self`
2606 // FIXME (mikeyhew) change this to use &own if it is ever added to the language
2607 cx.tcx().mk_mut_ptr(layout.ty)
// Sized receivers must already have a fat-pointer (ScalarPair) layout;
// anything else is a compiler bug.
2610 Abi::ScalarPair(..) => (),
2611 _ => bug!("receiver type has unsupported layout: {:?}", layout),
2614 // In the case of Rc<Self>, we need to explicitly pass a *mut RcBox<Self>
2615 // with a Scalar (not ScalarPair) ABI. This is a hack that is understood
2616 // elsewhere in the compiler as a method on a `dyn Trait`.
2617 // To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes until we
2618 // get a built-in pointer type
2619 let mut fat_pointer_layout = layout;
2620 'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr()
2621 && !fat_pointer_layout.ty.is_region_ptr()
// Descend into the first (and only) non-ZST field of each newtype layer.
2623 for i in 0..fat_pointer_layout.fields.count() {
2624 let field_layout = fat_pointer_layout.field(cx, i);
2626 if !field_layout.is_zst() {
2627 fat_pointer_layout = field_layout;
2628 continue 'descend_newtypes;
2632 bug!("receiver has no non-zero-sized fields {:?}", fat_pointer_layout);
2635 fat_pointer_layout.ty
2638 // we now have a type like `*mut RcBox<dyn Trait>`
2639 // change its layout to that of `*mut ()`, a thin pointer, but keep the same type
2640 // this is understood as a special case elsewhere in the compiler
2641 let unit_pointer_ty = cx.tcx().mk_mut_ptr(cx.tcx().mk_unit());
2642 layout = cx.layout_of(unit_pointer_ty);
2643 layout.ty = fat_pointer_ty;
// Parameters and body of `new_internal` (the shared `FnAbi` constructor;
// its header line is not visible in this listing, but both `of_fn_ptr` and
// `of_instance` above call it by this name). It normalizes the signature,
// maps the Rust-level ABI to a calling convention, untuples "rust-call"
// arguments, applies Rust-specific scalar attributes, and finally runs
// `adjust_for_abi`.
2651 sig: ty::PolyFnSig<'tcx>,
2652 extra_args: &[Ty<'tcx>],
2653 caller_location: Option<Ty<'tcx>>,
2654 codegen_fn_attr_flags: CodegenFnAttrFlags,
2655 mk_arg_type: impl Fn(Ty<'tcx>, Option<usize>) -> ArgAbi<'tcx, Ty<'tcx>>,
2657 debug!("FnAbi::new_internal({:?}, {:?})", sig, extra_args);
// Erase late-bound regions and normalize in `reveal_all` — codegen works
// on fully monomorphic, region-erased signatures.
2659 let sig = cx.tcx().normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), sig);
// Map the source-level ABI (after target-specific adjustment) to a
// target-agnostic calling convention.
2661 use rustc_target::spec::abi::Abi::*;
2662 let conv = match cx.tcx().sess.target.adjust_abi(sig.abi) {
2663 RustIntrinsic | PlatformIntrinsic | Rust | RustCall => Conv::Rust,
2665 // It's the ABI's job to select this, not ours.
2666 System => bug!("system abi should be selected elsewhere"),
2667 EfiApi => bug!("eficall abi should be selected elsewhere"),
2669 Stdcall => Conv::X86Stdcall,
2670 Fastcall => Conv::X86Fastcall,
2671 Vectorcall => Conv::X86VectorCall,
2672 Thiscall => Conv::X86ThisCall,
2674 Unadjusted => Conv::C,
2675 Win64 => Conv::X86_64Win64,
2676 SysV64 => Conv::X86_64SysV,
2677 Aapcs => Conv::ArmAapcs,
2678 PtxKernel => Conv::PtxKernel,
2679 Msp430Interrupt => Conv::Msp430Intr,
2680 X86Interrupt => Conv::X86Intr,
2681 AmdGpuKernel => Conv::AmdGpuKernel,
2682 AvrInterrupt => Conv::AvrInterrupt,
2683 AvrNonBlockingInterrupt => Conv::AvrNonBlockingInterrupt,
2685 // These API constants ought to be more specific...
// "rust-call" functions receive their trailing arguments as one tuple;
// untuple it here so each element becomes a separate ABI argument.
2689 let mut inputs = sig.inputs();
2690 let extra_args = if sig.abi == RustCall {
2691 assert!(!sig.c_variadic && extra_args.is_empty());
2693 if let Some(input) = sig.inputs().last() {
2694 if let ty::Tuple(tupled_arguments) = input.kind() {
2695 inputs = &sig.inputs()[0..sig.inputs().len() - 1];
2696 tupled_arguments.iter().map(|k| k.expect_ty()).collect()
2699 "argument to function with \"rust-call\" ABI \
2705 "argument to function with \"rust-call\" ABI \
2710 assert!(sig.c_variadic || extra_args.is_empty());
// Target quirks: these targets do NOT ignore zero-sized aggregate
// arguments, so ZSTs cannot simply be dropped for them (see `arg_of`).
2714 let target = &cx.tcx().sess.target;
2715 let target_env_gnu_like = matches!(&target.env[..], "gnu" | "musl");
2716 let win_x64_gnu = target.os == "windows" && target.arch == "x86_64" && target.env == "gnu";
2717 let linux_s390x_gnu_like =
2718 target.os == "linux" && target.arch == "s390x" && target_env_gnu_like;
2719 let linux_sparc64_gnu_like =
2720 target.os == "linux" && target.arch == "sparc64" && target_env_gnu_like;
2721 let linux_powerpc_gnu_like =
2722 target.os == "linux" && target.arch == "powerpc" && target_env_gnu_like;
2723 let rust_abi = matches!(sig.abi, RustIntrinsic | PlatformIntrinsic | Rust | RustCall);
2725 // Handle safe Rust thin and fat pointers.
// Closure that decorates a scalar component with Rust-specific argument
// attributes (zext for bool, NonNull, noalias/readonly, dereferenceable
// sizes) based on `pointee_info_at`.
2726 let adjust_for_rust_scalar = |attrs: &mut ArgAttributes,
2728 layout: TyAndLayout<'tcx>,
2731 // Booleans are always an i1 that needs to be zero-extended.
2732 if scalar.is_bool() {
2733 attrs.ext(ArgExtension::Zext);
2737 // Only pointer types handled below.
2738 if scalar.value != Pointer {
// A validity range that excludes 0 lets us mark the pointer NonNull.
2742 if scalar.valid_range.start() < scalar.valid_range.end() {
2743 if *scalar.valid_range.start() > 0 {
2744 attrs.set(ArgAttribute::NonNull);
2748 if let Some(pointee) = layout.pointee_info_at(cx, offset) {
2749 if let Some(kind) = pointee.safe {
2750 attrs.pointee_align = Some(pointee.align);
2752 // `Box` (`UniqueBorrowed`) are not necessarily dereferenceable
2753 // for the entire duration of the function as they can be deallocated
2754 // at any time. Set their valid size to 0.
2755 attrs.pointee_size = match kind {
2756 PointerKind::UniqueOwned => Size::ZERO,
2760 // `Box` pointer parameters never alias because ownership is transferred
2761 // `&mut` pointer parameters never alias other parameters,
2762 // or mutable global data
2764 // `&T` where `T` contains no `UnsafeCell<U>` is immutable,
2765 // and can be marked as both `readonly` and `noalias`, as
2766 // LLVM's definition of `noalias` is based solely on memory
2767 // dependencies rather than pointer equality
2768 let no_alias = match kind {
2769 PointerKind::Shared => false,
2770 PointerKind::UniqueOwned => true,
2771 PointerKind::Frozen | PointerKind::UniqueBorrowed => !is_return,
2774 attrs.set(ArgAttribute::NoAlias);
2777 if kind == PointerKind::Frozen && !is_return {
2778 attrs.set(ArgAttribute::ReadOnly);
// Closure that builds the `ArgAbi` for one argument (`Some(index)`) or the
// return value (`None`), then applies ZST-ignoring and the Rust scalar
// attributes above.
2784 let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| {
2785 let is_return = arg_idx.is_none();
2786 let mut arg = mk_arg_type(ty, arg_idx);
2787 if arg.layout.is_zst() {
2788 // For some forsaken reason, x86_64-pc-windows-gnu
2789 // doesn't ignore zero-sized struct arguments.
2790 // The same is true for {s390x,sparc64,powerpc}-unknown-linux-{gnu,musl}.
2794 && !linux_s390x_gnu_like
2795 && !linux_sparc64_gnu_like
2796 && !linux_powerpc_gnu_like)
2798 arg.mode = PassMode::Ignore;
2802 // FIXME(eddyb) other ABIs don't have logic for scalar pairs.
2803 if !is_return && rust_abi {
2804 if let Abi::ScalarPair(ref a, ref b) = arg.layout.abi {
2805 let mut a_attrs = ArgAttributes::new();
2806 let mut b_attrs = ArgAttributes::new();
2807 adjust_for_rust_scalar(&mut a_attrs, a, arg.layout, Size::ZERO, false);
// The second scalar sits after the first, aligned to its own ABI
// alignment.
2808 adjust_for_rust_scalar(
2812 a.value.size(cx).align_to(b.value.align(cx).abi),
2815 arg.mode = PassMode::Pair(a_attrs, b_attrs);
2820 if let Abi::Scalar(ref scalar) = arg.layout.abi {
2821 if let PassMode::Direct(ref mut attrs) = arg.mode {
2822 adjust_for_rust_scalar(attrs, scalar, arg.layout, Size::ZERO, is_return);
// Assemble the final `FnAbi`: return value, declared inputs, the hidden
// caller-location argument (if any), then ABI-specific adjustment.
2829 let mut fn_abi = FnAbi {
2830 ret: arg_of(sig.output(), None),
2835 .chain(caller_location)
2837 .map(|(i, ty)| arg_of(ty, Some(i)))
2839 c_variadic: sig.c_variadic,
2840 fixed_count: inputs.len(),
2842 can_unwind: fn_can_unwind(cx.tcx().sess.panic_strategy(), codegen_fn_attr_flags, conv),
2844 fn_abi.adjust_for_abi(cx, sig.abi);
2845 debug!("FnAbi::new_internal = {:?}", fn_abi);
2849 fn adjust_for_abi(&mut self, cx: &C, abi: SpecAbi) {
2850 if abi == SpecAbi::Unadjusted {
2854 if abi == SpecAbi::Rust
2855 || abi == SpecAbi::RustCall
2856 || abi == SpecAbi::RustIntrinsic
2857 || abi == SpecAbi::PlatformIntrinsic
2859 let fixup = |arg: &mut ArgAbi<'tcx, Ty<'tcx>>| {
2860 if arg.is_ignore() {
2864 match arg.layout.abi {
2865 Abi::Aggregate { .. } => {}
2867 // This is a fun case! The gist of what this is doing is
2868 // that we want callers and callees to always agree on the
2869 // ABI of how they pass SIMD arguments. If we were to *not*
2870 // make these arguments indirect then they'd be immediates
2871 // in LLVM, which means that they'd used whatever the
2872 // appropriate ABI is for the callee and the caller. That
2873 // means, for example, if the caller doesn't have AVX
2874 // enabled but the callee does, then passing an AVX argument
2875 // across this boundary would cause corrupt data to show up.
2877 // This problem is fixed by unconditionally passing SIMD
2878 // arguments through memory between callers and callees
2879 // which should get them all to agree on ABI regardless of
2880 // target feature sets. Some more information about this
2881 // issue can be found in #44367.
2883 // Note that the platform intrinsic ABI is exempt here as
2884 // that's how we connect up to LLVM and it's unstable
2885 // anyway, we control all calls to it in libstd.
2887 if abi != SpecAbi::PlatformIntrinsic
2888 && cx.tcx().sess.target.simd_types_indirect =>
2890 arg.make_indirect();
2897 // Pass and return structures up to 2 pointers in size by value, matching `ScalarPair`.
2898 // LLVM will usually pass these in 2 registers, which is more efficient than by-ref.
2899 let max_by_val_size = Pointer.size(cx) * 2;
2900 let size = arg.layout.size;
2902 if arg.layout.is_unsized() || size > max_by_val_size {
2903 arg.make_indirect();
2905 // We want to pass small aggregates as immediates, but using
2906 // a LLVM aggregate type for this leads to bad optimizations,
2907 // so we pick an appropriately sized integer type instead.
2908 arg.cast_to(Reg { kind: RegKind::Integer, size });
2911 fixup(&mut self.ret);
2912 for arg in &mut self.args {
2918 if let Err(msg) = self.adjust_for_cabi(cx, abi) {
2919 cx.tcx().sess.fatal(&msg);