use crate::session::{self, DataTypeKind};
use crate::ty::{self, Ty, TyCtxt, TypeFoldable, ReprOptions};

use syntax::ast::{self, Ident, IntTy, UintTy};
use syntax::attr;
use syntax_pos::DUMMY_SP;

use std::cmp;
use std::fmt;
use std::iter;
use std::ops::Bound;

use crate::hir;
use crate::ich::StableHashingContext;
use crate::mir::{GeneratorLayout, GeneratorSavedLocal};
use crate::ty::GeneratorSubsts;
use crate::ty::subst::Subst;
use rustc_data_structures::bit_set::BitSet;
use rustc_data_structures::indexed_vec::{IndexVec, Idx};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher,
                                           StableHasherResult};

pub use rustc_target::abi::*;
use rustc_target::spec::{HasTargetSpec, abi::Abi as SpecAbi};
use rustc_target::abi::call::{
    ArgAttribute, ArgAttributes, ArgType, Conv, FnType, IgnoreMode, PassMode, Reg, RegKind
};
pub trait IntegerExt {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx>;
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
    fn repr_discr<'tcx>(
        tcx: TyCtxt<'tcx>,
        ty: Ty<'tcx>,
        repr: &ReprOptions,
        min: i128,
        max: i128,
    ) -> (Integer, bool);
}
impl IntegerExt for Integer {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx> {
        match (*self, signed) {
            (I8, false) => tcx.types.u8,
            (I16, false) => tcx.types.u16,
            (I32, false) => tcx.types.u32,
            (I64, false) => tcx.types.u64,
            (I128, false) => tcx.types.u128,
            (I8, true) => tcx.types.i8,
            (I16, true) => tcx.types.i16,
            (I32, true) => tcx.types.i32,
            (I64, true) => tcx.types.i64,
            (I128, true) => tcx.types.i128,
        }
    }
    /// Gets the Integer type from an attr::IntType.
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
        let dl = cx.data_layout();

        match ity {
            attr::SignedInt(IntTy::I8) | attr::UnsignedInt(UintTy::U8) => I8,
            attr::SignedInt(IntTy::I16) | attr::UnsignedInt(UintTy::U16) => I16,
            attr::SignedInt(IntTy::I32) | attr::UnsignedInt(UintTy::U32) => I32,
            attr::SignedInt(IntTy::I64) | attr::UnsignedInt(UintTy::U64) => I64,
            attr::SignedInt(IntTy::I128) | attr::UnsignedInt(UintTy::U128) => I128,
            attr::SignedInt(IntTy::Isize) | attr::UnsignedInt(UintTy::Usize) => {
                dl.ptr_sized_integer()
            }
        }
    }
    /// Finds the appropriate Integer type and signedness for the given
    /// signed discriminant range and `#[repr]` attribute.
    /// N.B.: `u128` values above `i128::MAX` will be treated as signed, but
    /// that shouldn't affect anything, other than maybe debuginfo.
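    ///
    /// For example (illustrative): a discriminant range of `-1..=127` with
    /// no `#[repr]` hint fits `fit_signed` at `I8`, so the result is
    /// `(I8, true)`.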
    fn repr_discr<'tcx>(
        tcx: TyCtxt<'tcx>,
        ty: Ty<'tcx>,
        repr: &ReprOptions,
        min: i128,
        max: i128,
    ) -> (Integer, bool) {
        // Theoretically, negative values could be larger in unsigned representation
        // than the unsigned representation of the signed minimum. However, if there
        // are any negative values, the only valid unsigned representation is u128
        // which can fit all i128 values, so the result remains unaffected.
        let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
        let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));

        let mut min_from_extern = None;
        let min_default = I8;

        if let Some(ity) = repr.int {
            let discr = Integer::from_attr(&tcx, ity);
            let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
            if discr < fit {
                bug!("Integer::repr_discr: `#[repr]` hint too small for \
                      discriminant range of enum `{}`", ty)
            }
            return (discr, ity.is_signed());
        }
        if repr.c() {
            match &tcx.sess.target.target.arch[..] {
                // WARNING: the ARM EABI has two variants; the one corresponding
                // to `at_least == I32` appears to be used on Linux and NetBSD,
                // but some systems may use the variant corresponding to no
                // lower bound. However, we don't run on those yet...?
                "arm" => min_from_extern = Some(I32),
                _ => min_from_extern = Some(I32),
            }
        }

        let at_least = min_from_extern.unwrap_or(min_default);

        // If there are no negative values, we can use the unsigned fit.
        if min >= 0 {
            (cmp::max(unsigned_fit, at_least), false)
        } else {
            (cmp::max(signed_fit, at_least), true)
        }
    }
}
pub trait PrimitiveExt {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
}

impl PrimitiveExt for Primitive {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
        match *self {
            Int(i, signed) => i.to_ty(tcx, signed),
            Float(FloatTy::F32) => tcx.types.f32,
            Float(FloatTy::F64) => tcx.types.f64,
            Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
        }
    }
}
/// The first half of a fat pointer.
///
/// - For a trait object, this is the address of the box.
/// - For a slice, this is the base address.
pub const FAT_PTR_ADDR: usize = 0;

/// The second half of a fat pointer.
///
/// - For a trait object, this is the address of the vtable.
/// - For a slice, this is the length.
pub const FAT_PTR_EXTRA: usize = 1;
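// For example (illustrative): a `&[u8]` fat pointer carries its `*const u8`
// data pointer in field FAT_PTR_ADDR and its `usize` length in field
// FAT_PTR_EXTRA; for `&dyn Trait`, FAT_PTR_EXTRA is the vtable pointer
// instead.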
#[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable)]
pub enum LayoutError<'tcx> {
    Unknown(Ty<'tcx>),
    SizeOverflow(Ty<'tcx>)
}

impl<'tcx> fmt::Display for LayoutError<'tcx> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match *self {
            LayoutError::Unknown(ty) => {
                write!(f, "the type `{:?}` has an unknown layout", ty)
            }
            LayoutError::SizeOverflow(ty) => {
                write!(f, "the type `{:?}` is too big for the current architecture", ty)
            }
        }
    }
}
fn layout_raw<'tcx>(
    tcx: TyCtxt<'tcx>,
    query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
) -> Result<&'tcx LayoutDetails, LayoutError<'tcx>> {
    ty::tls::with_related_context(tcx, move |icx| {
        let rec_limit = *tcx.sess.recursion_limit.get();
        let (param_env, ty) = query.into_parts();

        if icx.layout_depth > rec_limit {
            tcx.sess.fatal(
                &format!("overflow representing the type `{}`", ty));
        }

        // Update the ImplicitCtxt to increase the layout_depth
        let icx = ty::tls::ImplicitCtxt {
            layout_depth: icx.layout_depth + 1,
            ..icx.clone()
        };

        ty::tls::enter_context(&icx, |_| {
            let cx = LayoutCx { tcx, param_env };
            let layout = cx.layout_raw_uncached(ty);
            // Type-level uninhabitedness should always imply ABI uninhabitedness.
            if let Ok(layout) = layout {
                if ty.conservative_is_privately_uninhabited(tcx) {
                    assert!(layout.abi.is_uninhabited());
                }
            }
            layout
        })
    })
}
pub fn provide(providers: &mut ty::query::Providers<'_>) {
    *providers = ty::query::Providers {
        layout_raw,
        ..*providers
    };
}

pub struct LayoutCx<'tcx, C> {
    pub tcx: C,
    pub param_env: ty::ParamEnv<'tcx>,
}
#[derive(Copy, Clone, Debug)]
enum StructKind {
    /// A tuple, closure, or univariant which cannot be coerced to unsized.
    AlwaysSized,
    /// A univariant, the last field of which may be coerced to unsized.
    MaybeUnsized,
    /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
    Prefixed(Size, Align),
}
// Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
// This is used to go between `memory_index` (source field order to memory order)
// and `inverse_memory_index` (memory order to source field order).
// See also `FieldPlacement::Arbitrary::memory_index` for more details.
// FIXME(eddyb) build a better abstraction for permutations, if possible.
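// For example (illustrative): `invert_mapping(&[2, 0, 1])` yields `[1, 2, 0]`,
// since `map[0] = 2` puts a `0` at `inverse[2]`, and so on.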
fn invert_mapping(map: &[u32]) -> Vec<u32> {
    let mut inverse = vec![0; map.len()];
    for i in 0..map.len() {
        inverse[map[i] as usize] = i as u32;
    }
    inverse
}
impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
    fn scalar_pair(&self, a: Scalar, b: Scalar) -> LayoutDetails {
        let dl = self.data_layout();
        let b_align = b.value.align(dl);
        let align = a.value.align(dl).max(b_align).max(dl.aggregate_align);
        let b_offset = a.value.size(dl).align_to(b_align.abi);
        let size = (b_offset + b.value.size(dl)).align_to(align.abi);

        // HACK(nox): We iter on `b` and then `a` because `max_by_key`
        // returns the last maximum.
        let largest_niche = Niche::from_scalar(dl, b_offset, b.clone())
            .into_iter()
            .chain(Niche::from_scalar(dl, Size::ZERO, a.clone()))
            .max_by_key(|niche| niche.available(dl));

        LayoutDetails {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldPlacement::Arbitrary {
                offsets: vec![Size::ZERO, b_offset],
                memory_index: vec![0, 1]
            },
            abi: Abi::ScalarPair(a, b),
            largest_niche,
            align,
            size
        }
    }
    fn univariant_uninterned(&self,
                             ty: Ty<'tcx>,
                             fields: &[TyLayout<'_>],
                             repr: &ReprOptions,
                             kind: StructKind) -> Result<LayoutDetails, LayoutError<'tcx>> {
        let dl = self.data_layout();
        let packed = repr.packed();
        if packed && repr.align > 0 {
            bug!("struct cannot be packed and aligned");
        }

        let pack = Align::from_bytes(repr.pack as u64).unwrap();

        let mut align = if packed {
            dl.aggregate_align.min(AbiAndPrefAlign::new(pack))
        } else {
            dl.aggregate_align
        };

        let mut sized = true;
        let mut offsets = vec![Size::ZERO; fields.len()];
        let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();

        let mut optimize = !repr.inhibit_struct_field_reordering_opt();
        if let StructKind::Prefixed(_, align) = kind {
            optimize &= align.bytes() == 1;
        }
        if optimize {
            let end = if let StructKind::MaybeUnsized = kind {
                fields.len() - 1
            } else {
                fields.len()
            };
            let optimizing = &mut inverse_memory_index[..end];
            let field_align = |f: &TyLayout<'_>| {
                if packed { f.align.abi.min(pack) } else { f.align.abi }
            };
            match kind {
                StructKind::AlwaysSized |
                StructKind::MaybeUnsized => {
                    optimizing.sort_by_key(|&x| {
                        // Place ZSTs first to avoid "interesting offsets",
                        // especially with only one or two non-ZST fields.
                        let f = &fields[x as usize];
                        (!f.is_zst(), cmp::Reverse(field_align(f)))
                    });
                }
                StructKind::Prefixed(..) => {
                    optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
                }
            }
        }
        // inverse_memory_index holds field indices by increasing memory offset.
        // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
        // We now write field offsets to the corresponding offset slot;
        // field 5 with offset 0 puts 0 in offsets[5].
        // At the bottom of this function, we invert `inverse_memory_index` to
        // produce `memory_index` (see `invert_mapping`).
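        // For example (illustrative): inverse_memory_index = [2, 0, 1] places
        // field 2 first in memory, then field 0, then field 1, and inverts to
        // memory_index = [1, 2, 0].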
        let mut offset = Size::ZERO;
        let mut largest_niche = None;
        let mut largest_niche_available = 0;

        if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
            let prefix_align = if packed {
                prefix_align.min(pack)
            } else {
                prefix_align
            };
            align = align.max(AbiAndPrefAlign::new(prefix_align));
            offset = prefix_size.align_to(prefix_align);
        }
        for &i in &inverse_memory_index {
            let field = fields[i as usize];
            if !sized {
                bug!("univariant: field #{} of `{}` comes after unsized field",
                     offsets.len(), ty);
            }

            if field.is_unsized() {
                sized = false;
            }

            // Invariant: offset < dl.obj_size_bound() <= 1<<61
            let field_align = if packed {
                field.align.min(AbiAndPrefAlign::new(pack))
            } else {
                field.align
            };
            offset = offset.align_to(field_align.abi);
            align = align.max(field_align);

            debug!("univariant offset: {:?} field: {:#?}", offset, field);
            offsets[i as usize] = offset;

            if let Some(mut niche) = field.largest_niche.clone() {
                let available = niche.available(dl);
                if available > largest_niche_available {
                    largest_niche_available = available;
                    niche.offset += offset;
                    largest_niche = Some(niche);
                }
            }

            offset = offset.checked_add(field.size, dl)
                .ok_or(LayoutError::SizeOverflow(ty))?;
        }
        if repr.align > 0 {
            let repr_align = repr.align as u64;
            align = align.max(AbiAndPrefAlign::new(Align::from_bytes(repr_align).unwrap()));
            debug!("univariant repr_align: {:?}", repr_align);
        }

        debug!("univariant min_size: {:?}", offset);
        let min_size = offset;
        // As stated above, inverse_memory_index holds field indices by increasing offset.
        // This makes it an already-sorted view of the offsets vec.
        // To invert it, consider:
        // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
        // Field 5 would be the first element, so memory_index is i:
        // Note: if we didn't optimize, it's already right.

        let memory_index;
        if optimize {
            memory_index = invert_mapping(&inverse_memory_index);
        } else {
            memory_index = inverse_memory_index;
        }
        let size = min_size.align_to(align.abi);
        let mut abi = Abi::Aggregate { sized };

        // Unpack newtype ABIs and find scalar pairs.
        if sized && size.bytes() > 0 {
            // All other fields must be ZSTs, and we need them to all start at 0.
            let mut zst_offsets =
                offsets.iter().enumerate().filter(|&(i, _)| fields[i].is_zst());
            if zst_offsets.all(|(_, o)| o.bytes() == 0) {
                let mut non_zst_fields =
                    fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());

                match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
                    // We have exactly one non-ZST field.
                    (Some((i, field)), None, None) => {
                        // Field fills the struct and it has a scalar or scalar pair ABI.
                        if offsets[i].bytes() == 0 &&
                           align.abi == field.align.abi &&
                           size == field.size {
                            match field.abi {
                                // For plain scalars, or vectors of them, we can't unpack
                                // newtypes for `#[repr(C)]`, as that affects C ABIs.
                                Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
                                    abi = field.abi.clone();
                                }
                                // But scalar pairs are Rust-specific and get
                                // treated as aggregates by C ABIs anyway.
                                Abi::ScalarPair(..) => {
                                    abi = field.abi.clone();
                                }
                                _ => {}
                            }
                        }
                    }
                    // Two non-ZST fields, and they're both scalars.
                    (Some((i, &TyLayout {
                        details: &LayoutDetails { abi: Abi::Scalar(ref a), .. }, ..
                    })), Some((j, &TyLayout {
                        details: &LayoutDetails { abi: Abi::Scalar(ref b), .. }, ..
                    })), None) => {
                        // Order by the memory placement, not source order.
                        let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
                            ((i, a), (j, b))
                        } else {
                            ((j, b), (i, a))
                        };
                        let pair = self.scalar_pair(a.clone(), b.clone());
                        let pair_offsets = match pair.fields {
                            FieldPlacement::Arbitrary {
                                ref offsets,
                                ref memory_index
                            } => {
                                assert_eq!(memory_index, &[0, 1]);
                                offsets
                            }
                            _ => bug!()
                        };
                        if offsets[i] == pair_offsets[0] &&
                           offsets[j] == pair_offsets[1] &&
                           align == pair.align &&
                           size == pair.size {
                            // We can use `ScalarPair` only when it matches our
                            // already computed layout (including `#[repr(C)]`).
                            abi = pair.abi;
                        }
                    }
                    _ => {}
                }
            }
        }
        if sized && fields.iter().any(|f| f.abi.is_uninhabited()) {
            abi = Abi::Uninhabited;
        }

        Ok(LayoutDetails {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldPlacement::Arbitrary {
                offsets,
                memory_index
            },
            abi,
            largest_niche,
            align,
            size
        })
    }
    fn layout_raw_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx LayoutDetails, LayoutError<'tcx>> {
        let tcx = self.tcx;
        let param_env = self.param_env;
        let dl = self.data_layout();
        let scalar_unit = |value: Primitive| {
            let bits = value.size(dl).bits();
            assert!(bits <= 128);
            Scalar {
                value,
                valid_range: 0..=(!0 >> (128 - bits))
            }
        };
        let scalar = |value: Primitive| {
            tcx.intern_layout(LayoutDetails::scalar(self, scalar_unit(value)))
        };
        let univariant = |fields: &[TyLayout<'_>], repr: &ReprOptions, kind| {
            Ok(tcx.intern_layout(self.univariant_uninterned(ty, fields, repr, kind)?))
        };
        debug_assert!(!ty.has_infer_types());
        Ok(match ty.sty {
            // Basic scalars.
            ty::Bool => {
                tcx.intern_layout(LayoutDetails::scalar(self, Scalar {
                    value: Int(I8, false),
                    valid_range: 0..=1
                }))
            }
            ty::Char => {
                tcx.intern_layout(LayoutDetails::scalar(self, Scalar {
                    value: Int(I32, false),
                    valid_range: 0..=0x10FFFF
                }))
            }
            ty::Int(ity) => {
                scalar(Int(Integer::from_attr(dl, attr::SignedInt(ity)), true))
            }
            ty::Uint(ity) => {
                scalar(Int(Integer::from_attr(dl, attr::UnsignedInt(ity)), false))
            }
            ty::Float(fty) => scalar(Float(fty)),
            ty::FnPtr(_) => {
                let mut ptr = scalar_unit(Pointer);
                ptr.valid_range = 1..=*ptr.valid_range.end();
                tcx.intern_layout(LayoutDetails::scalar(self, ptr))
            }

            // The never type.
            ty::Never => {
                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldPlacement::Union(0),
                    abi: Abi::Uninhabited,
                    largest_niche: None,
                    align: dl.i8_align,
                    size: Size::ZERO
                })
            }
            // Potentially-fat pointers.
            ty::Ref(_, pointee, _) |
            ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                let mut data_ptr = scalar_unit(Pointer);
                if !ty.is_unsafe_ptr() {
                    data_ptr.valid_range = 1..=*data_ptr.valid_range.end();
                }

                let pointee = tcx.normalize_erasing_regions(param_env, pointee);
                if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
                    return Ok(tcx.intern_layout(LayoutDetails::scalar(self, data_ptr)));
                }

                let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
                let metadata = match unsized_part.sty {
                    ty::Foreign(..) => {
                        return Ok(tcx.intern_layout(LayoutDetails::scalar(self, data_ptr)));
                    }
                    ty::Slice(_) | ty::Str => {
                        scalar_unit(Int(dl.ptr_sized_integer(), false))
                    }
                    ty::Dynamic(..) => {
                        let mut vtable = scalar_unit(Pointer);
                        vtable.valid_range = 1..=*vtable.valid_range.end();
                        vtable
                    }
                    _ => return Err(LayoutError::Unknown(unsized_part))
                };

                // Effectively a (ptr, meta) tuple.
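                // For example (illustrative): `&str` lowers to (data pointer,
                // `usize` length), while `&dyn Trait` lowers to (data pointer,
                // vtable pointer).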
                tcx.intern_layout(self.scalar_pair(data_ptr, metadata))
            }
            // Arrays and slices.
            ty::Array(element, mut count) => {
                if count.has_projections() {
                    count = tcx.normalize_erasing_regions(param_env, count);
                    if count.has_projections() {
                        return Err(LayoutError::Unknown(ty));
                    }
                }

                let count = count.assert_usize(tcx).ok_or(LayoutError::Unknown(ty))?;
                let element = self.layout_of(element)?;
                let size = element.size.checked_mul(count, dl)
                    .ok_or(LayoutError::SizeOverflow(ty))?;

                let abi = if count != 0 && ty.conservative_is_privately_uninhabited(tcx) {
                    Abi::Uninhabited
                } else {
                    Abi::Aggregate { sized: true }
                };

                let largest_niche = if count != 0 {
                    element.largest_niche.clone()
                } else {
                    None
                };

                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldPlacement::Array {
                        stride: element.size,
                        count
                    },
                    abi,
                    largest_niche,
                    align: element.align,
                    size
                })
            }
            ty::Slice(element) => {
                let element = self.layout_of(element)?;
                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldPlacement::Array {
                        stride: element.size,
                        count: 0
                    },
                    abi: Abi::Aggregate { sized: false },
                    largest_niche: None,
                    align: element.align,
                    size: Size::ZERO
                })
            }
            ty::Str => {
                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldPlacement::Array {
                        stride: Size::from_bytes(1),
                        count: 0
                    },
                    abi: Abi::Aggregate { sized: false },
                    largest_niche: None,
                    align: dl.i8_align,
                    size: Size::ZERO
                })
            }
            // Odd unit types.
            ty::FnDef(..) => {
                univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?
            }
            ty::Dynamic(..) | ty::Foreign(..) => {
                let mut unit = self.univariant_uninterned(ty, &[], &ReprOptions::default(),
                  StructKind::AlwaysSized)?;
                match unit.abi {
                    Abi::Aggregate { ref mut sized } => *sized = false,
                    _ => bug!()
                }
                tcx.intern_layout(unit)
            }
            ty::Generator(def_id, substs, _) => self.generator_layout(ty, def_id, &substs)?,

            ty::Closure(def_id, ref substs) => {
                let tys = substs.upvar_tys(def_id, tcx);
                univariant(&tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    StructKind::AlwaysSized)?
            }

            ty::Tuple(tys) => {
                let kind = if tys.len() == 0 {
                    StructKind::AlwaysSized
                } else {
                    StructKind::MaybeUnsized
                };

                univariant(&tys.iter().map(|k| {
                    self.layout_of(k.expect_ty())
                }).collect::<Result<Vec<_>, _>>()?, &ReprOptions::default(), kind)?
            }
            // SIMD vector types.
            ty::Adt(def, ..) if def.repr.simd() => {
                let element = self.layout_of(ty.simd_type(tcx))?;
                let count = ty.simd_size(tcx) as u64;
                let scalar = match element.abi {
                    Abi::Scalar(ref scalar) => scalar.clone(),
                    _ => {
                        tcx.sess.fatal(&format!("monomorphising SIMD type `{}` with \
                                                 a non-machine element type `{}`",
                                                ty, element.ty));
                    }
                };
                let size = element.size.checked_mul(count, dl)
                    .ok_or(LayoutError::SizeOverflow(ty))?;
                let align = dl.vector_align(size);
                let size = size.align_to(align.abi);

                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldPlacement::Array {
                        stride: element.size,
                        count
                    },
                    abi: Abi::Vector {
                        element: scalar,
                        count
                    },
                    largest_niche: element.largest_niche.clone(),
                    align,
                    size
                })
            }
            ty::Adt(def, substs) => {
                // Cache the field layouts.
                let variants = def.variants.iter().map(|v| {
                    v.fields.iter().map(|field| {
                        self.layout_of(field.ty(tcx, substs))
                    }).collect::<Result<Vec<_>, _>>()
                }).collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
                if def.is_union() {
                    let packed = def.repr.packed();
                    if packed && def.repr.align > 0 {
                        bug!("union cannot be packed and aligned");
                    }

                    let pack = Align::from_bytes(def.repr.pack as u64).unwrap();

                    let mut align = if packed {
                        dl.aggregate_align.min(AbiAndPrefAlign::new(pack))
                    } else {
                        dl.aggregate_align
                    };

                    if def.repr.align > 0 {
                        let repr_align = def.repr.align as u64;
                        align = align.max(
                            AbiAndPrefAlign::new(Align::from_bytes(repr_align).unwrap()));
                    }

                    let optimize = !def.repr.inhibit_union_abi_opt();
                    let mut size = Size::ZERO;
                    let mut abi = Abi::Aggregate { sized: true };
                    let index = VariantIdx::new(0);
                    for field in &variants[index] {
                        assert!(!field.is_unsized());

                        let field_align = if packed {
                            field.align.min(AbiAndPrefAlign::new(pack))
                        } else {
                            field.align
                        };
                        align = align.max(field_align);

                        // If all non-ZST fields have the same ABI, forward this ABI
                        if optimize && !field.is_zst() {
                            // Normalize scalar_unit to the maximal valid range
                            let field_abi = match &field.abi {
                                Abi::Scalar(x) => Abi::Scalar(scalar_unit(x.value)),
                                Abi::ScalarPair(x, y) => {
                                    Abi::ScalarPair(
                                        scalar_unit(x.value),
                                        scalar_unit(y.value),
                                    )
                                }
                                Abi::Vector { element: x, count } => {
                                    Abi::Vector {
                                        element: scalar_unit(x.value),
                                        count: *count,
                                    }
                                }
                                Abi::Uninhabited |
                                Abi::Aggregate { .. } => Abi::Aggregate { sized: true },
                            };

                            if size == Size::ZERO {
                                // first non-ZST field: initialize `abi`
                                abi = field_abi;
                            } else if abi != field_abi {
                                // different fields have different ABI: reset to Aggregate
                                abi = Abi::Aggregate { sized: true };
                            }
                        }

                        size = cmp::max(size, field.size);
                    }

                    return Ok(tcx.intern_layout(LayoutDetails {
                        variants: Variants::Single { index },
                        fields: FieldPlacement::Union(variants[index].len()),
                        abi,
                        largest_niche: None,
                        align,
                        size: size.align_to(align.abi)
                    }));
                }
                // A variant is absent if it's uninhabited and only has ZST fields.
                // Present uninhabited variants only require space for their fields,
                // but *not* an encoding of the discriminant (e.g., a tag value).
                // See issue #49298 for more details on the need to leave space
                // for non-ZST uninhabited data (mostly partial initialization).
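                // For example (illustrative): in `enum E { A(!), B(i32) }`,
                // variant `A` is absent (uninhabited with only ZST fields),
                // while a variant `C(i32, !)` would be uninhabited but still
                // present, and would still get space for its `i32`.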
                let absent = |fields: &[TyLayout<'_>]| {
                    let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
                    let is_zst = fields.iter().all(|f| f.is_zst());
                    uninhabited && is_zst
                };
                let (present_first, present_second) = {
                    let mut present_variants = variants.iter_enumerated().filter_map(|(i, v)| {
                        if absent(v) { None } else { Some(i) }
                    });
                    (present_variants.next(), present_variants.next())
                };
                if present_first.is_none() {
                    // Uninhabited because it has no variants, or only absent ones.
                    return tcx.layout_raw(param_env.and(tcx.types.never));
                }
                let is_struct = !def.is_enum() ||
                    // Only one variant is present.
                    (present_second.is_none() &&
                     // Representation optimizations are allowed.
                     !def.repr.inhibit_enum_layout_opt());
                if is_struct {
                    // Struct, or univariant enum equivalent to a struct.
                    // (Typechecking will reject discriminant-sizing attrs.)

                    let v = present_first.unwrap();
                    let kind = if def.is_enum() || variants[v].len() == 0 {
                        StructKind::AlwaysSized
                    } else {
                        let param_env = tcx.param_env(def.did);
                        let last_field = def.variants[v].fields.last().unwrap();
                        let always_sized = tcx.type_of(last_field.did)
                            .is_sized(tcx.at(DUMMY_SP), param_env);
                        if !always_sized { StructKind::MaybeUnsized }
                        else { StructKind::AlwaysSized }
                    };

                    let mut st = self.univariant_uninterned(ty, &variants[v], &def.repr, kind)?;
                    st.variants = Variants::Single { index: v };
                    let (start, end) = self.tcx.layout_scalar_valid_range(def.did);
                    match st.abi {
                        Abi::Scalar(ref mut scalar) |
                        Abi::ScalarPair(ref mut scalar, _) => {
                            // the asserts ensure that we are not using the
                            // `#[rustc_layout_scalar_valid_range(n)]`
                            // attribute to widen the range of anything as that would probably
                            // result in UB somewhere
                            // FIXME(eddyb) the asserts are probably not needed,
                            // as larger validity ranges would result in missed
                            // optimizations, *not* wrongly assuming the inner
                            // value is valid. e.g. unions enlarge validity ranges,
                            // because the values may be uninitialized.
                            if let Bound::Included(start) = start {
                                // FIXME(eddyb) this might be incorrect - it doesn't
                                // account for wrap-around (end < start) ranges.
                                assert!(*scalar.valid_range.start() <= start);
                                scalar.valid_range = start..=*scalar.valid_range.end();
                            }
                            if let Bound::Included(end) = end {
                                // FIXME(eddyb) this might be incorrect - it doesn't
                                // account for wrap-around (end < start) ranges.
                                assert!(*scalar.valid_range.end() >= end);
                                scalar.valid_range = *scalar.valid_range.start()..=end;
                            }

                            // Update `largest_niche` if we have introduced a larger niche.
                            let niche = Niche::from_scalar(dl, Size::ZERO, scalar.clone());
                            if let Some(niche) = niche {
                                match &st.largest_niche {
                                    Some(largest_niche) => {
                                        // Replace the existing niche even if they're equal,
                                        // because this one is at a lower offset.
                                        if largest_niche.available(dl) <= niche.available(dl) {
                                            st.largest_niche = Some(niche);
                                        }
                                    }
                                    None => st.largest_niche = Some(niche),
                                }
                            }
                        }
                        _ => assert!(
                            start == Bound::Unbounded && end == Bound::Unbounded,
                            "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
                            def,
                            st,
                        ),
                    }

                    return Ok(tcx.intern_layout(st));
                }
                // The current code for niche-filling relies on variant indices
                // instead of actual discriminants, so dataful enums with
                // explicit discriminants (RFC #2363) would misbehave.
                let no_explicit_discriminants = def.variants.iter_enumerated()
                    .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i.as_u32()));
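                // For example (illustrative): `Option<&T>` is niche-filled,
                // encoding `None` as the null value of the reference, so no
                // separate tag field is needed.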
                // Niche-filling enum optimization.
                if !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants {
                    let mut dataful_variant = None;
                    let mut niche_variants = VariantIdx::MAX..=VariantIdx::new(0);

                    // Find one non-ZST variant.
                    'variants: for (v, fields) in variants.iter_enumerated() {
                        if absent(fields) {
                            continue 'variants;
                        }
                        for f in fields {
                            if !f.is_zst() {
                                if dataful_variant.is_none() {
                                    dataful_variant = Some(v);
                                    continue 'variants;
                                } else {
                                    dataful_variant = None;
                                    break 'variants;
                                }
                            }
                        }
                        niche_variants = *niche_variants.start().min(&v)..=v;
                    }

                    if niche_variants.start() > niche_variants.end() {
                        dataful_variant = None;
                    }
                    if let Some(i) = dataful_variant {
                        let count = (
                            niche_variants.end().as_u32() - niche_variants.start().as_u32() + 1
                        ) as u128;
                        // FIXME(#62691) use the largest niche across all fields,
                        // not just the first one.
                        for (field_index, &field) in variants[i].iter().enumerate() {
                            let niche = match &field.largest_niche {
                                Some(niche) => niche,
                                _ => continue,
                            };
                            let (niche_start, niche_scalar) = match niche.reserve(self, count) {
                                Some(pair) => pair,
                                None => continue,
                            };

                            let mut align = dl.aggregate_align;
                            let st = variants.iter_enumerated().map(|(j, v)| {
                                let mut st = self.univariant_uninterned(ty, v,
                                    &def.repr, StructKind::AlwaysSized)?;
                                st.variants = Variants::Single { index: j };

                                align = align.max(st.align);

                                Ok(st)
                            }).collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                            let offset = st[i].fields.offset(field_index) + niche.offset;
                            let size = st[i].size;

                            let mut abi = match st[i].abi {
                                Abi::Scalar(_) => Abi::Scalar(niche_scalar.clone()),
                                Abi::ScalarPair(ref first, ref second) => {
                                    // We need to use scalar_unit to reset the
                                    // valid range to the maximal one for that
                                    // primitive, because only the niche is
                                    // guaranteed to be initialised, not the
                                    // other primitive.
                                    if offset.bytes() == 0 {
                                        Abi::ScalarPair(
                                            niche_scalar.clone(),
                                            scalar_unit(second.value),
                                        )
                                    } else {
                                        Abi::ScalarPair(
                                            scalar_unit(first.value),
                                            niche_scalar.clone(),
                                        )
                                    }
                                }
                                _ => Abi::Aggregate { sized: true },
                            };

                            if st.iter().all(|v| v.abi.is_uninhabited()) {
                                abi = Abi::Uninhabited;
                            }

                            let largest_niche =
                                Niche::from_scalar(dl, offset, niche_scalar.clone());

                            return Ok(tcx.intern_layout(LayoutDetails {
                                variants: Variants::Multiple {
                                    discr: niche_scalar,
                                    discr_kind: DiscriminantKind::Niche {
                                        dataful_variant: i,
                                        niche_variants,
                                        niche_start,
                                    },
                                    discr_index: 0,
                                    variants: st,
                                },
                                fields: FieldPlacement::Arbitrary {
                                    offsets: vec![offset],
                                    memory_index: vec![0]
                                },
                                abi,
                                largest_niche,
                                size,
                                align,
                            }));
                        }
                    }
                }
                let (mut min, mut max) = (i128::max_value(), i128::min_value());
                let discr_type = def.repr.discr_type();
                let bits = Integer::from_attr(self, discr_type).size().bits();
                for (i, discr) in def.discriminants(tcx) {
                    if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
                        continue;
                    }
                    let mut x = discr.val as i128;
                    if discr_type.is_signed() {
                        // sign extend the raw representation to be an i128
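                        // (e.g., illustrative: with bits == 8, the raw value
                        // 0xFF sign-extends to -1)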
                        x = (x << (128 - bits)) >> (128 - bits);
                    }
                    if x < min { min = x; }
                    if x > max { max = x; }
                }
                // We might have no inhabited variants, so pretend there's at least one.
                if (min, max) == (i128::max_value(), i128::min_value()) {
                    min = 0;
                    max = 0;
                }
                assert!(min <= max, "discriminant range is {}...{}", min, max);
                let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max);
                let mut align = dl.aggregate_align;
                let mut size = Size::ZERO;

                // We're interested in the smallest alignment, so start large.
                let mut start_align = Align::from_bytes(256).unwrap();
                assert_eq!(Integer::for_align(dl, start_align), None);

                // repr(C) on an enum tells us to make a (tag, union) layout,
                // so we need to grow the prefix alignment to be at least
                // the alignment of the union. (This value is used both for
                // determining the alignment of the overall enum, and for
                // determining the alignment of the payload after the tag.)
                let mut prefix_align = min_ity.align(dl).abi;
                if def.repr.c() {
                    for fields in &variants {
                        for field in fields {
                            prefix_align = prefix_align.max(field.align.abi);
                        }
                    }
                }
                // Create the set of structs that represent each variant.
                let mut layout_variants = variants.iter_enumerated().map(|(i, field_layouts)| {
                    let mut st = self.univariant_uninterned(ty, &field_layouts,
                        &def.repr, StructKind::Prefixed(min_ity.size(), prefix_align))?;
                    st.variants = Variants::Single { index: i };
                    // Find the first field we can't move later
                    // to make room for a larger discriminant.
                    for field in st.fields.index_by_increasing_offset().map(|j| field_layouts[j]) {
                        if !field.is_zst() || field.align.abi.bytes() != 1 {
                            start_align = start_align.min(field.align.abi);
                            break;
                        }
                    }
                    size = cmp::max(size, st.size);
                    align = align.max(st.align);
                    Ok(st)
                }).collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                // Align the maximum variant size to the largest alignment.
                size = size.align_to(align.abi);

                if size.bytes() >= dl.obj_size_bound() {
                    return Err(LayoutError::SizeOverflow(ty));
                }
                let typeck_ity = Integer::from_attr(dl, def.repr.discr_type());
                if typeck_ity < min_ity {
                    // It is a bug if layout decided on a greater discriminant size than
                    // typeck did at this point (based on the values the discriminant can
                    // take on), mostly because this discriminant will be loaded and then
                    // stored into a variable of the type calculated by typeck. Consider
                    // such a case (a bug): typeck decided on a byte-sized discriminant,
                    // but layout thinks we need 16 bits to store all discriminant values.
                    // Then, in codegen, in order to store this 16-bit discriminant into
                    // an 8-bit sized temporary, some of the space necessary to represent
                    // it would have to be discarded (or layout is wrong in thinking it
                    // needs 16 bits).
                    bug!("layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
                         min_ity, typeck_ity);
                    // However, it is fine to make discr type however large (as an optimisation)
                    // after this point; we'll just truncate the value we load in codegen.
                }
                // Check to see if we should use a different type for the
                // discriminant. We can safely use a type with the same size
                // as the alignment of the first field of each variant.
                // We increase the size of the discriminant to avoid LLVM copying
                // padding when it doesn't need to. This normally causes unaligned
                // load/stores and excessive memcpy/memset operations. By using a
                // bigger integer size, LLVM can be sure about its contents and
                // won't be so conservative.

                // Use the initial field alignment
                let mut ity = if def.repr.c() || def.repr.int.is_some() {
                    min_ity
                } else {
                    Integer::for_align(dl, start_align).unwrap_or(min_ity)
                };

                // If the alignment is not larger than the chosen discriminant size,
                // don't use the alignment as the final size.
                if ity <= min_ity {
                    ity = min_ity;
                } else {
                    // Patch up the variants' first few fields.
                    let old_ity_size = min_ity.size();
                    let new_ity_size = ity.size();
                    for variant in &mut layout_variants {
                        match variant.fields {
                            FieldPlacement::Arbitrary { ref mut offsets, .. } => {
                                for i in offsets {
                                    if *i <= old_ity_size {
                                        assert_eq!(*i, old_ity_size);
                                        *i = new_ity_size;
                                    }
                                }
                                // We might be making the struct larger.
                                if variant.size <= old_ity_size {
                                    variant.size = new_ity_size;
                                }
                            }
                            _ => bug!()
                        }
                    }
                }
                let tag_mask = !0u128 >> (128 - ity.size().bits());
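                // (e.g., illustrative: ity == I8 gives tag_mask == 0xff)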
                let tag = Scalar {
                    value: Int(ity, signed),
                    valid_range: (min as u128 & tag_mask)..=(max as u128 & tag_mask),
                };
                let mut abi = Abi::Aggregate { sized: true };
                if tag.value.size(dl) == size {
                    abi = Abi::Scalar(tag.clone());
                } else {
                    // Try to use a ScalarPair for all tagged enums.
                    let mut common_prim = None;
                    for (field_layouts, layout_variant) in variants.iter().zip(&layout_variants) {
                        let offsets = match layout_variant.fields {
                            FieldPlacement::Arbitrary { ref offsets, .. } => offsets,
                            _ => bug!(),
                        };
                        let mut fields = field_layouts
                            .iter()
                            .zip(offsets)
                            .filter(|p| !p.0.is_zst());
                        let (field, offset) = match (fields.next(), fields.next()) {
                            (None, None) => continue,
                            (Some(pair), None) => pair,
                            _ => {
                                common_prim = None;
                                break;
                            }
                        };
                        let prim = match field.details.abi {
                            Abi::Scalar(ref scalar) => scalar.value,
                            _ => {
                                common_prim = None;
                                break;
                            }
                        };
                        if let Some(pair) = common_prim {
                            // This is pretty conservative. We could go fancier
                            // by conflating things like i32 and u32, or even
                            // realising that (u8, u8) could just cohabit with
                            // u16 or even u32.
                            if pair != (prim, offset) {
                                common_prim = None;
                                break;
                            }
                        } else {
                            common_prim = Some((prim, offset));
                        }
                    }
                    if let Some((prim, offset)) = common_prim {
                        let pair = self.scalar_pair(tag.clone(), scalar_unit(prim));
                        let pair_offsets = match pair.fields {
                            FieldPlacement::Arbitrary {
                                ref offsets,
                                ref memory_index
                            } => {
                                assert_eq!(memory_index, &[0, 1]);
                                offsets
                            }
                            _ => bug!()
                        };
                        if pair_offsets[0] == Size::ZERO &&
                            pair_offsets[1] == *offset &&
                            align == pair.align &&
                            size == pair.size {
                            // We can use `ScalarPair` only when it matches our
                            // already computed layout (including `#[repr(C)]`).
                            abi = pair.abi;
                        }
                    }
                }

                if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
                    abi = Abi::Uninhabited;
                }
                let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag.clone());

                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Multiple {
                        discr: tag,
                        discr_kind: DiscriminantKind::Tag,
                        discr_index: 0,
                        variants: layout_variants,
                    },
                    fields: FieldPlacement::Arbitrary {
                        offsets: vec![Size::ZERO],
                        memory_index: vec![0]
                    },
                    largest_niche,
                    abi,
                    align,
                    size
                })
            }
            // Types with no meaningful known layout.
            ty::Projection(_) | ty::Opaque(..) => {
                let normalized = tcx.normalize_erasing_regions(param_env, ty);
                if ty == normalized {
                    return Err(LayoutError::Unknown(ty));
                }
                tcx.layout_raw(param_env.and(normalized))?
            }

            ty::Bound(..) |
            ty::Placeholder(..) |
            ty::UnnormalizedProjection(..) |
            ty::GeneratorWitness(..) |
            ty::Infer(_) => {
                bug!("LayoutDetails::compute: unexpected type `{}`", ty)
            }

            ty::Param(_) | ty::Error => {
                return Err(LayoutError::Unknown(ty));
            }
        })
    }
}
/// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
#[derive(Clone, Debug, PartialEq)]
enum SavedLocalEligibility {
    Unassigned,
    Assigned(VariantIdx),
    // FIXME: Use newtype_index so we aren't wasting bytes
    Ineligible(Option<u32>),
}
// When laying out generators, we divide our saved local fields into two
// categories: overlap-eligible and overlap-ineligible.
//
// Those fields which are ineligible for overlap go in a "prefix" at the
// beginning of the layout, and always have space reserved for them.
//
// Overlap-eligible fields are only assigned to one variant, so we lay
// those fields out for each variant and put them right after the
// prefix.
//
// Finally, in the layout details, we point to the fields from the
// variants they are assigned to. It is possible for some fields to be
// included in multiple variants. No field ever "moves around" in the
// layout; its offset is always the same.
//
// Also included in the layout are the upvars and the discriminant.
// These are included as fields on the "outer" layout; they are not part
// of any variant.
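//
// An illustrative sketch (hypothetical, not taken from the original
// comments):
//
//   [ upvars | discriminant | promoted (ineligible) locals | variant fields ]
//   \------------------------- prefix -------------------------/
//
// Variant fields of different variants may overlap one another, but they
// never overlap the prefix.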
impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
    /// Compute the eligibility and assignment of each local.
    fn generator_saved_local_eligibility(&self, info: &GeneratorLayout<'tcx>)
    -> (BitSet<GeneratorSavedLocal>, IndexVec<GeneratorSavedLocal, SavedLocalEligibility>) {
        use SavedLocalEligibility::*;

        let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> =
            IndexVec::from_elem_n(Unassigned, info.field_tys.len());

        // The saved locals not eligible for overlap. These will get
        // "promoted" to the prefix of our generator.
        let mut ineligible_locals = BitSet::new_empty(info.field_tys.len());

        // Figure out which of our saved locals are fields in only
        // one variant. The rest are deemed ineligible for overlap.
        for (variant_index, fields) in info.variant_fields.iter_enumerated() {
            for local in fields {
                match assignments[*local] {
                    Unassigned => {
                        assignments[*local] = Assigned(variant_index);
                    }
                    Assigned(idx) => {
                        // We've already seen this local at another suspension
                        // point, so it is no longer a candidate.
                        trace!("removing local {:?} in >1 variant ({:?}, {:?})",
                               local, variant_index, idx);
                        ineligible_locals.insert(*local);
                        assignments[*local] = Ineligible(None);
                    }
                    Ineligible(_) => {},
                }
            }
        }
        // Next, check every pair of eligible locals to see if they
        // conflict.
        for local_a in info.storage_conflicts.rows() {
            let conflicts_a = info.storage_conflicts.count(local_a);
            if ineligible_locals.contains(local_a) {
                continue;
            }

            for local_b in info.storage_conflicts.iter(local_a) {
                // local_a and local_b are storage live at the same time, therefore they
                // cannot overlap in the generator layout. The only way to guarantee
                // this is if they are in the same variant, or one is ineligible
                // (which means it is stored in every variant).
                if ineligible_locals.contains(local_b) ||
                    assignments[local_a] == assignments[local_b]
                {
                    continue;
                }

                // If they conflict, we will choose one to make ineligible.
                // This is not always optimal; it's just a greedy heuristic that
                // seems to produce good results most of the time.
                let conflicts_b = info.storage_conflicts.count(local_b);
                let (remove, other) = if conflicts_a > conflicts_b {
                    (local_a, local_b)
                } else {
                    (local_b, local_a)
                };
                ineligible_locals.insert(remove);
                assignments[remove] = Ineligible(None);
                trace!("removing local {:?} due to conflict with {:?}", remove, other);
            }
        }
        // Write down the order of our locals that will be promoted to the prefix.
        {
            let mut idx = 0u32;
            for local in ineligible_locals.iter() {
                assignments[local] = Ineligible(Some(idx));
                idx += 1;
            }
        }
        debug!("generator saved local assignments: {:?}", assignments);

        (ineligible_locals, assignments)
    }
    /// Compute the full generator layout.
    fn generator_layout(
        &self,
        ty: Ty<'tcx>,
        def_id: hir::def_id::DefId,
        substs: &GeneratorSubsts<'tcx>,
    ) -> Result<&'tcx LayoutDetails, LayoutError<'tcx>> {
        use SavedLocalEligibility::*;
        let tcx = self.tcx;

        let subst_field = |ty: Ty<'tcx>| { ty.subst(tcx, substs.substs) };

        let info = tcx.generator_layout(def_id);
        let (ineligible_locals, assignments) = self.generator_saved_local_eligibility(&info);

        // Build a prefix layout, including "promoting" all ineligible
        // locals as part of the prefix. We compute the layout of all of
        // these fields at once to get optimal packing.
        let discr_index = substs.prefix_tys(def_id, tcx).count();
        // FIXME(eddyb) set the correct validity range for the discriminant.
        let discr_layout = self.layout_of(substs.discr_ty(tcx))?;
        let discr = match &discr_layout.abi {
            Abi::Scalar(s) => s.clone(),
            _ => bug!(),
        };
        // FIXME(eddyb) wrap each promoted type in `MaybeUninit` so that they
        // don't poison the `largest_niche` or `abi` fields of `prefix`.
        let promoted_layouts = ineligible_locals.iter()
            .map(|local| subst_field(info.field_tys[local]))
            .map(|ty| self.layout_of(ty));
        let prefix_layouts = substs.prefix_tys(def_id, tcx)
            .map(|ty| self.layout_of(ty))
            .chain(iter::once(Ok(discr_layout)))
            .chain(promoted_layouts)
            .collect::<Result<Vec<_>, _>>()?;
        let mut prefix = self.univariant_uninterned(
            ty,
            &prefix_layouts,
            &ReprOptions::default(),
            StructKind::AlwaysSized,
        )?;
        // FIXME(eddyb) need `MaybeUninit` around promoted types (see above).
        prefix.largest_niche = None;
        let (prefix_size, prefix_align) = (prefix.size, prefix.align);

        // Split the prefix layout into the "outer" fields (upvars and
        // discriminant) and the "promoted" fields. Promoted fields will
        // get included in each variant that requested them in
        // GeneratorLayout.
        debug!("prefix = {:#?}", prefix);
        let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
            FieldPlacement::Arbitrary { mut offsets, memory_index } => {
                let mut inverse_memory_index = invert_mapping(&memory_index);

                // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
                // "outer" and "promoted" fields respectively.
                let b_start = (discr_index + 1) as u32;
                let offsets_b = offsets.split_off(b_start as usize);
                let offsets_a = offsets;

                // Disentangle the "a" and "b" components of `inverse_memory_index`
                // by preserving the order but keeping only one disjoint "half" each.
                // FIXME(eddyb) build a better abstraction for permutations, if possible.
                let inverse_memory_index_b: Vec<_> =
                    inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
                inverse_memory_index.retain(|&i| i < b_start);
                let inverse_memory_index_a = inverse_memory_index;

                // Since `inverse_memory_index_{a,b}` each only refer to their
                // respective fields, they can be safely inverted
                let memory_index_a = invert_mapping(&inverse_memory_index_a);
                let memory_index_b = invert_mapping(&inverse_memory_index_b);

                let outer_fields = FieldPlacement::Arbitrary {
                    offsets: offsets_a,
                    memory_index: memory_index_a,
                };
                (outer_fields, offsets_b, memory_index_b)
            }
            _ => bug!(),
        };
        let mut size = prefix.size;
        let mut align = prefix.align;
        let variants = info.variant_fields.iter_enumerated().map(|(index, variant_fields)| {
            // Only include overlap-eligible fields when we compute our variant layout.
            let variant_only_tys = variant_fields
                .iter()
                .filter(|local| {
                    match assignments[**local] {
                        Unassigned => bug!(),
                        Assigned(v) if v == index => true,
                        Assigned(_) => bug!("assignment does not match variant"),
                        Ineligible(_) => false,
                    }
                })
                .map(|local| subst_field(info.field_tys[*local]));

            let mut variant = self.univariant_uninterned(
                ty,
                &variant_only_tys
                    .map(|ty| self.layout_of(ty))
                    .collect::<Result<Vec<_>, _>>()?,
                &ReprOptions::default(),
                StructKind::Prefixed(prefix_size, prefix_align.abi))?;
            variant.variants = Variants::Single { index };

            let (offsets, memory_index) = match variant.fields {
                FieldPlacement::Arbitrary { offsets, memory_index } => {
                    (offsets, memory_index)
                }
                _ => bug!(),
            };
            // Now, stitch the promoted and variant-only fields back together in
            // the order they are mentioned by our GeneratorLayout.
            // Because we only use some subset (that can differ between variants)
            // of the promoted fields, we can't just pick those elements of the
            // `promoted_memory_index` (as we'd end up with gaps).
            // So instead, we build an "inverse memory_index", as if all of the
            // promoted fields were being used, but leave the elements not in the
            // subset as `INVALID_FIELD_IDX`, which we can filter out later to
            // obtain a valid (bijective) mapping.
            const INVALID_FIELD_IDX: u32 = !0;
            let mut combined_inverse_memory_index =
                vec![INVALID_FIELD_IDX; promoted_memory_index.len() + memory_index.len()];
            let mut offsets_and_memory_index = offsets.into_iter().zip(memory_index);
            let combined_offsets = variant_fields.iter().enumerate().map(|(i, local)| {
                let (offset, memory_index) = match assignments[*local] {
                    Unassigned => bug!(),
                    Assigned(_) => {
                        let (offset, memory_index) = offsets_and_memory_index.next().unwrap();
                        (offset, promoted_memory_index.len() as u32 + memory_index)
                    }
                    Ineligible(field_idx) => {
                        let field_idx = field_idx.unwrap() as usize;
                        (promoted_offsets[field_idx], promoted_memory_index[field_idx])
                    }
                };
                combined_inverse_memory_index[memory_index as usize] = i as u32;
                offset
            }).collect();

            // Remove the unused slots and invert the mapping to obtain the
            // combined `memory_index` (also see previous comment).
            combined_inverse_memory_index.retain(|&i| i != INVALID_FIELD_IDX);
            let combined_memory_index = invert_mapping(&combined_inverse_memory_index);

            variant.fields = FieldPlacement::Arbitrary {
                offsets: combined_offsets,
                memory_index: combined_memory_index,
            };

            size = size.max(variant.size);
            align = align.max(variant.align);
            Ok(variant)
        }).collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
        size = size.align_to(align.abi);

        let abi = if prefix.abi.is_uninhabited() ||
                     variants.iter().all(|v| v.abi.is_uninhabited()) {
            Abi::Uninhabited
        } else {
            Abi::Aggregate { sized: true }
        };

        let layout = tcx.intern_layout(LayoutDetails {
            variants: Variants::Multiple {
                discr,
                discr_kind: DiscriminantKind::Tag,
                discr_index,
                variants,
            },
            fields: outer_fields,
            abi,
            largest_niche: prefix.largest_niche,
            size,
            align,
        });
        debug!("generator layout ({:?}): {:#?}", ty, layout);
        Ok(layout)
    }
    /// This is invoked by the `layout_raw` query to record the final
    /// layout of each type.
    #[inline]
    fn record_layout_for_printing(&self, layout: TyLayout<'tcx>) {
        // If we are running with `-Zprint-type-sizes`, maybe record layouts
        // for dumping later.
        if self.tcx.sess.opts.debugging_opts.print_type_sizes {
            self.record_layout_for_printing_outlined(layout)
        }
    }
    fn record_layout_for_printing_outlined(&self, layout: TyLayout<'tcx>) {
        // Ignore layouts that are done with non-empty environments or
        // non-monomorphic layouts, as the user only wants to see the stuff
        // resulting from the final codegen session.
        if layout.ty.has_param_types() ||
           layout.ty.has_self_ty() ||
           !self.param_env.caller_bounds.is_empty()
        {
            return;
        }

        // (delay format until we actually need it)
        let record = |kind, packed, opt_discr_size, variants| {
            let type_desc = format!("{:?}", layout.ty);
            self.tcx.sess.code_stats.borrow_mut().record_type_size(kind,
                                                                   type_desc,
                                                                   layout.align.abi,
                                                                   layout.size,
                                                                   packed,
                                                                   opt_discr_size,
                                                                   variants);
        };
        let adt_def = match layout.ty.sty {
            ty::Adt(ref adt_def, _) => {
                debug!("print-type-size t: `{:?}` process adt", layout.ty);
                adt_def
            }

            ty::Closure(..) => {
                debug!("print-type-size t: `{:?}` record closure", layout.ty);
                record(DataTypeKind::Closure, false, None, vec![]);
                return;
            }

            _ => {
                debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
                return;
            }
        };
        let adt_kind = adt_def.adt_kind();
        let adt_packed = adt_def.repr.packed();

        let build_variant_info = |n: Option<Ident>,
                                  flds: &[ast::Name],
                                  layout: TyLayout<'tcx>| {
            let mut min_size = Size::ZERO;
            let field_info: Vec<_> = flds.iter().enumerate().map(|(i, &name)| {
                match layout.field(self, i) {
                    Err(err) => {
                        bug!("no layout found for field {}: `{:?}`", name, err);
                    }
                    Ok(field_layout) => {
                        let offset = layout.fields.offset(i);
                        let field_end = offset + field_layout.size;
                        if min_size < field_end {
                            min_size = field_end;
                        }
                        session::FieldInfo {
                            name: name.to_string(),
                            offset: offset.bytes(),
                            size: field_layout.size.bytes(),
                            align: field_layout.align.abi.bytes(),
                        }
                    }
                }
            }).collect();

            session::VariantInfo {
                name: n.map(|n| n.to_string()),
                kind: if layout.is_unsized() {
                    session::SizeKind::Min
                } else {
                    session::SizeKind::Exact
                },
                align: layout.align.abi.bytes(),
                size: if min_size.bytes() == 0 {
                    layout.size.bytes()
                } else {
                    min_size.bytes()
                },
                fields: field_info,
            }
        };
        match layout.variants {
            Variants::Single { index } => {
                debug!("print-type-size `{:#?}` variant {}",
                       layout, adt_def.variants[index].ident);
                if !adt_def.variants.is_empty() {
                    let variant_def = &adt_def.variants[index];
                    let fields: Vec<_> =
                        variant_def.fields.iter().map(|f| f.ident.name).collect();
                    record(adt_kind.into(),
                           adt_packed,
                           None,
                           vec![build_variant_info(Some(variant_def.ident),
                                                   &fields,
                                                   layout)]);
                } else {
                    // (This case arises for *empty* enums; so give it
                    // zero variants.)
                    record(adt_kind.into(), adt_packed, None, vec![]);
                }
            }

            Variants::Multiple { ref discr, ref discr_kind, .. } => {
                debug!("print-type-size `{:#?}` adt general variants def {}",
                       layout.ty, adt_def.variants.len());
                let variant_infos: Vec<_> =
                    adt_def.variants.iter_enumerated().map(|(i, variant_def)| {
                        let fields: Vec<_> =
                            variant_def.fields.iter().map(|f| f.ident.name).collect();
                        build_variant_info(Some(variant_def.ident),
                                           &fields,
                                           layout.for_variant(self, i))
                    })
                    .collect();
                record(adt_kind.into(), adt_packed, match discr_kind {
                    DiscriminantKind::Tag => Some(discr.value.size(self)),
                    _ => None
                }, variant_infos);
            }
        }
    }
}
/// Type size "skeleton", i.e., the only information determining a type's size.
/// While this is conservative, (aside from constant sizes, only pointers,
/// newtypes thereof and null pointer optimized enums are allowed), it is
/// enough to statically check common use cases of transmute.
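///
/// For example (illustrative): `&T` and `Option<&T>` (for `T: Sized`) both
/// reduce to `SizeSkeleton::Pointer`, so a `transmute` between them can be
/// accepted without knowing `T`'s layout.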
#[derive(Copy, Clone, Debug)]
pub enum SizeSkeleton<'tcx> {
    /// Any statically computable Layout.
    Known(Size),

    /// A potentially-fat pointer.
    Pointer {
        /// If true, this pointer is never null.
        non_zero: bool,
        /// The type which determines the unsized metadata, if any,
        /// of this pointer. Either a type parameter or a projection
        /// depending on one, with regions erased.
        tail: Ty<'tcx>,
    },
}
impl<'tcx> SizeSkeleton<'tcx> {
    pub fn compute(
        ty: Ty<'tcx>,
        tcx: TyCtxt<'tcx>,
        param_env: ty::ParamEnv<'tcx>,
    ) -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
        debug_assert!(!ty.has_infer_types());

        // First try computing a static layout.
        let err = match tcx.layout_of(param_env.and(ty)) {
            Ok(layout) => {
                return Ok(SizeSkeleton::Known(layout.size));
            }
            Err(err) => err,
        };

        match ty.sty {
            ty::Ref(_, pointee, _) |
            ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                let non_zero = !ty.is_unsafe_ptr();
                let tail = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
                match tail.sty {
                    ty::Param(_) | ty::Projection(_) => {
                        debug_assert!(tail.has_param_types() || tail.has_self_ty());
                        Ok(SizeSkeleton::Pointer {
                            non_zero,
                            tail: tcx.erase_regions(&tail)
                        })
                    }
                    _ => {
                        bug!("SizeSkeleton::compute({}): layout errored ({}), yet \
                              tail `{}` is not a type parameter or a projection",
                             ty, err, tail)
                    }
                }
            }
            ty::Adt(def, substs) => {
                // Only newtypes and enums w/ nullable pointer optimization.
                if def.is_union() || def.variants.is_empty() || def.variants.len() > 2 {
                    return Err(err);
                }

                // Get a zero-sized variant or a pointer newtype.
                let zero_or_ptr_variant = |i| {
                    let i = VariantIdx::new(i);
                    let fields = def.variants[i].fields.iter().map(|field| {
                        SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env)
                    });
                    let mut ptr = None;
                    for field in fields {
                        let field = field?;
                        match field {
                            SizeSkeleton::Known(size) => {
                                if size.bytes() > 0 {
                                    return Err(err);
                                }
                            }
                            SizeSkeleton::Pointer {..} => {
                                if ptr.is_some() {
                                    return Err(err);
                                }
                                ptr = Some(field);
                            }
                        }
                    }
                    Ok(ptr)
                };

                let v0 = zero_or_ptr_variant(0)?;
                // Newtype.
                if def.variants.len() == 1 {
                    if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
                        return Ok(SizeSkeleton::Pointer {
                            non_zero: non_zero || match tcx.layout_scalar_valid_range(def.did) {
                                (Bound::Included(start), Bound::Unbounded) => start > 0,
                                (Bound::Included(start), Bound::Included(end)) =>
                                    0 < start && start < end,
                                _ => false,
                            },
                            tail,
                        });
                    } else {
                        return Err(err);
                    }
                }
                let v1 = zero_or_ptr_variant(1)?;
                // Nullable pointer enum optimization.
                match (v0, v1) {
                    (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None) |
                    (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
                        Ok(SizeSkeleton::Pointer {
                            non_zero: false,
                            tail,
                        })
                    }
                    _ => Err(err),
                }
            }

            ty::Projection(_) | ty::Opaque(..) => {
                let normalized = tcx.normalize_erasing_regions(param_env, ty);
                if ty == normalized {
                    Err(err)
                } else {
                    SizeSkeleton::compute(normalized, tcx, param_env)
                }
            }

            _ => Err(err),
        }
    }
    pub fn same_size(self, other: SizeSkeleton<'_>) -> bool {
        match (self, other) {
            (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
            (SizeSkeleton::Pointer { tail: a, .. },
             SizeSkeleton::Pointer { tail: b, .. }) => a == b,
            _ => false,
        }
    }
}
pub trait HasTyCtxt<'tcx>: HasDataLayout {
    fn tcx(&self) -> TyCtxt<'tcx>;
}

pub trait HasParamEnv<'tcx> {
    fn param_env(&self) -> ty::ParamEnv<'tcx>;
}

impl<'tcx> HasDataLayout for TyCtxt<'tcx> {
    fn data_layout(&self) -> &TargetDataLayout {
        &self.data_layout
    }
}

impl<'tcx> HasTyCtxt<'tcx> for TyCtxt<'tcx> {
    fn tcx(&self) -> TyCtxt<'tcx> {
        *self
    }
}

impl<'tcx, C> HasParamEnv<'tcx> for LayoutCx<'tcx, C> {
    fn param_env(&self) -> ty::ParamEnv<'tcx> {
        self.param_env
    }
}

impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
    fn data_layout(&self) -> &TargetDataLayout {
        self.tcx.data_layout()
    }
}

impl<'tcx, T: HasTyCtxt<'tcx>> HasTyCtxt<'tcx> for LayoutCx<'tcx, T> {
    fn tcx(&self) -> TyCtxt<'tcx> {
        self.tcx.tcx()
    }
}
pub trait MaybeResult<T> {
    type Error;

    fn from(x: Result<T, Self::Error>) -> Self;
    fn to_result(self) -> Result<T, Self::Error>;
}

impl<T> MaybeResult<T> for T {
    type Error = !;

    fn from(x: Result<T, Self::Error>) -> Self {
        let Ok(x) = x;
        x
    }
    fn to_result(self) -> Result<T, Self::Error> {
        Ok(self)
    }
}

impl<T, E> MaybeResult<T> for Result<T, E> {
    type Error = E;

    fn from(x: Result<T, Self::Error>) -> Self {
        x
    }
    fn to_result(self) -> Result<T, Self::Error> {
        self
    }
}
pub type TyLayout<'tcx> = ::rustc_target::abi::TyLayout<'tcx, Ty<'tcx>>;
impl<'tcx> LayoutOf for LayoutCx<'tcx, TyCtxt<'tcx>> {
    type Ty = Ty<'tcx>;
    type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;

    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout {
        let param_env = self.param_env.with_reveal_all();
        let ty = self.tcx.normalize_erasing_regions(param_env, ty);
        let details = self.tcx.layout_raw(param_env.and(ty))?;
        let layout = TyLayout {
            ty,
            details
        };

        // N.B., this recording is normally disabled; when enabled, it
        // can however trigger recursive invocations of `layout_of`.
        // Therefore, we execute it *after* the main query has
        // completed, to avoid problems around recursive structures
        // and the like. (Admittedly, I wasn't able to reproduce a problem
        // here, but it seems like the right thing to do. -nmatsakis)
        self.record_layout_for_printing(layout);

        Ok(layout)
    }
}
impl LayoutOf for LayoutCx<'tcx, ty::query::TyCtxtAt<'tcx>> {
    type Ty = Ty<'tcx>;
    type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;

    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout {
        let param_env = self.param_env.with_reveal_all();
        let ty = self.tcx.normalize_erasing_regions(param_env, ty);
        let details = self.tcx.layout_raw(param_env.and(ty))?;
        let layout = TyLayout {
            ty,
            details
        };

        // N.B., this recording is normally disabled; when enabled, it
        // can however trigger recursive invocations of `layout_of`.
        // Therefore, we execute it *after* the main query has
        // completed, to avoid problems around recursive structures
        // and the like. (Admittedly, I wasn't able to reproduce a problem
        // here, but it seems like the right thing to do. -nmatsakis)
        let cx = LayoutCx {
            tcx: *self.tcx,
            param_env: self.param_env
        };
        cx.record_layout_for_printing(layout);

        Ok(layout)
    }
}
// Helper (inherent) `layout_of` methods to avoid pushing `LayoutCx` to users.
impl TyCtxt<'tcx> {
    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    #[inline]
    pub fn layout_of(self, param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
                     -> Result<TyLayout<'tcx>, LayoutError<'tcx>> {
        let cx = LayoutCx {
            tcx: self.global_tcx(),
            param_env: param_env_and_ty.param_env
        };
        cx.layout_of(param_env_and_ty.value)
    }
}
impl ty::query::TyCtxtAt<'tcx> {
    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    #[inline]
    pub fn layout_of(self, param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
                     -> Result<TyLayout<'tcx>, LayoutError<'tcx>> {
        let cx = LayoutCx {
            tcx: self.global_tcx().at(self.span),
            param_env: param_env_and_ty.param_env
        };
        cx.layout_of(param_env_and_ty.value)
    }
}
impl<'tcx, C> TyLayoutMethods<'tcx, C> for Ty<'tcx>
where
    C: LayoutOf<Ty = Ty<'tcx>> + HasTyCtxt<'tcx>,
    C::TyLayout: MaybeResult<TyLayout<'tcx>>,
    C: HasParamEnv<'tcx>,
{
    fn for_variant(this: TyLayout<'tcx>, cx: &C, variant_index: VariantIdx) -> TyLayout<'tcx> {
        let details = match this.variants {
            Variants::Single { index } if index == variant_index => this.details,

            Variants::Single { index } => {
                // Deny calling for_variant more than once for non-Single enums.
                if let Ok(layout) = cx.layout_of(this.ty).to_result() {
                    assert_eq!(layout.variants, Variants::Single { index });
                }

                let fields = match this.ty.sty {
                    ty::Adt(def, _) => def.variants[variant_index].fields.len(),
                    _ => bug!()
                };
                let tcx = cx.tcx();
                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: variant_index },
                    fields: FieldPlacement::Union(fields),
                    abi: Abi::Uninhabited,
                    largest_niche: None,
                    align: tcx.data_layout.i8_align,
                    size: Size::ZERO
                })
            }

            Variants::Multiple { ref variants, .. } => {
                &variants[variant_index]
            }
        };

        assert_eq!(details.variants, Variants::Single { index: variant_index });

        TyLayout {
            ty: this.ty,
            details
        }
    }
    fn field(this: TyLayout<'tcx>, cx: &C, i: usize) -> C::TyLayout {
        let tcx = cx.tcx();
        let discr_layout = |discr: &Scalar| -> C::TyLayout {
            let layout = LayoutDetails::scalar(cx, discr.clone());
            MaybeResult::from(Ok(TyLayout {
                details: tcx.intern_layout(layout),
                ty: discr.value.to_ty(tcx),
            }))
        };

        cx.layout_of(match this.ty.sty {
            ty::Bool |
            ty::Char |
            ty::Int(_) |
            ty::Uint(_) |
            ty::Float(_) |
            ty::FnPtr(_) |
            ty::Never |
            ty::FnDef(..) |
            ty::GeneratorWitness(..) |
            ty::Foreign(..) |
            ty::Dynamic(..) => {
                bug!("TyLayout::field_type({:?}): not applicable", this)
            }
2079 // Potentially-fat pointers.
2080 ty::Ref(_, pointee, _) |
2081 ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
2082 assert!(i < this.fields.count());
2084 // Reuse the fat *T type as its own thin pointer data field.
2085 // This provides information about e.g., DST struct pointees
2086 // (which may have no non-DST form), and will work as long
2087 // as the `Abi` or `FieldPlacement` is checked by users.
2089 let nil = tcx.mk_unit();
2090 let ptr_ty = if this.ty.is_unsafe_ptr() {
2093 tcx.mk_mut_ref(tcx.lifetimes.re_static, nil)
2095 return MaybeResult::from(cx.layout_of(ptr_ty).to_result().map(|mut ptr_layout| {
ptr_layout.ty = this.ty;
    ptr_layout
}));
}
match tcx.struct_tail_erasing_lifetimes(pointee, cx.param_env()).sty {
    ty::Slice(_) |
    ty::Str => tcx.types.usize,
ty::Dynamic(_, _) => {
    tcx.mk_imm_ref(
        tcx.lifetimes.re_static,
        tcx.mk_array(tcx.types.usize, 3),
    )
2109 /* FIXME: use actual fn pointers
2110 Warning: naively computing the number of entries in the
2111 vtable by counting the methods on the trait + methods on
all parent traits does not work, because some methods may not
be object safe and are thus excluded from the vtable.
2114 Increase this counter if you tried to implement this but
2115 failed to do it without duplicating a lot of code from
2116 other places in the compiler: 2
tcx.mk_tup(&[
    tcx.mk_array(tcx.types.usize, 3),
    tcx.mk_array(Option<fn()>),
])
*/
}
2123 _ => bug!("TyLayout::field_type({:?}): not applicable", this)
2127 // Arrays and slices.
2128 ty::Array(element, _) |
2129 ty::Slice(element) => element,
2130 ty::Str => tcx.types.u8,
2132 // Tuples, generators and closures.
2133 ty::Closure(def_id, ref substs) => {
2134 substs.upvar_tys(def_id, tcx).nth(i).unwrap()
2137 ty::Generator(def_id, ref substs, _) => {
2138 match this.variants {
2139 Variants::Single { index } => {
2140 substs.state_tys(def_id, tcx)
2141 .nth(index.as_usize()).unwrap()
2144 Variants::Multiple { ref discr, discr_index, .. } => {
2145 if i == discr_index {
2146 return discr_layout(discr);
2148 substs.prefix_tys(def_id, tcx).nth(i).unwrap()
2153 ty::Tuple(tys) => tys[i].expect_ty(),
2155 // SIMD vector types.
2156 ty::Adt(def, ..) if def.repr.simd() => {
2157 this.ty.simd_type(tcx)
2161 ty::Adt(def, substs) => {
2162 match this.variants {
2163 Variants::Single { index } => {
2164 def.variants[index].fields[i].ty(tcx, substs)
2167 // Discriminant field for enums (where applicable).
2168 Variants::Multiple { ref discr, .. } => {
2170 return discr_layout(discr);
2175 ty::Projection(_) | ty::UnnormalizedProjection(..) | ty::Bound(..) |
2176 ty::Placeholder(..) | ty::Opaque(..) | ty::Param(_) | ty::Infer(_) |
2178 bug!("TyLayout::field_type: unexpected type `{}`", this.ty)
fn pointee_info_at(
    this: TyLayout<'tcx>,
    cx: &C,
    offset: Size,
) -> Option<PointeeInfo> {
    match this.ty.sty {
2189 ty::RawPtr(mt) if offset.bytes() == 0 => {
2190 cx.layout_of(mt.ty).to_result().ok()
.map(|layout| PointeeInfo {
    size: layout.size,
    align: layout.align.abi,
    safe: None,
})
2198 ty::Ref(_, ty, mt) if offset.bytes() == 0 => {
let tcx = cx.tcx();
let is_freeze = ty.is_freeze(tcx, cx.param_env(), DUMMY_SP);
let kind = match mt {
    hir::MutImmutable => if is_freeze {
        PointerKind::Frozen
    } else {
        PointerKind::Shared
    },
    hir::MutMutable => {
2208 // Previously we would only emit noalias annotations for LLVM >= 6 or in
2209 // panic=abort mode. That was deemed right, as prior versions had many bugs
2210 // in conjunction with unwinding, but later versions didn’t seem to have
2211 // said issues. See issue #31681.
2213 // Alas, later on we encountered a case where noalias would generate wrong
2214 // code altogether even with recent versions of LLVM in *safe* code with no
2215 // unwinding involved. See #54462.
2217 // For now, do not enable mutable_noalias by default at all, while the
2218 // issue is being figured out.
let mutable_noalias = tcx.sess.opts.debugging_opts.mutable_noalias
    .unwrap_or(false);
if mutable_noalias {
    PointerKind::UniqueBorrowed
} else {
    PointerKind::Shared
}
2229 cx.layout_of(ty).to_result().ok()
.map(|layout| PointeeInfo {
    size: layout.size,
    align: layout.align.abi,
    safe: Some(kind),
})
2238 let mut data_variant = match this.variants {
2239 // Within the discriminant field, only the niche itself is
// always initialized, so we only check for a pointer at its
// offset.
//
2243 // If the niche is a pointer, it's either valid (according
2244 // to its type), or null (which the niche field's scalar
2245 // validity range encodes). This allows using
2246 // `dereferenceable_or_null` for e.g., `Option<&T>`, and
2247 // this will continue to work as long as we don't start
2248 // using more niches than just null (e.g., the first page of
2249 // the address space, or unaligned pointers).
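// For example (sketch): in `Option<&T>`, the niche is the pointer
// scalar itself, with 0 (null) reserved to encode `None`; the
// `dataful_variant` below is then `Some`, and describing the pointer
// found at `offset` in that variant remains sound.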
Variants::Multiple {
    discr_kind: DiscriminantKind::Niche {
        dataful_variant,
        ..
    },
    discr_index,
    ..
} if this.fields.offset(discr_index) == offset =>
    Some(this.for_variant(cx, dataful_variant)),

_ => Some(this),
};
2262 if let Some(variant) = data_variant {
2263 // We're not interested in any unions.
2264 if let FieldPlacement::Union(_) = variant.fields {
2265 data_variant = None;
2269 let mut result = None;
2271 if let Some(variant) = data_variant {
2272 let ptr_end = offset + Pointer.size(cx);
2273 for i in 0..variant.fields.count() {
2274 let field_start = variant.fields.offset(i);
2275 if field_start <= offset {
2276 let field = variant.field(cx, i);
result = field.to_result().ok()
    .and_then(|field| {
        if ptr_end <= field_start + field.size {
            // We found the right field, look inside it.
            field.pointee_info_at(cx, offset - field_start)
        } else {
            None
        }
    });
if result.is_some() {
    break;
}
2293 // FIXME(eddyb) This should be for `ptr::Unique<T>`, not `Box<T>`.
2294 if let Some(ref mut pointee) = result {
2295 if let ty::Adt(def, _) = this.ty.sty {
2296 if def.is_box() && offset.bytes() == 0 {
2297 pointee.safe = Some(PointerKind::UniqueOwned);
2308 impl<'a> HashStable<StableHashingContext<'a>> for Variants {
2309 fn hash_stable<W: StableHasherResult>(&self,
2310 hcx: &mut StableHashingContext<'a>,
2311 hasher: &mut StableHasher<W>) {
2312 use crate::ty::layout::Variants::*;
mem::discriminant(self).hash_stable(hcx, hasher);
match *self {
    Single { index } => {
        index.hash_stable(hcx, hasher);
    }
    Multiple {
        ref discr,
        ref discr_kind,
        discr_index,
        ref variants,
    } => {
        discr.hash_stable(hcx, hasher);
        discr_kind.hash_stable(hcx, hasher);
        discr_index.hash_stable(hcx, hasher);
        variants.hash_stable(hcx, hasher);
    }
}
}
}
2334 impl<'a> HashStable<StableHashingContext<'a>> for DiscriminantKind {
2335 fn hash_stable<W: StableHasherResult>(&self,
2336 hcx: &mut StableHashingContext<'a>,
2337 hasher: &mut StableHasher<W>) {
2338 use crate::ty::layout::DiscriminantKind::*;
mem::discriminant(self).hash_stable(hcx, hasher);
match *self {
    Tag => {}
    Niche {
        dataful_variant,
        ref niche_variants,
        niche_start,
    } => {
        dataful_variant.hash_stable(hcx, hasher);
        niche_variants.start().hash_stable(hcx, hasher);
        niche_variants.end().hash_stable(hcx, hasher);
        niche_start.hash_stable(hcx, hasher);
    }
}
}
}
2357 impl<'a> HashStable<StableHashingContext<'a>> for FieldPlacement {
2358 fn hash_stable<W: StableHasherResult>(&self,
2359 hcx: &mut StableHashingContext<'a>,
2360 hasher: &mut StableHasher<W>) {
2361 use crate::ty::layout::FieldPlacement::*;
mem::discriminant(self).hash_stable(hcx, hasher);
match *self {
    Union(count) => {
        count.hash_stable(hcx, hasher);
    }
    Array { count, stride } => {
        count.hash_stable(hcx, hasher);
        stride.hash_stable(hcx, hasher);
    }
    Arbitrary { ref offsets, ref memory_index } => {
        offsets.hash_stable(hcx, hasher);
        memory_index.hash_stable(hcx, hasher);
    }
}
}
}
2380 impl<'a> HashStable<StableHashingContext<'a>> for VariantIdx {
fn hash_stable<W: StableHasherResult>(
    &self,
2383 hcx: &mut StableHashingContext<'a>,
2384 hasher: &mut StableHasher<W>,
) {
    self.as_u32().hash_stable(hcx, hasher)
}
}
2390 impl<'a> HashStable<StableHashingContext<'a>> for Abi {
2391 fn hash_stable<W: StableHasherResult>(&self,
2392 hcx: &mut StableHashingContext<'a>,
2393 hasher: &mut StableHasher<W>) {
2394 use crate::ty::layout::Abi::*;
mem::discriminant(self).hash_stable(hcx, hasher);
match *self {
    Uninhabited => {}
    Scalar(ref value) => {
        value.hash_stable(hcx, hasher);
    }
    ScalarPair(ref a, ref b) => {
        a.hash_stable(hcx, hasher);
        b.hash_stable(hcx, hasher);
    }
    Vector { ref element, count } => {
        element.hash_stable(hcx, hasher);
        count.hash_stable(hcx, hasher);
    }
    Aggregate { sized } => {
        sized.hash_stable(hcx, hasher);
    }
}
}
}
2417 impl<'a> HashStable<StableHashingContext<'a>> for Scalar {
2418 fn hash_stable<W: StableHasherResult>(&self,
2419 hcx: &mut StableHashingContext<'a>,
2420 hasher: &mut StableHasher<W>) {
2421 let Scalar { value, ref valid_range } = *self;
2422 value.hash_stable(hcx, hasher);
2423 valid_range.start().hash_stable(hcx, hasher);
2424 valid_range.end().hash_stable(hcx, hasher);
impl_stable_hash_for!(struct crate::ty::layout::Niche {
    offset,
    scalar
});

impl_stable_hash_for!(struct crate::ty::layout::LayoutDetails {
    variants,
    fields,
    abi,
    largest_niche,
    align,
    size
});

impl_stable_hash_for!(enum crate::ty::layout::Integer {
    I8,
    I16,
    I32,
    I64,
    I128
});

impl_stable_hash_for!(enum crate::ty::layout::Primitive {
    Int(integer, signed),
    Float(fty),
    Pointer
});

impl_stable_hash_for!(struct crate::ty::layout::AbiAndPrefAlign {
    abi,
    pref
});
2461 impl<'tcx> HashStable<StableHashingContext<'tcx>> for Align {
fn hash_stable<W: StableHasherResult>(
    &self,
2464 hcx: &mut StableHashingContext<'tcx>,
2465 hasher: &mut StableHasher<W>,
) {
    self.bytes().hash_stable(hcx, hasher);
}
}
2471 impl<'tcx> HashStable<StableHashingContext<'tcx>> for Size {
fn hash_stable<W: StableHasherResult>(
    &self,
2474 hcx: &mut StableHashingContext<'tcx>,
2475 hasher: &mut StableHasher<W>,
) {
    self.bytes().hash_stable(hcx, hasher);
}
}
2481 impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for LayoutError<'tcx> {
2482 fn hash_stable<W: StableHasherResult>(&self,
2483 hcx: &mut StableHashingContext<'a>,
2484 hasher: &mut StableHasher<W>) {
2485 use crate::ty::layout::LayoutError::*;
mem::discriminant(self).hash_stable(hcx, hasher);
match *self {
    Unknown(t) |
    SizeOverflow(t) => t.hash_stable(hcx, hasher)
}
}
}
pub trait FnTypeExt<'tcx, C>
where
    C: LayoutOf<Ty = Ty<'tcx>, TyLayout = TyLayout<'tcx>>
        + HasDataLayout
        + HasTargetSpec
        + HasTyCtxt<'tcx>
        + HasParamEnv<'tcx>,
{
2503 fn of_instance(cx: &C, instance: &ty::Instance<'tcx>) -> Self;
2504 fn new(cx: &C, sig: ty::FnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self;
2505 fn new_vtable(cx: &C, sig: ty::FnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self;
    fn new_internal(
        cx: &C,
        sig: ty::FnSig<'tcx>,
        extra_args: &[Ty<'tcx>],
        mk_arg_type: impl Fn(Ty<'tcx>, Option<usize>) -> ArgType<'tcx, Ty<'tcx>>,
    ) -> Self;
2512 fn adjust_for_abi(&mut self, cx: &C, abi: SpecAbi);
impl<'tcx, C> FnTypeExt<'tcx, C> for call::FnType<'tcx, Ty<'tcx>>
where
    C: LayoutOf<Ty = Ty<'tcx>, TyLayout = TyLayout<'tcx>>
        + HasDataLayout
        + HasTargetSpec
        + HasTyCtxt<'tcx>
        + HasParamEnv<'tcx>,
{
2523 fn of_instance(cx: &C, instance: &ty::Instance<'tcx>) -> Self {
2524 let sig = instance.fn_sig(cx.tcx());
let sig = cx.tcx()
    .normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig);
2528 call::FnType::new(cx, sig, &[])
2531 fn new(cx: &C, sig: ty::FnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
2532 call::FnType::new_internal(cx, sig, extra_args, |ty, _| ArgType::new(cx.layout_of(ty)))
2535 fn new_vtable(cx: &C, sig: ty::FnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
2536 FnTypeExt::new_internal(cx, sig, extra_args, |ty, arg_idx| {
2537 let mut layout = cx.layout_of(ty);
2538 // Don't pass the vtable, it's not an argument of the virtual fn.
2539 // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
2540 // or `&/&mut dyn Trait` because this is special-cased elsewhere in codegen
2541 if arg_idx == Some(0) {
2542 let fat_pointer_ty = if layout.is_unsized() {
2543 // unsized `self` is passed as a pointer to `self`
2544 // FIXME (mikeyhew) change this to use &own if it is ever added to the language
2545 cx.tcx().mk_mut_ptr(layout.ty)
} else {
    match layout.abi {
        Abi::ScalarPair(..) => (),
        _ => bug!("receiver type has unsupported layout: {:?}", layout),
    }
2552 // In the case of Rc<Self>, we need to explicitly pass a *mut RcBox<Self>
2553 // with a Scalar (not ScalarPair) ABI. This is a hack that is understood
2554 // elsewhere in the compiler as a method on a `dyn Trait`.
2555 // To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes until we
2556 // get a built-in pointer type
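// For example (sketch): with `self: Rc<Self>` on a `dyn Trait` method,
// `layout` here is that of `Rc<dyn Trait>`; unwrapping its non-zero-sized
// fields bottoms out at the raw `*mut RcBox<dyn Trait>` fat pointer,
// which is what the loop below is searching for.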
2557 let mut fat_pointer_layout = layout;
2558 'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr()
2559 && !fat_pointer_layout.ty.is_region_ptr()
2561 'iter_fields: for i in 0..fat_pointer_layout.fields.count() {
2562 let field_layout = fat_pointer_layout.field(cx, i);
2564 if !field_layout.is_zst() {
2565 fat_pointer_layout = field_layout;
2566 continue 'descend_newtypes;
2571 "receiver has no non-zero-sized fields {:?}",
2576 fat_pointer_layout.ty
2579 // we now have a type like `*mut RcBox<dyn Trait>`
2580 // change its layout to that of `*mut ()`, a thin pointer, but keep the same type
2581 // this is understood as a special case elsewhere in the compiler
2582 let unit_pointer_ty = cx.tcx().mk_mut_ptr(cx.tcx().mk_unit());
2583 layout = cx.layout_of(unit_pointer_ty);
2584 layout.ty = fat_pointer_ty;
2586 ArgType::new(layout)
})
}

fn new_internal(
    cx: &C,
    sig: ty::FnSig<'tcx>,
    extra_args: &[Ty<'tcx>],
    mk_arg_type: impl Fn(Ty<'tcx>, Option<usize>) -> ArgType<'tcx, Ty<'tcx>>,
) -> Self {
2596 debug!("FnType::new_internal({:?}, {:?})", sig, extra_args);
2598 use rustc_target::spec::abi::Abi::*;
2599 let conv = match cx.tcx().sess.target.target.adjust_abi(sig.abi) {
2600 RustIntrinsic | PlatformIntrinsic | Rust | RustCall => Conv::C,
2602 // It's the ABI's job to select this, not ours.
2603 System => bug!("system abi should be selected elsewhere"),
2605 Stdcall => Conv::X86Stdcall,
2606 Fastcall => Conv::X86Fastcall,
2607 Vectorcall => Conv::X86VectorCall,
2608 Thiscall => Conv::X86ThisCall,
C => Conv::C,
Unadjusted => Conv::C,
2611 Win64 => Conv::X86_64Win64,
2612 SysV64 => Conv::X86_64SysV,
2613 Aapcs => Conv::ArmAapcs,
2614 PtxKernel => Conv::PtxKernel,
2615 Msp430Interrupt => Conv::Msp430Intr,
2616 X86Interrupt => Conv::X86Intr,
2617 AmdGpuKernel => Conv::AmdGpuKernel,
// These API constants ought to be more specific...
Cdecl => Conv::C,
};
2623 let mut inputs = sig.inputs();
2624 let extra_args = if sig.abi == RustCall {
2625 assert!(!sig.c_variadic && extra_args.is_empty());
2627 match sig.inputs().last().unwrap().sty {
2628 ty::Tuple(tupled_arguments) => {
2629 inputs = &sig.inputs()[0..sig.inputs().len() - 1];
2630 tupled_arguments.iter().map(|k| k.expect_ty()).collect()
2634 "argument to function with \"rust-call\" ABI \
2640 assert!(sig.c_variadic || extra_args.is_empty());
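// For example (sketch): an `extern "rust-call" fn(&mut F, (A, B))`
// arrives here with `sig.inputs() == [&mut F, (A, B)]`; after the
// untupling above, `inputs == [&mut F]` and `extra_args == [A, B]`,
// so codegen sees `A` and `B` as two ordinary arguments.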
2644 let target = &cx.tcx().sess.target.target;
let win_x64_gnu =
    target.target_os == "windows" && target.arch == "x86_64" && target.target_env == "gnu";
let linux_s390x =
    target.target_os == "linux" && target.arch == "s390x" && target.target_env == "gnu";
let linux_sparc64 =
    target.target_os == "linux" && target.arch == "sparc64" && target.target_env == "gnu";
2651 let rust_abi = match sig.abi {
RustIntrinsic | PlatformIntrinsic | Rust | RustCall => true,
    _ => false
};
2656 // Handle safe Rust thin and fat pointers.
let adjust_for_rust_scalar = |attrs: &mut ArgAttributes,
                              scalar: &Scalar,
                              layout: TyLayout<'tcx>,
                              offset: Size,
                              is_return: bool| {
2662 // Booleans are always an i1 that needs to be zero-extended.
2663 if scalar.is_bool() {
attrs.set(ArgAttribute::ZExt);
    return;
}
2668 // Only pointer types handled below.
if scalar.value != Pointer {
    return;
}
2673 if scalar.valid_range.start() < scalar.valid_range.end() {
2674 if *scalar.valid_range.start() > 0 {
2675 attrs.set(ArgAttribute::NonNull);
2679 if let Some(pointee) = layout.pointee_info_at(cx, offset) {
2680 if let Some(kind) = pointee.safe {
2681 attrs.pointee_size = pointee.size;
2682 attrs.pointee_align = Some(pointee.align);
// `Box` pointer parameters never alias because ownership is transferred.
// `&mut` pointer parameters never alias other parameters
// or mutable global data.
//
2688 // `&T` where `T` contains no `UnsafeCell<U>` is immutable,
2689 // and can be marked as both `readonly` and `noalias`, as
2690 // LLVM's definition of `noalias` is based solely on memory
2691 // dependencies rather than pointer equality
2692 let no_alias = match kind {
2693 PointerKind::Shared => false,
2694 PointerKind::UniqueOwned => true,
2695 PointerKind::Frozen | PointerKind::UniqueBorrowed => !is_return,
};
if no_alias {
    attrs.set(ArgAttribute::NoAlias);
}
if kind == PointerKind::Frozen && !is_return {
    attrs.set(ArgAttribute::ReadOnly);
}
}
}
};
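// For example (sketch): in `fn(&u8, &mut u8, Box<u8>)`, every argument
// is a non-null pointer, so all three get `NonNull` plus pointee
// size/alignment data; `&u8` (Frozen) also gets `NoAlias` and
// `ReadOnly`, `Box<u8>` (UniqueOwned) gets `NoAlias`, and `&mut u8`
// only gets `NoAlias` under `-Z mutable-noalias` (UniqueBorrowed).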
2708 // Store the index of the last argument. This is useful for working with
2709 // C-compatible variadic arguments.
let last_arg_idx = if sig.inputs().is_empty() {
    None
} else {
    Some(sig.inputs().len() - 1)
};
2716 let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| {
2717 let is_return = arg_idx.is_none();
2718 let mut arg = mk_arg_type(ty, arg_idx);
2719 if arg.layout.is_zst() {
2720 // For some forsaken reason, x86_64-pc-windows-gnu
2721 // doesn't ignore zero-sized struct arguments.
2722 // The same is true for s390x-unknown-linux-gnu
2723 // and sparc64-unknown-linux-gnu.
2724 if is_return || rust_abi || (!win_x64_gnu && !linux_s390x && !linux_sparc64) {
2725 arg.mode = PassMode::Ignore(IgnoreMode::Zst);
2729 // If this is a C-variadic function, this is not the return value,
// and there are one or more fixed arguments; ensure that the `VaListImpl`
2731 // is ignored as an argument.
if sig.c_variadic {
    match (last_arg_idx, arg_idx) {
2734 (Some(last_idx), Some(cur_idx)) if last_idx == cur_idx => {
2735 let va_list_did = match cx.tcx().lang_items().va_list() {
    Some(did) => did,
    None => bug!("`va_list` lang item required for C-variadic functions"),
};
match ty.sty {
    ty::Adt(def, _) if def.did == va_list_did => {
2741 // This is the "spoofed" `VaListImpl`. Set the arguments mode
2742 // so that it will be ignored.
2743 arg.mode = PassMode::Ignore(IgnoreMode::CVarArgs);
2752 // FIXME(eddyb) other ABIs don't have logic for scalar pairs.
2753 if !is_return && rust_abi {
2754 if let Abi::ScalarPair(ref a, ref b) = arg.layout.abi {
2755 let mut a_attrs = ArgAttributes::new();
2756 let mut b_attrs = ArgAttributes::new();
2757 adjust_for_rust_scalar(&mut a_attrs, a, arg.layout, Size::ZERO, false);
adjust_for_rust_scalar(
    &mut b_attrs,
    b,
    arg.layout,
    a.value.size(cx).align_to(b.value.align(cx).abi),
    false,
);
arg.mode = PassMode::Pair(a_attrs, b_attrs);
        return arg;
    }
}
2770 if let Abi::Scalar(ref scalar) = arg.layout.abi {
2771 if let PassMode::Direct(ref mut attrs) = arg.mode {
adjust_for_rust_scalar(attrs, scalar, arg.layout, Size::ZERO, is_return);
    }
}

arg
};
2779 let mut fn_ty = FnType {
2780 ret: arg_of(sig.output(), None),
    args: inputs.iter().cloned().chain(extra_args).enumerate()
        .map(|(i, ty)| arg_of(ty, Some(i)))
        .collect(),
    c_variadic: sig.c_variadic,
    conv,
};
fn_ty.adjust_for_abi(cx, sig.abi);
fn_ty
}
2795 fn adjust_for_abi(&mut self, cx: &C, abi: SpecAbi) {
if abi == SpecAbi::Unadjusted {
    return;
}
2800 if abi == SpecAbi::Rust
2801 || abi == SpecAbi::RustCall
2802 || abi == SpecAbi::RustIntrinsic
2803 || abi == SpecAbi::PlatformIntrinsic
{
    let fixup = |arg: &mut ArgType<'tcx, Ty<'tcx>>| {
if arg.is_ignore() {
    return;
}
2810 match arg.layout.abi {
2811 Abi::Aggregate { .. } => {}
2813 // This is a fun case! The gist of what this is doing is
2814 // that we want callers and callees to always agree on the
2815 // ABI of how they pass SIMD arguments. If we were to *not*
2816 // make these arguments indirect then they'd be immediates
// in LLVM, which means that they'd use whatever the
2818 // appropriate ABI is for the callee and the caller. That
2819 // means, for example, if the caller doesn't have AVX
2820 // enabled but the callee does, then passing an AVX argument
2821 // across this boundary would cause corrupt data to show up.
2823 // This problem is fixed by unconditionally passing SIMD
2824 // arguments through memory between callers and callees
2825 // which should get them all to agree on ABI regardless of
2826 // target feature sets. Some more information about this
2827 // issue can be found in #44367.
2829 // Note that the platform intrinsic ABI is exempt here as
// that's how we connect up to LLVM, and it's unstable
// anyway; we control all calls to it in libstd.
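// For example (sketch): passing a 256-bit SIMD vector from a caller
// compiled without AVX to a callee compiled with AVX would otherwise
// leave the two sides disagreeing on which registers hold the value;
// routing it through memory sidesteps the mismatch.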
Abi::Vector { .. }
    if abi != SpecAbi::PlatformIntrinsic
        && cx.tcx().sess.target.target.options.simd_types_indirect =>
{
    arg.make_indirect();
    return;
}

_ => return
}
2843 let size = arg.layout.size;
2844 if arg.layout.is_unsized() || size > Pointer.size(cx) {
arg.make_indirect();
} else {
2847 // We want to pass small aggregates as immediates, but using
// an LLVM aggregate type for this leads to bad optimizations,
2849 // so we pick an appropriately sized integer type instead.
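// For example (sketch): on a 64-bit target, a 4-byte aggregate such
// as `(u8, u16)` is cast to a single 4-byte integer register (`i32`
// in LLVM terms) instead of being passed as an LLVM aggregate.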
arg.cast_to(Reg {
        kind: RegKind::Integer,
        size
    });
}
};
2856 fixup(&mut self.ret);
for arg in &mut self.args {
    fixup(arg);
}
2860 if let PassMode::Indirect(ref mut attrs, _) = self.ret.mode {
attrs.set(ArgAttribute::StructRet);
}

return;
}
2866 if let Err(msg) = self.adjust_for_cabi(cx, abi) {
2867 cx.tcx().sess.fatal(&msg);